4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
44 from ganeti import ssh
45 from ganeti import utils
46 from ganeti import errors
47 from ganeti import hypervisor
48 from ganeti import locking
49 from ganeti import constants
50 from ganeti import objects
51 from ganeti import serializer
52 from ganeti import ssconf
53 from ganeti import uidpool
54 from ganeti import compat
55 from ganeti import masterd
56 from ganeti import netutils
57 from ganeti import query
58 from ganeti import qlang
59 from ganeti import opcodes
61 from ganeti import rpc
62 from ganeti import runtime
64 import ganeti.masterd.instance # pylint: disable=W0611
67 #: Size of DRBD meta block device
71 INSTANCE_DOWN = [constants.ADMINST_DOWN]
72 INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
73 INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
75 #: Instance status in which an instance can be marked as offline/online
76 CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
77 constants.ADMINST_OFFLINE,
82 """Data container for LU results with jobs.
84 Instances of this class returned from L{LogicalUnit.Exec} will be recognized
85 by L{mcpu._ProcessResult}. The latter will then submit the jobs
86 contained in the C{jobs} attribute and include the job IDs in the opcode result.
90 def __init__(self, jobs, **kwargs):
91 """Initializes this class.
93 Additional return values can be specified as keyword arguments.
95 @type jobs: list of lists of L{opcode.OpCode}
96 @param jobs: A list of lists of opcode objects
103 class LogicalUnit(object):
104 """Logical Unit base class.
106 Subclasses must follow these rules:
107 - implement ExpandNames
108 - implement CheckPrereq (except when tasklets are used)
109 - implement Exec (except when tasklets are used)
110 - implement BuildHooksEnv
111 - implement BuildHooksNodes
112 - redefine HPATH and HTYPE
113 - optionally redefine their run requirements:
114 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
116 Note that all commands require root permissions.
118 @ivar dry_run_result: the value (if any) that will be returned to the caller
119 in dry-run mode (signalled by opcode dry_run parameter)
126 def __init__(self, processor, op, context, rpc_runner):
127 """Constructor for LogicalUnit.
129 This needs to be overridden in derived classes in order to check op
133 self.proc = processor
135 self.cfg = context.cfg
136 self.glm = context.glm
138 self.owned_locks = context.glm.list_owned
139 self.context = context
140 self.rpc = rpc_runner
141 # Dicts used to declare locking needs to mcpu
142 self.needed_locks = None
143 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
145 self.remove_locks = {}
146 # Used to force good behavior when calling helper functions
147 self.recalculate_locks = {}
149 self.Log = processor.Log # pylint: disable=C0103
150 self.LogWarning = processor.LogWarning # pylint: disable=C0103
151 self.LogInfo = processor.LogInfo # pylint: disable=C0103
152 self.LogStep = processor.LogStep # pylint: disable=C0103
153 # support for dry-run
154 self.dry_run_result = None
155 # support for generic debug attribute
156 if (not hasattr(self.op, "debug_level") or
157 not isinstance(self.op.debug_level, int)):
158 self.op.debug_level = 0
163 # Validate opcode parameters and set defaults
164 self.op.Validate(True)
166 self.CheckArguments()
168 def CheckArguments(self):
169 """Check syntactic validity for the opcode arguments.
171 This method is for doing a simple syntactic check and ensuring the
172 validity of opcode parameters, without any cluster-related
173 checks. While the same can be accomplished in ExpandNames and/or
174 CheckPrereq, doing these separate is better because:
176 - ExpandNames is left as purely a lock-related function
177 - CheckPrereq is run after we have acquired locks (and possible
180 The function is allowed to change the self.op attribute so that
181 later methods can no longer worry about missing parameters.
186 def ExpandNames(self):
187 """Expand names for this LU.
189 This method is called before starting to execute the opcode, and it should
190 update all the parameters of the opcode to their canonical form (e.g. a
191 short node name must be fully expanded after this method has successfully
192 completed). This way locking, hooks, logging, etc. can work correctly.
194 LUs which implement this method must also populate the self.needed_locks
195 member, as a dict with lock levels as keys, and a list of needed lock names
198 - use an empty dict if you don't need any lock
199 - if you don't need any lock at a particular level omit that
200 level (note that in this case C{DeclareLocks} won't be called
201 at all for that level)
202 - if you need locks at a level, but you can't calculate it in
203 this function, initialise that level with an empty list and do
204 further processing in L{LogicalUnit.DeclareLocks} (see that
205 function's docstring)
206 - don't put anything for the BGL level
207 - if you want all locks at a level use L{locking.ALL_SET} as a value
209 If you need to share locks (rather than acquire them exclusively) at one
210 level you can modify self.share_locks, setting a true value (usually 1) for
211 that level. By default locks are not shared.
213 This function can also define a list of tasklets, which then will be
214 executed in order instead of the usual LU-level CheckPrereq and Exec
215 functions, if those are not defined by the LU.
219 # Acquire all nodes and one instance
220 self.needed_locks = {
221 locking.LEVEL_NODE: locking.ALL_SET,
222 locking.LEVEL_INSTANCE: ['instance1.example.com'],
224 # Acquire just two nodes
225 self.needed_locks = {
226 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
229 self.needed_locks = {} # No, you can't leave it to the default value None
232 # The implementation of this method is mandatory only if the new LU is
233 # concurrent, so that old LUs don't need to be changed all at the same time.
236 self.needed_locks = {} # Exclusive LUs don't need locks.
238 raise NotImplementedError
240 def DeclareLocks(self, level):
241 """Declare LU locking needs for a level
243 While most LUs can just declare their locking needs at ExpandNames time,
244 sometimes there's the need to calculate some locks after having acquired
245 the ones before. This function is called just before acquiring locks at a
246 particular level, but after acquiring the ones at lower levels, and permits
247 such calculations. It can be used to modify self.needed_locks, and by
248 default it does nothing.
250 This function is only called if you have something already set in
251 self.needed_locks for the level.
253 @param level: Locking level which is going to be locked
254 @type level: member of L{ganeti.locking.LEVELS}
258 def CheckPrereq(self):
259 """Check prerequisites for this LU.
261 This method should check that the prerequisites for the execution
262 of this LU are fulfilled. It can do internode communication, but
263 it should be idempotent - no cluster or system changes are
266 The method should raise errors.OpPrereqError in case something is
267 not fulfilled. Its return value is ignored.
269 This method should also update all the parameters of the opcode to
270 their canonical form if it hasn't been done by ExpandNames before.
273 if self.tasklets is not None:
274 for (idx, tl) in enumerate(self.tasklets):
275 logging.debug("Checking prerequisites for tasklet %s/%s",
276 idx + 1, len(self.tasklets))
281 def Exec(self, feedback_fn):
284 This method should implement the actual work. It should raise
285 errors.OpExecError for failures that are somewhat dealt with in
289 if self.tasklets is not None:
290 for (idx, tl) in enumerate(self.tasklets):
291 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
294 raise NotImplementedError
296 def BuildHooksEnv(self):
297 """Build hooks environment for this LU.
300 @return: Dictionary containing the environment that will be used for
301 running the hooks for this LU. The keys of the dict must not be prefixed
302 with "GANETI_"--that'll be added by the hooks runner. The hooks runner
303 will extend the environment with additional variables. If no environment
304 should be defined, an empty dictionary should be returned (not C{None}).
305 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
309 raise NotImplementedError
311 def BuildHooksNodes(self):
312 """Build list of nodes to run LU's hooks.
314 @rtype: tuple; (list, list)
315 @return: Tuple containing a list of node names on which the hook
316 should run before the execution and a list of node names on which the
317 hook should run after the execution. No nodes should be returned as an
318 empty list (and not None).
319 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
323 raise NotImplementedError
325 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
326 """Notify the LU about the results of its hooks.
328 This method is called every time a hooks phase is executed, and notifies
329 the Logical Unit about the hooks' result. The LU can then use it to alter
330 its result based on the hooks. By default the method does nothing and the
331 previous result is passed back unchanged but any LU can define it if it
332 wants to use the local cluster hook-scripts somehow.
334 @param phase: one of L{constants.HOOKS_PHASE_POST} or
335 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
336 @param hook_results: the results of the multi-node hooks rpc call
337 @param feedback_fn: function used to send feedback back to the caller
338 @param lu_result: the previous Exec result this LU had, or None
340 @return: the new Exec result, based on the previous result
344 # The API must be kept, thus we ignore the "unused argument" and "could
345 # be a function" warnings
346 # pylint: disable=W0613,R0201
349 def _ExpandAndLockInstance(self):
350 """Helper function to expand and lock an instance.
352 Many LUs that work on an instance take its name in self.op.instance_name
353 and need to expand it and then declare the expanded name for locking. This
354 function does it, and then updates self.op.instance_name to the expanded
355 name. It also initializes needed_locks as a dict, if this hasn't been done
359 if self.needed_locks is None:
360 self.needed_locks = {}
362 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
363 "_ExpandAndLockInstance called with instance-level locks set"
364 self.op.instance_name = _ExpandInstanceName(self.cfg,
365 self.op.instance_name)
366 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
368 def _LockInstancesNodes(self, primary_only=False,
369 level=locking.LEVEL_NODE):
370 """Helper function to declare instances' nodes for locking.
372 This function should be called after locking one or more instances to lock
373 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
374 with all primary or secondary nodes for instances already locked and
375 present in self.needed_locks[locking.LEVEL_INSTANCE].
377 It should be called from DeclareLocks, and for safety only works if
378 self.recalculate_locks[locking.LEVEL_NODE] is set.
380 In the future it may grow parameters to just lock some instance's nodes, or
381 to just lock primaries or secondary nodes, if needed.
383 It should be called in DeclareLocks in a way similar to::
385 if level == locking.LEVEL_NODE:
386 self._LockInstancesNodes()
388 @type primary_only: boolean
389 @param primary_only: only lock primary nodes of locked instances
390 @param level: Which lock level to use for locking nodes
393 assert level in self.recalculate_locks, \
394 "_LockInstancesNodes helper function called with no nodes to recalculate"
396 # TODO: check if we've really been called with the instance locks held
398 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
399 # future we might want to have different behaviors depending on the value
400 # of self.recalculate_locks[locking.LEVEL_NODE]
402 locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
403 for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
404 wanted_nodes.append(instance.primary_node)
406 wanted_nodes.extend(instance.secondary_nodes)
408 if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
409 self.needed_locks[level] = wanted_nodes
410 elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
411 self.needed_locks[level].extend(wanted_nodes)
413 raise errors.ProgrammerError("Unknown recalculation mode")
415 del self.recalculate_locks[level]
418 class NoHooksLU(LogicalUnit): # pylint: disable=W0223
419 """Simple LU which runs no hooks.
421 This LU is intended as a parent for other LogicalUnits which will
422 run no hooks, in order to reduce duplicate code.
428 def BuildHooksEnv(self):
429 """Empty BuildHooksEnv for NoHooksLu.
431 This just raises an error.
434 raise AssertionError("BuildHooksEnv called for NoHooksLUs")
436 def BuildHooksNodes(self):
437 """Empty BuildHooksNodes for NoHooksLU.
440 raise AssertionError("BuildHooksNodes called for NoHooksLU")
444 """Tasklet base class.
446 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
447 they can mix legacy code with tasklets. Locking needs to be done in the LU,
448 tasklets know nothing about locks.
450 Subclasses must follow these rules:
451 - Implement CheckPrereq
455 def __init__(self, lu):
462 def CheckPrereq(self):
463 """Check prerequisites for this tasklet.
465 This method should check whether the prerequisites for the execution of
466 this tasklet are fulfilled. It can do internode communication, but it
467 should be idempotent - no cluster or system changes are allowed.
469 The method should raise errors.OpPrereqError in case something is not
470 fulfilled. Its return value is ignored.
472 This method should also update all parameters to their canonical form if it
473 hasn't been done before.
478 def Exec(self, feedback_fn):
479 """Execute the tasklet.
481 This method should implement the actual work. It should raise
482 errors.OpExecError for failures that are somewhat dealt with in code, or
486 raise NotImplementedError
490 """Base for query utility classes.
493 #: Attribute holding field definitions
499 def __init__(self, qfilter, fields, use_locking):
500 """Initializes this class.
503 self.use_locking = use_locking
505 self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
506 namefield=self.SORT_FIELD)
507 self.requested_data = self.query.RequestedData()
508 self.names = self.query.RequestedNames()
510 # Sort only if no names were requested
511 self.sort_by_name = not self.names
513 self.do_locking = None
516 def _GetNames(self, lu, all_names, lock_level):
517 """Helper function to determine names asked for in the query.
521 names = lu.owned_locks(lock_level)
525 if self.wanted == locking.ALL_SET:
526 assert not self.names
527 # caller didn't specify names, so ordering is not important
528 return utils.NiceSort(names)
530 # caller specified names and we must keep the same order
532 assert not self.do_locking or lu.glm.is_owned(lock_level)
534 missing = set(self.wanted).difference(names)
536 raise errors.OpExecError("Some items were removed before retrieving"
537 " their data: %s" % missing)
539 # Return expanded names
542 def ExpandNames(self, lu):
543 """Expand names for this query.
545 See L{LogicalUnit.ExpandNames}.
548 raise NotImplementedError()
550 def DeclareLocks(self, lu, level):
551 """Declare locks for this query.
553 See L{LogicalUnit.DeclareLocks}.
556 raise NotImplementedError()
558 def _GetQueryData(self, lu):
559 """Collects all data for this query.
561 @return: Query data object
564 raise NotImplementedError()
566 def NewStyleQuery(self, lu):
567 """Collect data and execute query.
570 return query.GetQueryResponse(self.query, self._GetQueryData(lu),
571 sort_by_name=self.sort_by_name)
573 def OldStyleQuery(self, lu):
574 """Collect data and execute query.
577 return self.query.OldStyleQuery(self._GetQueryData(lu),
578 sort_by_name=self.sort_by_name)
582 """Returns a dict declaring all lock levels shared.
585 return dict.fromkeys(locking.LEVELS, 1)
588 def _MakeLegacyNodeInfo(data):
589 """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
591 Converts the data into a single dictionary. This is fine for most use cases,
592 but some require information from more than one volume group or hypervisor.
595 (bootid, (vg_info, ), (hv_info, )) = data
597 return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
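# Illustrative sketch (hypothetical values): given
#   data = ("1234-boot-id", ({"vg_size": 20480, "vg_free": 10240},),
#           ({"memory_total": 4096, "memory_free": 2048},))
# the result is one flat dict holding the volume group keys, the hypervisor
# keys and the boot id, which is the shape older callers expect.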
602 def _AnnotateDiskParams(instance, devs, cfg):
603 """Little helper wrapper to the rpc annotation method.
605 @param instance: The instance object
606 @type devs: List of L{objects.Disk}
607 @param devs: The root devices (not any of its children!)
608 @param cfg: The config object
609 @return: The annotated disk copies
610 @see L{rpc.AnnotateDiskParams}
613 return rpc.AnnotateDiskParams(instance.disk_template, devs,
614 cfg.GetInstanceDiskParams(instance))
617 def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
619 """Checks if node groups for locked instances are still correct.
621 @type cfg: L{config.ConfigWriter}
622 @param cfg: Cluster configuration
623 @type instances: dict; string as key, L{objects.Instance} as value
624 @param instances: Dictionary, instance name as key, instance object as value
625 @type owned_groups: iterable of string
626 @param owned_groups: List of owned groups
627 @type owned_nodes: iterable of string
628 @param owned_nodes: List of owned nodes
629 @type cur_group_uuid: string or None
630 @param cur_group_uuid: Optional group UUID to check against instance's groups
633 for (name, inst) in instances.items():
634 assert owned_nodes.issuperset(inst.all_nodes), \
635 "Instance %s's nodes changed while we kept the lock" % name
637 inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
639 assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
640 "Instance %s has no node in group %s" % (name, cur_group_uuid)
643 def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
644 """Checks if the owned node groups are still correct for an instance.
646 @type cfg: L{config.ConfigWriter}
647 @param cfg: The cluster configuration
648 @type instance_name: string
649 @param instance_name: Instance name
650 @type owned_groups: set or frozenset
651 @param owned_groups: List of currently owned node groups
654 inst_groups = cfg.GetInstanceNodeGroups(instance_name)
656 if not owned_groups.issuperset(inst_groups):
657 raise errors.OpPrereqError("Instance %s's node groups changed since"
658 " locks were acquired, current groups are"
659 " '%s', owning groups '%s'; retry the"
662 utils.CommaJoin(inst_groups),
663 utils.CommaJoin(owned_groups)),
669 def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
670 """Checks if the instances in a node group are still correct.
672 @type cfg: L{config.ConfigWriter}
673 @param cfg: The cluster configuration
674 @type group_uuid: string
675 @param group_uuid: Node group UUID
676 @type owned_instances: set or frozenset
677 @param owned_instances: List of currently owned instances
680 wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
681 if owned_instances != wanted_instances:
682 raise errors.OpPrereqError("Instances in node group '%s' changed since"
683 " locks were acquired, wanted '%s', have '%s';"
684 " retry the operation" %
686 utils.CommaJoin(wanted_instances),
687 utils.CommaJoin(owned_instances)),
690 return wanted_instances
693 def _SupportsOob(cfg, node):
694 """Tells if node supports OOB.
696 @type cfg: L{config.ConfigWriter}
697 @param cfg: The cluster configuration
698 @type node: L{objects.Node}
699 @param node: The node
700 @return: The OOB script if supported or an empty string otherwise
703 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
706 def _GetWantedNodes(lu, nodes):
707 """Returns list of checked and expanded node names.
709 @type lu: L{LogicalUnit}
710 @param lu: the logical unit on whose behalf we execute
712 @param nodes: list of node names or None for all nodes
714 @return: the list of nodes, sorted
715 @raise errors.ProgrammerError: if the nodes parameter is of the wrong type
719 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
721 return utils.NiceSort(lu.cfg.GetNodeList())
724 def _GetWantedInstances(lu, instances):
725 """Returns list of checked and expanded instance names.
727 @type lu: L{LogicalUnit}
728 @param lu: the logical unit on whose behalf we execute
729 @type instances: list
730 @param instances: list of instance names or None for all instances
732 @return: the list of instances, sorted
733 @raise errors.OpPrereqError: if the instances parameter is wrong type
734 @raise errors.OpPrereqError: if any of the passed instances is not found
738 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
740 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
744 def _GetUpdatedParams(old_params, update_dict,
745 use_default=True, use_none=False):
746 """Return the new version of a parameter dictionary.
748 @type old_params: dict
749 @param old_params: old parameters
750 @type update_dict: dict
751 @param update_dict: dict containing new parameter values, or
752 constants.VALUE_DEFAULT to reset the parameter to its default
754 @type use_default: boolean
755 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
756 values as 'to be deleted' values
757 @type use_none: boolean
758 @param use_none: whether to recognise C{None} values as 'to be
761 @return: the new parameter dictionary
764 params_copy = copy.deepcopy(old_params)
765 for key, val in update_dict.iteritems():
766 if ((use_default and val == constants.VALUE_DEFAULT) or
767 (use_none and val is None)):
773 params_copy[key] = val
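# Illustrative sketch (hypothetical parameter names): merging an update dict
# into a copy of the old parameters.
#
#   old = {"mem": 128, "vcpus": 1}
#   upd = {"mem": constants.VALUE_DEFAULT, "vcpus": 2}
#   _GetUpdatedParams(old, upd)
#       # -> {"vcpus": 2}; "mem" is removed so it reverts to its default
#   _GetUpdatedParams(old, upd, use_default=False)
#       # -> {"mem": constants.VALUE_DEFAULT, "vcpus": 2}; the literal value
#       #    is kept because VALUE_DEFAULT is no longer treated specially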
777 def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
778 """Return the new version of an instance policy.
780 @param group_policy: whether this policy applies to a group and thus
781 we should support removal of policy entries
784 use_none = use_default = group_policy
785 ipolicy = copy.deepcopy(old_ipolicy)
786 for key, value in new_ipolicy.items():
787 if key not in constants.IPOLICY_ALL_KEYS:
788 raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
790 if key in constants.IPOLICY_ISPECS:
791 utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
792 ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
794 use_default=use_default)
796 if (not value or value == [constants.VALUE_DEFAULT] or
797 value == constants.VALUE_DEFAULT):
801 raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
802 " on the cluster" % key,
805 if key in constants.IPOLICY_PARAMETERS:
806 # FIXME: we assume all such values are float
808 ipolicy[key] = float(value)
809 except (TypeError, ValueError), err:
810 raise errors.OpPrereqError("Invalid value for attribute"
811 " '%s': '%s', error: %s" %
812 (key, value, err), errors.ECODE_INVAL)
814 # FIXME: we assume all others are lists; this should be redone
816 ipolicy[key] = list(value)
818 objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
819 except errors.ConfigurationError, err:
820 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
825 def _UpdateAndVerifySubDict(base, updates, type_check):
826 """Updates and verifies a dict with sub dicts of the same type.
828 @param base: The dict with the old data
829 @param updates: The dict with the new data
830 @param type_check: Dict suitable to ForceDictType to verify correct types
831 @returns: A new dict with updated and verified values
835 new = _GetUpdatedParams(old, value)
836 utils.ForceDictType(new, type_check)
839 ret = copy.deepcopy(base)
840 ret.update(dict((key, fn(base.get(key, {}), value))
841 for key, value in updates.items()))
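# Illustrative sketch (hypothetical keys and type-check dict): each sub-dict in
# C{updates} is merged into the matching sub-dict of C{base} via
# _GetUpdatedParams and then type-checked with utils.ForceDictType.
#
#   base    = {"node1": {"cpu_ratio": 2.0}}
#   updates = {"node1": {"cpu_ratio": 4.0}, "node2": {"cpu_ratio": 1.5}}
#   _UpdateAndVerifySubDict(base, updates, type_check)
#       # -> {"node1": {"cpu_ratio": 4.0}, "node2": {"cpu_ratio": 1.5}}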
845 def _MergeAndVerifyHvState(op_input, obj_input):
846 """Combines the hv state from an opcode with that of the object.
848 @param op_input: The input dict from the opcode
849 @param obj_input: The input dict from the objects
850 @return: The verified and updated dict
854 invalid_hvs = set(op_input) - constants.HYPER_TYPES
856 raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
857 " %s" % utils.CommaJoin(invalid_hvs),
859 if obj_input is None:
861 type_check = constants.HVSTS_PARAMETER_TYPES
862 return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
867 def _MergeAndVerifyDiskState(op_input, obj_input):
868 """Combines the disk state from an opcode with that of the object.
870 @param op_input: The input dict from the opcode
871 @param obj_input: The input dict from the objects
872 @return: The verified and updated dict
875 invalid_dst = set(op_input) - constants.DS_VALID_TYPES
877 raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
878 utils.CommaJoin(invalid_dst),
880 type_check = constants.DSS_PARAMETER_TYPES
881 if obj_input is None:
883 return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
885 for key, value in op_input.items())
890 def _ReleaseLocks(lu, level, names=None, keep=None):
891 """Releases locks owned by an LU.
893 @type lu: L{LogicalUnit}
894 @param level: Lock level
895 @type names: list or None
896 @param names: Names of locks to release
897 @type keep: list or None
898 @param keep: Names of locks to retain
901 assert not (keep is not None and names is not None), \
902 "Only one of the 'names' and the 'keep' parameters can be given"
904 if names is not None:
905 should_release = names.__contains__
907 should_release = lambda name: name not in keep
909 should_release = None
911 owned = lu.owned_locks(level)
913 # Not owning any lock at this level, do nothing
920 # Determine which locks to release
922 if should_release(name):
927 assert len(lu.owned_locks(level)) == (len(retain) + len(release))
929 # Release just some locks
930 lu.glm.release(level, names=release)
932 assert frozenset(lu.owned_locks(level)) == frozenset(retain)
935 lu.glm.release(level)
937 assert not lu.glm.is_owned(level), "No locks should be owned"
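# Illustrative sketch: a typical call from an LU that is done with most of its
# node locks and only wants to keep those of the instance's own nodes (the
# node names below are hypothetical).
#
#   _ReleaseLocks(self, locking.LEVEL_NODE,
#                 keep=["node1.example.com", "node2.example.com"])
#   # or, to drop everything at that level:
#   _ReleaseLocks(self, locking.LEVEL_NODE)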
940 def _MapInstanceDisksToNodes(instances):
941 """Creates a map from (node, volume) to instance name.
943 @type instances: list of L{objects.Instance}
944 @rtype: dict; tuple of (node name, volume name) as key, instance name as value
947 return dict(((node, vol), inst.name)
948 for inst in instances
949 for (node, vols) in inst.MapLVsByNode().items()
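# Illustrative sketch of the resulting mapping (hypothetical names): every
# (node, volume) pair owned by one of the given instances points back to that
# instance.
#
#   {("node1.example.com", "xenvg/disk0"): "inst1.example.com",
#    ("node2.example.com", "xenvg/disk0"): "inst1.example.com"}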
953 def _RunPostHook(lu, node_name):
954 """Runs the post-hook for an opcode on a single node.
957 hm = lu.proc.BuildHooksManager(lu)
959 hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
960 except Exception, err: # pylint: disable=W0703
961 lu.LogWarning("Errors occurred running hooks on %s: %s" % (node_name, err))
964 def _CheckOutputFields(static, dynamic, selected):
965 """Checks whether all selected fields are valid.
967 @type static: L{utils.FieldSet}
968 @param static: static fields set
969 @type dynamic: L{utils.FieldSet}
970 @param dynamic: dynamic fields set
977 delta = f.NonMatching(selected)
979 raise errors.OpPrereqError("Unknown output fields selected: %s"
980 % ",".join(delta), errors.ECODE_INVAL)
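# Illustrative sketch (hypothetical field names): every selected field must be
# part of either the static or the dynamic field set.
#
#   static = utils.FieldSet("name", "pinst_cnt")
#   dynamic = utils.FieldSet("oper_state")
#   _CheckOutputFields(static, dynamic, ["name", "oper_state"])   # passes
#   _CheckOutputFields(static, dynamic, ["name", "bogus"])
#       # raises OpPrereqError, listing "bogus" as an unknown output field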
983 def _CheckGlobalHvParams(params):
984 """Validates that given hypervisor params are not global ones.
986 This will ensure that instances don't get customised versions of global parameters.
990 used_globals = constants.HVC_GLOBALS.intersection(params)
992 msg = ("The following hypervisor parameters are global and cannot"
993 " be customized at instance level, please modify them at"
994 " cluster level: %s" % utils.CommaJoin(used_globals))
995 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
998 def _CheckNodeOnline(lu, node, msg=None):
999 """Ensure that a given node is online.
1001 @param lu: the LU on behalf of which we make the check
1002 @param node: the node to check
1003 @param msg: if passed, should be a message to replace the default one
1004 @raise errors.OpPrereqError: if the node is offline
1008 msg = "Can't use offline node"
1009 if lu.cfg.GetNodeInfo(node).offline:
1010 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
1013 def _CheckNodeNotDrained(lu, node):
1014 """Ensure that a given node is not drained.
1016 @param lu: the LU on behalf of which we make the check
1017 @param node: the node to check
1018 @raise errors.OpPrereqError: if the node is drained
1021 if lu.cfg.GetNodeInfo(node).drained:
1022 raise errors.OpPrereqError("Can't use drained node %s" % node,
1026 def _CheckNodeVmCapable(lu, node):
1027 """Ensure that a given node is vm capable.
1029 @param lu: the LU on behalf of which we make the check
1030 @param node: the node to check
1031 @raise errors.OpPrereqError: if the node is not vm capable
1034 if not lu.cfg.GetNodeInfo(node).vm_capable:
1035 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
1039 def _CheckNodeHasOS(lu, node, os_name, force_variant):
1040 """Ensure that a node supports a given OS.
1042 @param lu: the LU on behalf of which we make the check
1043 @param node: the node to check
1044 @param os_name: the OS to query about
1045 @param force_variant: whether to ignore variant errors
1046 @raise errors.OpPrereqError: if the node does not support the OS
1049 result = lu.rpc.call_os_get(node, os_name)
1050 result.Raise("OS '%s' not in supported OS list for node %s" %
1052 prereq=True, ecode=errors.ECODE_INVAL)
1053 if not force_variant:
1054 _CheckOSVariant(result.payload, os_name)
1057 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
1058 """Ensure that a node has the given secondary ip.
1060 @type lu: L{LogicalUnit}
1061 @param lu: the LU on behalf of which we make the check
1063 @param node: the node to check
1064 @type secondary_ip: string
1065 @param secondary_ip: the ip to check
1066 @type prereq: boolean
1067 @param prereq: whether to throw a prerequisite or an execute error
1068 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
1069 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
1072 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
1073 result.Raise("Failure checking secondary ip on node %s" % node,
1074 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1075 if not result.payload:
1076 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
1077 " please fix and re-run this command" % secondary_ip)
1079 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
1081 raise errors.OpExecError(msg)
1084 def _GetClusterDomainSecret():
1085 """Reads the cluster domain secret.
1088 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
1092 def _CheckInstanceState(lu, instance, req_states, msg=None):
1093 """Ensure that an instance is in one of the required states.
1095 @param lu: the LU on behalf of which we make the check
1096 @param instance: the instance to check
1097 @param msg: if passed, should be a message to replace the default one
1098 @raise errors.OpPrereqError: if the instance is not in the required state
1102 msg = "can't use instance from outside %s states" % ", ".join(req_states)
1103 if instance.admin_state not in req_states:
1104 raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
1105 (instance.name, instance.admin_state, msg),
1108 if constants.ADMINST_UP not in req_states:
1109 pnode = instance.primary_node
1110 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
1111 ins_l.Raise("Can't contact node %s for instance information" % pnode,
1112 prereq=True, ecode=errors.ECODE_ENVIRON)
1114 if instance.name in ins_l.payload:
1115 raise errors.OpPrereqError("Instance %s is running, %s" %
1116 (instance.name, msg), errors.ECODE_STATE)
1119 def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
1120 """Computes if value is in the desired range.
1122 @param name: name of the parameter for which we perform the check
1123 @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
1125 @param ipolicy: dictionary containing min, max and std values
1126 @param value: actual value that we want to use
1127 @return: None or element not meeting the criteria
1131 if value in [None, constants.VALUE_AUTO]:
1133 max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
1134 min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
1135 if value > max_v or min_v > value:
1137 fqn = "%s/%s" % (name, qualifier)
1140 return ("%s value %s is not in range [%s, %s]" %
1141 (fqn, value, min_v, max_v))
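# Illustrative sketch (hypothetical policy values): a value outside the
# [min, max] interval for its spec name yields an error string, otherwise None.
#
#   policy = {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 128},
#             constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 4096}}
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", policy, 512)    # -> None
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "1", policy, 8192)
#       # -> "<spec name>/1 value 8192 is not in range [128, 4096]"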
1145 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
1146 nic_count, disk_sizes, spindle_use,
1147 _compute_fn=_ComputeMinMaxSpec):
1148 """Verifies ipolicy against provided specs.
1151 @param ipolicy: The ipolicy
1153 @param mem_size: The memory size
1154 @type cpu_count: int
1155 @param cpu_count: Used cpu cores
1156 @type disk_count: int
1157 @param disk_count: Number of disks used
1158 @type nic_count: int
1159 @param nic_count: Number of nics used
1160 @type disk_sizes: list of ints
1161 @param disk_sizes: Disk sizes of used disks (len must match C{disk_count})
1162 @type spindle_use: int
1163 @param spindle_use: The number of spindles this instance uses
1164 @param _compute_fn: The compute function (unittest only)
1165 @return: A list of violations, or an empty list if no violations are found
1168 assert disk_count == len(disk_sizes)
1171 (constants.ISPEC_MEM_SIZE, "", mem_size),
1172 (constants.ISPEC_CPU_COUNT, "", cpu_count),
1173 (constants.ISPEC_DISK_COUNT, "", disk_count),
1174 (constants.ISPEC_NIC_COUNT, "", nic_count),
1175 (constants.ISPEC_SPINDLE_USE, "", spindle_use),
1176 ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
1177 for idx, d in enumerate(disk_sizes)]
1180 (_compute_fn(name, qualifier, ipolicy, value)
1181 for (name, qualifier, value) in test_settings))
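# Illustrative sketch (hypothetical values): the helper checks every individual
# spec against the policy and returns the list of violation messages, which is
# empty when the given spec fits the policy.
#
#   _ComputeIPolicySpecViolation(ipolicy, mem_size=512, cpu_count=2,
#                                disk_count=2, nic_count=1,
#                                disk_sizes=[10240, 20480], spindle_use=1)
#   # -> [] if all values fit, otherwise one message per offending spec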
1184 def _ComputeIPolicyInstanceViolation(ipolicy, instance,
1185 _compute_fn=_ComputeIPolicySpecViolation):
1186 """Compute if instance meets the specs of ipolicy.
1189 @param ipolicy: The ipolicy to verify against
1190 @type instance: L{objects.Instance}
1191 @param instance: The instance to verify
1192 @param _compute_fn: The function to verify ipolicy (unittest only)
1193 @see: L{_ComputeIPolicySpecViolation}
1196 mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
1197 cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
1198 spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
1199 disk_count = len(instance.disks)
1200 disk_sizes = [disk.size for disk in instance.disks]
1201 nic_count = len(instance.nics)
1203 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
1204 disk_sizes, spindle_use)
1207 def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
1208 _compute_fn=_ComputeIPolicySpecViolation):
1209 """Compute if instance specs meet the specs of ipolicy.
1212 @param ipolicy: The ipolicy to verify against
1213 @type instance_spec: dict
1214 @param instance_spec: The instance spec to verify
1215 @param _compute_fn: The function to verify ipolicy (unittest only)
1216 @see: L{_ComputeIPolicySpecViolation}
1219 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
1220 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
1221 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
1222 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
1223 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
1224 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
1226 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
1227 disk_sizes, spindle_use)
1230 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
1232 _compute_fn=_ComputeIPolicyInstanceViolation):
1233 """Compute if instance meets the specs of the new target group.
1235 @param ipolicy: The ipolicy to verify
1236 @param instance: The instance object to verify
1237 @param current_group: The current group of the instance
1238 @param target_group: The new group of the instance
1239 @param _compute_fn: The function to verify ipolicy (unittest only)
1240 @see: L{_ComputeIPolicySpecViolation}
1243 if current_group == target_group:
1246 return _compute_fn(ipolicy, instance)
1249 def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
1250 _compute_fn=_ComputeIPolicyNodeViolation):
1251 """Checks that the target node is correct in terms of instance policy.
1253 @param ipolicy: The ipolicy to verify
1254 @param instance: The instance object to verify
1255 @param node: The new node to relocate
1256 @param ignore: Ignore violations of the ipolicy
1257 @param _compute_fn: The function to verify ipolicy (unittest only)
1258 @see: L{_ComputeIPolicySpecViolation}
1261 primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
1262 res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
1265 msg = ("Instance does not meet target node group's (%s) instance"
1266 " policy: %s") % (node.group, utils.CommaJoin(res))
1270 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1273 def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
1274 """Computes the set of instances that would violate the new ipolicy.
1276 @param old_ipolicy: The current (still in-place) ipolicy
1277 @param new_ipolicy: The new (to become) ipolicy
1278 @param instances: List of instances to verify
1279 @return: A list of instances which violate the new ipolicy but
1283 return (_ComputeViolatingInstances(new_ipolicy, instances) -
1284 _ComputeViolatingInstances(old_ipolicy, instances))
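# Illustrative sketch: the result is the set difference of the two violation
# sets. With hypothetical instances I1 (violates both policies), I2 (violates
# only the new policy) and I3 (violates neither), only I2 is reported, since it
# is newly in violation.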
1287 def _ExpandItemName(fn, name, kind):
1288 """Expand an item name.
1290 @param fn: the function to use for expansion
1291 @param name: requested item name
1292 @param kind: text description ('Node' or 'Instance')
1293 @return: the resolved (full) name
1294 @raise errors.OpPrereqError: if the item is not found
1297 full_name = fn(name)
1298 if full_name is None:
1299 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
1304 def _ExpandNodeName(cfg, name):
1305 """Wrapper over L{_ExpandItemName} for nodes."""
1306 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
1309 def _ExpandInstanceName(cfg, name):
1310 """Wrapper over L{_ExpandItemName} for instance."""
1311 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
1314 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
1315 minmem, maxmem, vcpus, nics, disk_template, disks,
1316 bep, hvp, hypervisor_name, tags):
1317 """Builds instance related env variables for hooks
1319 This builds the hook environment from individual variables.
1322 @param name: the name of the instance
1323 @type primary_node: string
1324 @param primary_node: the name of the instance's primary node
1325 @type secondary_nodes: list
1326 @param secondary_nodes: list of secondary nodes as strings
1327 @type os_type: string
1328 @param os_type: the name of the instance's OS
1329 @type status: string
1330 @param status: the desired status of the instance
1331 @type minmem: string
1332 @param minmem: the minimum memory size of the instance
1333 @type maxmem: string
1334 @param maxmem: the maximum memory size of the instance
1336 @param vcpus: the count of VCPUs the instance has
1338 @param nics: list of tuples (ip, mac, mode, link) representing
1339 the NICs the instance has
1340 @type disk_template: string
1341 @param disk_template: the disk template of the instance
1343 @param disks: the list of (size, mode) pairs
1345 @param bep: the backend parameters for the instance
1347 @param hvp: the hypervisor parameters for the instance
1348 @type hypervisor_name: string
1349 @param hypervisor_name: the hypervisor for the instance
1351 @param tags: list of instance tags as strings
1353 @return: the hook environment for this instance
1358 "INSTANCE_NAME": name,
1359 "INSTANCE_PRIMARY": primary_node,
1360 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
1361 "INSTANCE_OS_TYPE": os_type,
1362 "INSTANCE_STATUS": status,
1363 "INSTANCE_MINMEM": minmem,
1364 "INSTANCE_MAXMEM": maxmem,
1365 # TODO(2.7) remove deprecated "memory" value
1366 "INSTANCE_MEMORY": maxmem,
1367 "INSTANCE_VCPUS": vcpus,
1368 "INSTANCE_DISK_TEMPLATE": disk_template,
1369 "INSTANCE_HYPERVISOR": hypervisor_name,
1372 nic_count = len(nics)
1373 for idx, (ip, mac, mode, link) in enumerate(nics):
1376 env["INSTANCE_NIC%d_IP" % idx] = ip
1377 env["INSTANCE_NIC%d_MAC" % idx] = mac
1378 env["INSTANCE_NIC%d_MODE" % idx] = mode
1379 env["INSTANCE_NIC%d_LINK" % idx] = link
1380 if mode == constants.NIC_MODE_BRIDGED:
1381 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
1385 env["INSTANCE_NIC_COUNT"] = nic_count
1388 disk_count = len(disks)
1389 for idx, (size, mode) in enumerate(disks):
1390 env["INSTANCE_DISK%d_SIZE" % idx] = size
1391 env["INSTANCE_DISK%d_MODE" % idx] = mode
1395 env["INSTANCE_DISK_COUNT"] = disk_count
1400 env["INSTANCE_TAGS"] = " ".join(tags)
1402 for source, kind in [(bep, "BE"), (hvp, "HV")]:
1403 for key, value in source.items():
1404 env["INSTANCE_%s_%s" % (kind, key)] = value
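# Illustrative sketch (hypothetical NIC): for
#   nics = [("198.51.100.10", "aa:00:00:11:22:33",
#            constants.NIC_MODE_BRIDGED, "br0")]
# the NIC loop above adds INSTANCE_NIC0_IP, INSTANCE_NIC0_MAC,
# INSTANCE_NIC0_MODE, INSTANCE_NIC0_LINK and INSTANCE_NIC0_BRIDGE, plus
# INSTANCE_NIC_COUNT=1; disks and the BE/HV parameters are expanded the same
# way into INSTANCE_DISK*, INSTANCE_BE_* and INSTANCE_HV_* variables.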
1409 def _NICListToTuple(lu, nics):
1410 """Build a list of nic information tuples.
1412 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
1413 value in LUInstanceQueryData.
1415 @type lu: L{LogicalUnit}
1416 @param lu: the logical unit on whose behalf we execute
1417 @type nics: list of L{objects.NIC}
1418 @param nics: list of nics to convert to hooks tuples
1422 cluster = lu.cfg.GetClusterInfo()
1426 filled_params = cluster.SimpleFillNIC(nic.nicparams)
1427 mode = filled_params[constants.NIC_MODE]
1428 link = filled_params[constants.NIC_LINK]
1429 hooks_nics.append((ip, mac, mode, link))
1433 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
1434 """Builds instance related env variables for hooks from an object.
1436 @type lu: L{LogicalUnit}
1437 @param lu: the logical unit on whose behalf we execute
1438 @type instance: L{objects.Instance}
1439 @param instance: the instance for which we should build the
1441 @type override: dict
1442 @param override: dictionary with key/values that will override
1445 @return: the hook environment dictionary
1448 cluster = lu.cfg.GetClusterInfo()
1449 bep = cluster.FillBE(instance)
1450 hvp = cluster.FillHV(instance)
1452 "name": instance.name,
1453 "primary_node": instance.primary_node,
1454 "secondary_nodes": instance.secondary_nodes,
1455 "os_type": instance.os,
1456 "status": instance.admin_state,
1457 "maxmem": bep[constants.BE_MAXMEM],
1458 "minmem": bep[constants.BE_MINMEM],
1459 "vcpus": bep[constants.BE_VCPUS],
1460 "nics": _NICListToTuple(lu, instance.nics),
1461 "disk_template": instance.disk_template,
1462 "disks": [(disk.size, disk.mode) for disk in instance.disks],
1465 "hypervisor_name": instance.hypervisor,
1466 "tags": instance.tags,
1469 args.update(override)
1470 return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
1473 def _AdjustCandidatePool(lu, exceptions):
1474 """Adjust the candidate pool after node operations.
1477 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
1479 lu.LogInfo("Promoted nodes to master candidate role: %s",
1480 utils.CommaJoin(node.name for node in mod_list))
1481 for name in mod_list:
1482 lu.context.ReaddNode(name)
1483 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1485 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
1489 def _DecideSelfPromotion(lu, exceptions=None):
1490 """Decide whether I should promote myself as a master candidate.
1493 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
1494 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1495 # the new node will increase mc_max with one, so:
1496 mc_should = min(mc_should + 1, cp_size)
1497 return mc_now < mc_should
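# Illustrative sketch (hypothetical numbers): with a candidate pool size of 10,
# mc_now = 3 and mc_should = 3, adding this node raises the target to
# min(3 + 1, 10) = 4, so 3 < 4 and the node promotes itself; with a pool size
# of 3 the target stays at 3 and no promotion happens.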
1500 def _CalculateGroupIPolicy(cluster, group):
1501 """Calculate instance policy for group.
1504 return cluster.SimpleFillIPolicy(group.ipolicy)
1507 def _ComputeViolatingInstances(ipolicy, instances):
1508 """Computes the set of instances that violate the given ipolicy.
1510 @param ipolicy: The ipolicy to verify
1511 @type instances: iterable of L{objects.Instance}
1512 @param instances: List of instances to verify
1513 @return: A frozenset of instance names violating the ipolicy
1516 return frozenset([inst.name for inst in instances
1517 if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
1520 def _CheckNicsBridgesExist(lu, target_nics, target_node):
1521 """Check that the bridges needed by a list of nics exist.
1524 cluster = lu.cfg.GetClusterInfo()
1525 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
1526 brlist = [params[constants.NIC_LINK] for params in paramslist
1527 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
1529 result = lu.rpc.call_bridges_exist(target_node, brlist)
1530 result.Raise("Error checking bridges on destination node '%s'" %
1531 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
1534 def _CheckInstanceBridgesExist(lu, instance, node=None):
1535 """Check that the bridges needed by an instance exist.
1539 node = instance.primary_node
1540 _CheckNicsBridgesExist(lu, instance.nics, node)
1543 def _CheckOSVariant(os_obj, name):
1544 """Check whether an OS name conforms to the os variants specification.
1546 @type os_obj: L{objects.OS}
1547 @param os_obj: OS object to check
1549 @param name: OS name passed by the user, to check for validity
1552 variant = objects.OS.GetVariant(name)
1553 if not os_obj.supported_variants:
1555 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
1556 " passed)" % (os_obj.name, variant),
1560 raise errors.OpPrereqError("OS name must include a variant",
1563 if variant not in os_obj.supported_variants:
1564 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1567 def _GetNodeInstancesInner(cfg, fn):
1568 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1571 def _GetNodeInstances(cfg, node_name):
1572 """Returns a list of all primary and secondary instances on a node.
1576 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1579 def _GetNodePrimaryInstances(cfg, node_name):
1580 """Returns primary instances on a node.
1583 return _GetNodeInstancesInner(cfg,
1584 lambda inst: node_name == inst.primary_node)
1587 def _GetNodeSecondaryInstances(cfg, node_name):
1588 """Returns secondary instances on a node.
1591 return _GetNodeInstancesInner(cfg,
1592 lambda inst: node_name in inst.secondary_nodes)
1595 def _GetStorageTypeArgs(cfg, storage_type):
1596 """Returns the arguments for a storage type.
1599 # Special case for file storage
1600 if storage_type == constants.ST_FILE:
1601 # storage.FileStorage wants a list of storage directories
1602 return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1607 def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
1610 for dev in instance.disks:
1611 cfg.SetDiskID(dev, node_name)
1613 result = rpc_runner.call_blockdev_getmirrorstatus(node_name, (instance.disks,
1615 result.Raise("Failed to get disk status from node %s" % node_name,
1616 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1618 for idx, bdev_status in enumerate(result.payload):
1619 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1625 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1626 """Check the sanity of iallocator and node arguments and use the
1627 cluster-wide iallocator if appropriate.
1629 Check that at most one of (iallocator, node) is specified. If none is
1630 specified, then the LU's opcode's iallocator slot is filled with the
1631 cluster-wide default iallocator.
1633 @type iallocator_slot: string
1634 @param iallocator_slot: the name of the opcode iallocator slot
1635 @type node_slot: string
1636 @param node_slot: the name of the opcode target node slot
1639 node = getattr(lu.op, node_slot, None)
1640 iallocator = getattr(lu.op, iallocator_slot, None)
1642 if node is not None and iallocator is not None:
1643 raise errors.OpPrereqError("Do not specify both an iallocator and a node",
1645 elif node is None and iallocator is None:
1646 default_iallocator = lu.cfg.GetDefaultIAllocator()
1647 if default_iallocator:
1648 setattr(lu.op, iallocator_slot, default_iallocator)
1650 raise errors.OpPrereqError("No iallocator or node given and no"
1651 " cluster-wide default iallocator found;"
1652 " please specify either an iallocator or a"
1653 " node, or set a cluster-wide default"
1657 def _GetDefaultIAllocator(cfg, iallocator):
1658 """Decides on which iallocator to use.
1660 @type cfg: L{config.ConfigWriter}
1661 @param cfg: Cluster configuration object
1662 @type iallocator: string or None
1663 @param iallocator: Iallocator specified in opcode
1665 @return: Iallocator name
1669 # Use default iallocator
1670 iallocator = cfg.GetDefaultIAllocator()
1673 raise errors.OpPrereqError("No iallocator was specified, neither in the"
1674 " opcode nor as a cluster-wide default",
1680 class LUClusterPostInit(LogicalUnit):
1681 """Logical unit for running hooks after cluster initialization.
1684 HPATH = "cluster-init"
1685 HTYPE = constants.HTYPE_CLUSTER
1687 def BuildHooksEnv(self):
1692 "OP_TARGET": self.cfg.GetClusterName(),
1695 def BuildHooksNodes(self):
1696 """Build hooks nodes.
1699 return ([], [self.cfg.GetMasterNode()])
1701 def Exec(self, feedback_fn):
1708 class LUClusterDestroy(LogicalUnit):
1709 """Logical unit for destroying the cluster.
1712 HPATH = "cluster-destroy"
1713 HTYPE = constants.HTYPE_CLUSTER
1715 def BuildHooksEnv(self):
1720 "OP_TARGET": self.cfg.GetClusterName(),
1723 def BuildHooksNodes(self):
1724 """Build hooks nodes.
1729 def CheckPrereq(self):
1730 """Check prerequisites.
1732 This checks whether the cluster is empty.
1734 Any errors are signaled by raising errors.OpPrereqError.
1737 master = self.cfg.GetMasterNode()
1739 nodelist = self.cfg.GetNodeList()
1740 if len(nodelist) != 1 or nodelist[0] != master:
1741 raise errors.OpPrereqError("There are still %d node(s) in"
1742 " this cluster." % (len(nodelist) - 1),
1744 instancelist = self.cfg.GetInstanceList()
1746 raise errors.OpPrereqError("There are still %d instance(s) in"
1747 " this cluster." % len(instancelist),
1750 def Exec(self, feedback_fn):
1751 """Destroys the cluster.
1754 master_params = self.cfg.GetMasterNetworkParameters()
1756 # Run post hooks on master node before it's removed
1757 _RunPostHook(self, master_params.name)
1759 ems = self.cfg.GetUseExternalMipScript()
1760 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
1763 self.LogWarning("Error disabling the master IP address: %s",
1766 return master_params.name
1769 def _VerifyCertificate(filename):
1770 """Verifies a certificate for L{LUClusterVerifyConfig}.
1772 @type filename: string
1773 @param filename: Path to PEM file
1777 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1778 utils.ReadFile(filename))
1779 except Exception, err: # pylint: disable=W0703
1780 return (LUClusterVerifyConfig.ETYPE_ERROR,
1781 "Failed to load X509 certificate %s: %s" % (filename, err))
1784 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1785 constants.SSL_CERT_EXPIRATION_ERROR)
1788 fnamemsg = "While verifying %s: %s" % (filename, msg)
1793 return (None, fnamemsg)
1794 elif errcode == utils.CERT_WARNING:
1795 return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
1796 elif errcode == utils.CERT_ERROR:
1797 return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)
1799 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1802 def _GetAllHypervisorParameters(cluster, instances):
1803 """Compute the set of all hypervisor parameters.
1805 @type cluster: L{objects.Cluster}
1806 @param cluster: the cluster object
1807 @type instances: list of L{objects.Instance}
1808 @param instances: additional instances from which to obtain parameters
1809 @rtype: list of (origin, hypervisor, parameters)
1810 @return: a list with all parameters found, indicating the hypervisor they
1811 apply to, and the origin (can be "cluster", "os X", or "instance Y")
1816 for hv_name in cluster.enabled_hypervisors:
1817 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
1819 for os_name, os_hvp in cluster.os_hvp.items():
1820 for hv_name, hv_params in os_hvp.items():
1822 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
1823 hvp_data.append(("os %s" % os_name, hv_name, full_params))
1825 # TODO: collapse identical parameter values in a single one
1826 for instance in instances:
1827 if instance.hvparams:
1828 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
1829 cluster.FillHV(instance)))
1834 class _VerifyErrors(object):
1835 """Mix-in for cluster/group verify LUs.
1837 It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
1838 self.op and self._feedback_fn to be available.)
1842 ETYPE_FIELD = "code"
1843 ETYPE_ERROR = "ERROR"
1844 ETYPE_WARNING = "WARNING"
1846 def _Error(self, ecode, item, msg, *args, **kwargs):
1847 """Format an error message.
1849 Based on the opcode's error_codes parameter, either format a
1850 parseable error code, or a simpler error string.
1852 This must be called only from Exec and functions called from Exec.
1855 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1856 itype, etxt, _ = ecode
1857 # first complete the msg
1860 # then format the whole message
1861 if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
1862 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1868 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1869 # and finally report it via the feedback_fn
1870 self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101
1872 def _ErrorIf(self, cond, ecode, *args, **kwargs):
1873 """Log an error message if the passed condition is True.
1877 or self.op.debug_simulate_errors) # pylint: disable=E1101
1879 # If the error code is in the list of ignored errors, demote the error to a warning.
1881 (_, etxt, _) = ecode
1882 if etxt in self.op.ignore_errors: # pylint: disable=E1101
1883 kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING
1886 self._Error(ecode, *args, **kwargs)
1888 # do not mark the operation as failed for WARN cases only
1889 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1890 self.bad = self.bad or cond
1893 class LUClusterVerify(NoHooksLU):
1894 """Submits all jobs necessary to verify the cluster.
1899 def ExpandNames(self):
1900 self.needed_locks = {}
1902 def Exec(self, feedback_fn):
1905 if self.op.group_name:
1906 groups = [self.op.group_name]
1907 depends_fn = lambda: None
1909 groups = self.cfg.GetNodeGroupList()
1911 # Verify global configuration
1913 opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors)
1916 # Always depend on global verification
1917 depends_fn = lambda: [(-len(jobs), [])]
1919 jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
1920 ignore_errors=self.op.ignore_errors,
1921 depends=depends_fn())]
1922 for group in groups)
1924 # Fix up all parameters
1925 for op in itertools.chain(*jobs): # pylint: disable=W0142
1926 op.debug_simulate_errors = self.op.debug_simulate_errors
1927 op.verbose = self.op.verbose
1928 op.error_codes = self.op.error_codes
1930 op.skip_checks = self.op.skip_checks
1931 except AttributeError:
1932 assert not isinstance(op, opcodes.OpClusterVerifyGroup)
1934 return ResultWithJobs(jobs)
1937 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
1938 """Verifies the cluster config.
1943 def _VerifyHVP(self, hvp_data):
1944 """Verifies locally the syntax of the hypervisor parameters.
1947 for item, hv_name, hv_params in hvp_data:
1948 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
1951 hv_class = hypervisor.GetHypervisor(hv_name)
1952 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1953 hv_class.CheckParameterSyntax(hv_params)
1954 except errors.GenericError, err:
1955 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
1957 def ExpandNames(self):
1958 self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
1959 self.share_locks = _ShareAll()
1961 def CheckPrereq(self):
1962 """Check prerequisites.
1965 # Retrieve all information
1966 self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
1967 self.all_node_info = self.cfg.GetAllNodesInfo()
1968 self.all_inst_info = self.cfg.GetAllInstancesInfo()
1970 def Exec(self, feedback_fn):
1971 """Verify integrity of cluster, performing various test on nodes.
1975 self._feedback_fn = feedback_fn
1977 feedback_fn("* Verifying cluster config")
1979 for msg in self.cfg.VerifyConfig():
1980 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
1982 feedback_fn("* Verifying cluster certificate files")
1984 for cert_filename in constants.ALL_CERT_FILES:
1985 (errcode, msg) = _VerifyCertificate(cert_filename)
1986 self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
1988 feedback_fn("* Verifying hypervisor parameters")
1990 self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
1991 self.all_inst_info.values()))
1993 feedback_fn("* Verifying all nodes belong to an existing group")
1995 # We do this verification here because, should this bogus circumstance
1996 # occur, it would never be caught by VerifyGroup, which only acts on
1997 # nodes/instances reachable from existing node groups.
1999 dangling_nodes = set(node.name for node in self.all_node_info.values()
2000 if node.group not in self.all_group_info)
2002 dangling_instances = {}
2003 no_node_instances = []
2005 for inst in self.all_inst_info.values():
2006 if inst.primary_node in dangling_nodes:
2007 dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
2008 elif inst.primary_node not in self.all_node_info:
2009 no_node_instances.append(inst.name)
2014 utils.CommaJoin(dangling_instances.get(node.name,
2016 for node in dangling_nodes]
2018 self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
2020 "the following nodes (and their instances) belong to a non"
2021 " existing group: %s", utils.CommaJoin(pretty_dangling))
2023 self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
2025 "the following instances have a non-existing primary-node:"
2026 " %s", utils.CommaJoin(no_node_instances))
2031 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
2032 """Verifies the status of a node group.
2035 HPATH = "cluster-verify"
2036 HTYPE = constants.HTYPE_CLUSTER
2039 _HOOKS_INDENT_RE = re.compile("^", re.M)
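# Note: substituting on "^" in MULTILINE mode prefixes every line of a
# multi-line string; HooksCallBack below uses this to indent the captured
# hook output before feeding it back to the user.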
2041 class NodeImage(object):
2042 """A class representing the logical and physical status of a node.
2045 @ivar name: the node name to which this object refers
2046 @ivar volumes: a structure as returned from
2047 L{ganeti.backend.GetVolumeList} (runtime)
2048 @ivar instances: a list of running instances (runtime)
2049 @ivar pinst: list of configured primary instances (config)
2050 @ivar sinst: list of configured secondary instances (config)
2051 @ivar sbp: dictionary of {primary-node: list of instances} for all
2052 instances for which this node is secondary (config)
2053 @ivar mfree: free memory, as reported by hypervisor (runtime)
2054 @ivar dfree: free disk, as reported by the node (runtime)
2055 @ivar offline: the offline status (config)
2056 @type rpc_fail: boolean
2057 @ivar rpc_fail: whether the RPC verify call was successful (overall,
2058 not whether the individual keys were correct) (runtime)
2059 @type lvm_fail: boolean
2060 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
2061 @type hyp_fail: boolean
2062 @ivar hyp_fail: whether the RPC call didn't return the instance list
2063 @type ghost: boolean
2064 @ivar ghost: whether this node is unknown to the configuration (config)
2065 @type os_fail: boolean
2066 @ivar os_fail: whether the RPC call didn't return valid OS data
2068 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
2069 @type vm_capable: boolean
2070 @ivar vm_capable: whether the node can host instances
2073 def __init__(self, offline=False, name=None, vm_capable=True):
2082 self.offline = offline
2083 self.vm_capable = vm_capable
2084 self.rpc_fail = False
2085 self.lvm_fail = False
2086 self.hyp_fail = False
2088 self.os_fail = False
2091 def ExpandNames(self):
2092 # This raises errors.OpPrereqError on its own:
2093 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
2095 # Get instances in node group; this is unsafe and needs verification later
2097 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2099 self.needed_locks = {
2100 locking.LEVEL_INSTANCE: inst_names,
2101 locking.LEVEL_NODEGROUP: [self.group_uuid],
2102 locking.LEVEL_NODE: [],
2105 self.share_locks = _ShareAll()
2107 def DeclareLocks(self, level):
2108 if level == locking.LEVEL_NODE:
2109 # Get members of node group; this is unsafe and needs verification later
2110 nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
2112 all_inst_info = self.cfg.GetAllInstancesInfo()
2114 # In Exec(), we warn about mirrored instances that have primary and
2115 # secondary living in separate node groups. To fully verify that
2116 # volumes for these instances are healthy, we will need to do an
2117 # extra call to their secondaries. We ensure here those nodes will
2119 for inst in self.owned_locks(locking.LEVEL_INSTANCE):
2120 # Important: access only the instances whose lock is owned
2121 if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
2122 nodes.update(all_inst_info[inst].secondary_nodes)
2124 self.needed_locks[locking.LEVEL_NODE] = nodes
2126 def CheckPrereq(self):
2127 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
2128 self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
2130 group_nodes = set(self.group_info.members)
2132 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2135 group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2137 unlocked_instances = \
2138 group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
2141 raise errors.OpPrereqError("Missing lock for nodes: %s" %
2142 utils.CommaJoin(unlocked_nodes),
2145 if unlocked_instances:
2146 raise errors.OpPrereqError("Missing lock for instances: %s" %
2147 utils.CommaJoin(unlocked_instances),
2150 self.all_node_info = self.cfg.GetAllNodesInfo()
2151 self.all_inst_info = self.cfg.GetAllInstancesInfo()
2153 self.my_node_names = utils.NiceSort(group_nodes)
2154 self.my_inst_names = utils.NiceSort(group_instances)
2156 self.my_node_info = dict((name, self.all_node_info[name])
2157 for name in self.my_node_names)
2159 self.my_inst_info = dict((name, self.all_inst_info[name])
2160 for name in self.my_inst_names)
2162 # We detect here the nodes that will need the extra RPC calls for verifying
2163 # split LV volumes; they should be locked.
2164 extra_lv_nodes = set()
2166 for inst in self.my_inst_info.values():
2167 if inst.disk_template in constants.DTS_INT_MIRROR:
2168 for nname in inst.all_nodes:
2169 if self.all_node_info[nname].group != self.group_uuid:
2170 extra_lv_nodes.add(nname)
2172 unlocked_lv_nodes = \
2173 extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2175 if unlocked_lv_nodes:
2176 raise errors.OpPrereqError("Missing node locks for LV check: %s" %
2177 utils.CommaJoin(unlocked_lv_nodes),
2179 self.extra_lv_nodes = list(extra_lv_nodes)
2181 def _VerifyNode(self, ninfo, nresult):
2182 """Perform some basic validation on data returned from a node.
2184 - check the result data structure is well formed and has all the
2186 - check ganeti version
2188 @type ninfo: L{objects.Node}
2189 @param ninfo: the node to check
2190 @param nresult: the results from the node
2192 @return: whether overall this call was successful (and we can expect
2193 reasonable values in the response)
2197 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2199 # main result, nresult should be a non-empty dict
2200 test = not nresult or not isinstance(nresult, dict)
2201 _ErrorIf(test, constants.CV_ENODERPC, node,
2202 "unable to verify node: no data returned")
2206 # compares ganeti version
2207 local_version = constants.PROTOCOL_VERSION
2208 remote_version = nresult.get("version", None)
2209 test = not (remote_version and
2210 isinstance(remote_version, (list, tuple)) and
2211 len(remote_version) == 2)
2212 _ErrorIf(test, constants.CV_ENODERPC, node,
2213 "connection to node returned invalid data")
2217 test = local_version != remote_version[0]
2218 _ErrorIf(test, constants.CV_ENODEVERSION, node,
2219 "incompatible protocol versions: master %s,"
2220 " node %s", local_version, remote_version[0])
2224 # node seems compatible, we can actually try to look into its results
2226 # full package version
2227 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
2228 constants.CV_ENODEVERSION, node,
2229 "software version mismatch: master %s, node %s",
2230 constants.RELEASE_VERSION, remote_version[1],
2231 code=self.ETYPE_WARNING)
2233 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
2234 if ninfo.vm_capable and isinstance(hyp_result, dict):
2235 for hv_name, hv_result in hyp_result.iteritems():
2236 test = hv_result is not None
2237 _ErrorIf(test, constants.CV_ENODEHV, node,
2238 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
2240 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
2241 if ninfo.vm_capable and isinstance(hvp_result, list):
2242 for item, hv_name, hv_result in hvp_result:
2243 _ErrorIf(True, constants.CV_ENODEHV, node,
2244 "hypervisor %s parameter verify failure (source %s): %s",
2245 hv_name, item, hv_result)
2247 test = nresult.get(constants.NV_NODESETUP,
2248 ["Missing NODESETUP results"])
2249 _ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
2254 def _VerifyNodeTime(self, ninfo, nresult,
2255 nvinfo_starttime, nvinfo_endtime):
2256 """Check the node time.
2258 @type ninfo: L{objects.Node}
2259 @param ninfo: the node to check
2260 @param nresult: the remote results for the node
2261 @param nvinfo_starttime: the start time of the RPC call
2262 @param nvinfo_endtime: the end time of the RPC call
2266 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2268 ntime = nresult.get(constants.NV_TIME, None)
2270 ntime_merged = utils.MergeTime(ntime)
2271 except (ValueError, TypeError):
2272 _ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time")
2275 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
2276 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
2277 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
2278 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
2282 _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node,
2283 "Node time diverges by at least %s from master node time",
2286 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
2287 """Check the node LVM results.
2289 @type ninfo: L{objects.Node}
2290 @param ninfo: the node to check
2291 @param nresult: the remote results for the node
2292 @param vg_name: the configured VG name
2299 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2301 # checks vg existence and size > 20G
2302 vglist = nresult.get(constants.NV_VGLIST, None)
2304 _ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
2306 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
2307 constants.MIN_VG_SIZE)
2308 _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
2311 pvlist = nresult.get(constants.NV_PVLIST, None)
2312 test = pvlist is None
2313 _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
2315 # check that ':' is not present in PV names, since it's a
2316 # special character for lvcreate (denotes the range of PEs to
2318 for _, pvname, owner_vg in pvlist:
2319 test = ":" in pvname
2320 _ErrorIf(test, constants.CV_ENODELVM, node,
2321 "Invalid character ':' in PV '%s' of VG '%s'",
2324 def _VerifyNodeBridges(self, ninfo, nresult, bridges):
2325 """Check the node bridges.
2327 @type ninfo: L{objects.Node}
2328 @param ninfo: the node to check
2329 @param nresult: the remote results for the node
2330 @param bridges: the expected list of bridges
2337 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2339 missing = nresult.get(constants.NV_BRIDGES, None)
2340 test = not isinstance(missing, list)
2341 _ErrorIf(test, constants.CV_ENODENET, node,
2342 "did not return valid bridge information")
2344 _ErrorIf(bool(missing), constants.CV_ENODENET, node,
2345 "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
2347 def _VerifyNodeUserScripts(self, ninfo, nresult):
2348 """Check the results of user scripts presence and executability on the node
2350 @type ninfo: L{objects.Node}
2351 @param ninfo: the node to check
2352 @param nresult: the remote results for the node
2357 test = not constants.NV_USERSCRIPTS in nresult
2358 self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node,
2359 "did not return user scripts information")
2361 broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
2363 self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node,
2364 "user scripts not present or not executable: %s" %
2365 utils.CommaJoin(sorted(broken_scripts)))
2367 def _VerifyNodeNetwork(self, ninfo, nresult):
2368 """Check the node network connectivity results.
2370 @type ninfo: L{objects.Node}
2371 @param ninfo: the node to check
2372 @param nresult: the remote results for the node
2376 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2378 test = constants.NV_NODELIST not in nresult
2379 _ErrorIf(test, constants.CV_ENODESSH, node,
2380 "node hasn't returned node ssh connectivity data")
2382 if nresult[constants.NV_NODELIST]:
2383 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
2384 _ErrorIf(True, constants.CV_ENODESSH, node,
2385 "ssh communication with node '%s': %s", a_node, a_msg)
2387 test = constants.NV_NODENETTEST not in nresult
2388 _ErrorIf(test, constants.CV_ENODENET, node,
2389 "node hasn't returned node tcp connectivity data")
2391 if nresult[constants.NV_NODENETTEST]:
2392 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
2394 _ErrorIf(True, constants.CV_ENODENET, node,
2395 "tcp communication with node '%s': %s",
2396 anode, nresult[constants.NV_NODENETTEST][anode])
2398 test = constants.NV_MASTERIP not in nresult
2399 _ErrorIf(test, constants.CV_ENODENET, node,
2400 "node hasn't returned node master IP reachability data")
2402 if not nresult[constants.NV_MASTERIP]:
2403 if node == self.master_node:
2404 msg = "the master node cannot reach the master IP (not configured?)"
2406 msg = "cannot reach the master IP"
2407 _ErrorIf(True, constants.CV_ENODENET, node, msg)
2409 def _VerifyInstance(self, instance, instanceconfig, node_image,
2411 """Verify an instance.
2413 This function checks to see if the required block devices are
2414 available on the instance's node.
2417 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2418 node_current = instanceconfig.primary_node
2420 node_vol_should = {}
2421 instanceconfig.MapLVsByNode(node_vol_should)
2423 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), self.group_info)
2424 err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
2425 _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err))
2427 for node in node_vol_should:
2428 n_img = node_image[node]
2429 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2430 # ignore missing volumes on offline or broken nodes
2432 for volume in node_vol_should[node]:
2433 test = volume not in n_img.volumes
2434 _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
2435 "volume %s missing on node %s", volume, node)
2437 if instanceconfig.admin_state == constants.ADMINST_UP:
2438 pri_img = node_image[node_current]
2439 test = instance not in pri_img.instances and not pri_img.offline
2440 _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
2441 "instance not running on its primary node %s",
2444 diskdata = [(nname, success, status, idx)
2445 for (nname, disks) in diskstatus.items()
2446 for idx, (success, status) in enumerate(disks)]
2448 for nname, success, bdev_status, idx in diskdata:
2449 # the 'ghost node' construction in Exec() ensures that we have a
2451 snode = node_image[nname]
2452 bad_snode = snode.ghost or snode.offline
2453 _ErrorIf(instanceconfig.admin_state == constants.ADMINST_UP and
2454 not success and not bad_snode,
2455 constants.CV_EINSTANCEFAULTYDISK, instance,
2456 "couldn't retrieve status for disk/%s on %s: %s",
2457 idx, nname, bdev_status)
2458 _ErrorIf((instanceconfig.admin_state == constants.ADMINST_UP and
2459 success and bdev_status.ldisk_status == constants.LDS_FAULTY),
2460 constants.CV_EINSTANCEFAULTYDISK, instance,
2461 "disk/%s on %s is faulty", idx, nname)
2463 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
2464 """Verify if there are any unknown volumes in the cluster.
2466 The .os, .swap and backup volumes are ignored. All other volumes are
2467 reported as unknown.
2469 @type reserved: L{ganeti.utils.FieldSet}
2470 @param reserved: a FieldSet of reserved volume names
2473 for node, n_img in node_image.items():
2474 if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
2475 self.all_node_info[node].group != self.group_uuid):
2476 # skip non-healthy nodes
2478 for volume in n_img.volumes:
2479 test = ((node not in node_vol_should or
2480 volume not in node_vol_should[node]) and
2481 not reserved.Matches(volume))
2482 self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
2483 "volume %s is unknown", volume)
2485 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
2486 """Verify N+1 Memory Resilience.
2488 Check that if one single node dies we can still start all the
2489 instances it was primary for.
2492 cluster_info = self.cfg.GetClusterInfo()
2493 for node, n_img in node_image.items():
2494 # This code checks that every node which is now listed as a
2495 # secondary has enough memory to host all the instances it would
2496 # have to take over, should a single other node in the cluster fail.
2497 # FIXME: not ready for failover to an arbitrary node
2498 # FIXME: does not support file-backed instances
2499 # WARNING: we currently take into account down instances as well
2500 # as up ones, considering that even if they're down someone
2501 # might want to start them even in the event of a node failure.
2502 if n_img.offline or self.all_node_info[node].group != self.group_uuid:
2503 # we're skipping nodes marked offline and nodes in other groups from
2504 # the N+1 warning, since most likely we don't have good memory
2505 # information from them; we already list instances living on such
2506 # nodes, and that's enough warning
2508 #TODO(dynmem): also consider ballooning out other instances
2509 for prinode, instances in n_img.sbp.items():
2511 for instance in instances:
2512 bep = cluster_info.FillBE(instance_cfg[instance])
2513 if bep[constants.BE_AUTO_BALANCE]:
2514 needed_mem += bep[constants.BE_MINMEM]
2515 test = n_img.mfree < needed_mem
2516 self._ErrorIf(test, constants.CV_ENODEN1, node,
2517 "not enough memory to accomodate instance failovers"
2518 " should node %s fail (%dMiB needed, %dMiB available)",
2519 prinode, needed_mem, n_img.mfree)
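# Illustration (hypothetical numbers): if this node is secondary for two
# auto-balanced instances whose primary is prinode, with BE_MINMEM of 512
# and 1024 MiB, needed_mem is 1536 MiB and an ENODEN1 error is reported
# when the node's mfree drops below that.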
2522 def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
2523 (files_all, files_opt, files_mc, files_vm)):
2524 """Verifies file checksums collected from all nodes.
2526 @param errorif: Callback for reporting errors
2527 @param nodeinfo: List of L{objects.Node} objects
2528 @param master_node: Name of master node
2529 @param all_nvinfo: RPC results
2532 # Define functions determining which nodes to consider for a file
2535 (files_mc, lambda node: (node.master_candidate or
2536 node.name == master_node)),
2537 (files_vm, lambda node: node.vm_capable),
2540 # Build mapping from filename to list of nodes which should have the file
2542 for (files, fn) in files2nodefn:
2544 filenodes = nodeinfo
2546 filenodes = filter(fn, nodeinfo)
2547 nodefiles.update((filename,
2548 frozenset(map(operator.attrgetter("name"), filenodes)))
2549 for filename in files)
2551 assert set(nodefiles) == (files_all | files_mc | files_vm)
2553 fileinfo = dict((filename, {}) for filename in nodefiles)
2554 ignore_nodes = set()
2556 for node in nodeinfo:
2558 ignore_nodes.add(node.name)
2561 nresult = all_nvinfo[node.name]
2563 if nresult.fail_msg or not nresult.payload:
2566 node_files = nresult.payload.get(constants.NV_FILELIST, None)
2568 test = not (node_files and isinstance(node_files, dict))
2569 errorif(test, constants.CV_ENODEFILECHECK, node.name,
2570 "Node did not return file checksum data")
2572 ignore_nodes.add(node.name)
2575 # Build per-checksum mapping from filename to nodes having it
2576 for (filename, checksum) in node_files.items():
2577 assert filename in nodefiles
2578 fileinfo[filename].setdefault(checksum, set()).add(node.name)
2580 for (filename, checksums) in fileinfo.items():
2581 assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2583 # Nodes having the file
2584 with_file = frozenset(node_name
2585 for nodes in fileinfo[filename].values()
2586 for node_name in nodes) - ignore_nodes
2588 expected_nodes = nodefiles[filename] - ignore_nodes
2590 # Nodes missing file
2591 missing_file = expected_nodes - with_file
2593 if filename in files_opt:
2595 errorif(missing_file and missing_file != expected_nodes,
2596 constants.CV_ECLUSTERFILECHECK, None,
2597 "File %s is optional, but it must exist on all or no"
2598 " nodes (not found on %s)",
2599 filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2601 errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
2602 "File %s is missing from node(s) %s", filename,
2603 utils.CommaJoin(utils.NiceSort(missing_file)))
2605 # Warn if a node has a file it shouldn't
2606 unexpected = with_file - expected_nodes
2608 constants.CV_ECLUSTERFILECHECK, None,
2609 "File %s should not exist on node(s) %s",
2610 filename, utils.CommaJoin(utils.NiceSort(unexpected)))
2612 # See if there are multiple versions of the file
2613 test = len(checksums) > 1
2615 variants = ["variant %s on %s" %
2616 (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2617 for (idx, (checksum, nodes)) in
2618 enumerate(sorted(checksums.items()))]
2622 errorif(test, constants.CV_ECLUSTERFILECHECK, None,
2623 "File %s found with %s different checksums (%s)",
2624 filename, len(checksums), "; ".join(variants))
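# Note: fileinfo maps each tracked filename to a {checksum: set(node names)}
# dict; a single checksum key means all nodes agree, several keys trigger
# the "different checksums" error above, and comparing against
# nodefiles[filename] yields the missing/unexpected cases.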
2626 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2628 """Verifies and the node DRBD status.
2630 @type ninfo: L{objects.Node}
2631 @param ninfo: the node to check
2632 @param nresult: the remote results for the node
2633 @param instanceinfo: the dict of instances
2634 @param drbd_helper: the configured DRBD usermode helper
2635 @param drbd_map: the DRBD map as returned by
2636 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2640 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2643 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2644 test = (helper_result is None)
2645 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2646 "no drbd usermode helper returned")
2648 status, payload = helper_result
2650 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2651 "drbd usermode helper check unsuccessful: %s", payload)
2652 test = status and (payload != drbd_helper)
2653 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2654 "wrong drbd usermode helper: %s", payload)
2656 # compute the DRBD minors
2658 for minor, instance in drbd_map[node].items():
2659 test = instance not in instanceinfo
2660 _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
2661 "ghost instance '%s' in temporary DRBD map", instance)
2662 # ghost instance should not be running, but otherwise we
2663 # don't give double warnings (both ghost instance and
2664 # unallocated minor in use)
2666 node_drbd[minor] = (instance, False)
2668 instance = instanceinfo[instance]
2669 node_drbd[minor] = (instance.name,
2670 instance.admin_state == constants.ADMINST_UP)
2672 # and now check them
2673 used_minors = nresult.get(constants.NV_DRBDLIST, [])
2674 test = not isinstance(used_minors, (tuple, list))
2675 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2676 "cannot parse drbd status file: %s", str(used_minors))
2678 # we cannot check drbd status
2681 for minor, (iname, must_exist) in node_drbd.items():
2682 test = minor not in used_minors and must_exist
2683 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2684 "drbd minor %d of instance %s is not active", minor, iname)
2685 for minor in used_minors:
2686 test = minor not in node_drbd
2687 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2688 "unallocated drbd minor %d is in use", minor)
2690 def _UpdateNodeOS(self, ninfo, nresult, nimg):
2691 """Builds the node OS structures.
2693 @type ninfo: L{objects.Node}
2694 @param ninfo: the node to check
2695 @param nresult: the remote results for the node
2696 @param nimg: the node image object
2700 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2702 remote_os = nresult.get(constants.NV_OSLIST, None)
2703 test = (not isinstance(remote_os, list) or
2704 not compat.all(isinstance(v, list) and len(v) == 7
2705 for v in remote_os))
2707 _ErrorIf(test, constants.CV_ENODEOS, node,
2708 "node hasn't returned valid OS data")
2717 for (name, os_path, status, diagnose,
2718 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2720 if name not in os_dict:
2723 # parameters is a list of lists instead of list of tuples due to
2724 # JSON lacking a real tuple type, fix it:
2725 parameters = [tuple(v) for v in parameters]
2726 os_dict[name].append((os_path, status, diagnose,
2727 set(variants), set(parameters), set(api_ver)))
2729 nimg.oslist = os_dict
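# Note: nimg.oslist ends up shaped as {os name: [(path, status, diagnose
# message, set of variants, set of parameter tuples, set of API versions),
# ...]}; usually a single entry per name, but duplicates are kept so that
# _VerifyNodeOS can flag shadowed OS definitions.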
2731 def _VerifyNodeOS(self, ninfo, nimg, base):
2732 """Verifies the node OS list.
2734 @type ninfo: L{objects.Node}
2735 @param ninfo: the node to check
2736 @param nimg: the node image object
2737 @param base: the 'template' node we match against (e.g. from the master)
2741 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2743 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2745 beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2746 for os_name, os_data in nimg.oslist.items():
2747 assert os_data, "Empty OS status for OS %s?!" % os_name
2748 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2749 _ErrorIf(not f_status, constants.CV_ENODEOS, node,
2750 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2751 _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node,
2752 "OS '%s' has multiple entries (first one shadows the rest): %s",
2753 os_name, utils.CommaJoin([v[0] for v in os_data]))
2754 # comparisons with the 'base' image
2755 test = os_name not in base.oslist
2756 _ErrorIf(test, constants.CV_ENODEOS, node,
2757 "Extra OS %s not present on reference node (%s)",
2761 assert base.oslist[os_name], "Base node has empty OS status?"
2762 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2764 # base OS is invalid, skipping
2766 for kind, a, b in [("API version", f_api, b_api),
2767 ("variants list", f_var, b_var),
2768 ("parameters", beautify_params(f_param),
2769 beautify_params(b_param))]:
2770 _ErrorIf(a != b, constants.CV_ENODEOS, node,
2771 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2772 kind, os_name, base.name,
2773 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2775 # check any missing OSes
2776 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2777 _ErrorIf(missing, constants.CV_ENODEOS, node,
2778 "OSes present on reference node %s but missing on this node: %s",
2779 base.name, utils.CommaJoin(missing))
2781 def _VerifyOob(self, ninfo, nresult):
2782 """Verifies out of band functionality of a node.
2784 @type ninfo: L{objects.Node}
2785 @param ninfo: the node to check
2786 @param nresult: the remote results for the node
2790 # We just have to verify the paths on master and/or master candidates
2791 # as the oob helper is invoked on the master
2792 if ((ninfo.master_candidate or ninfo.master_capable) and
2793 constants.NV_OOB_PATHS in nresult):
2794 for path_result in nresult[constants.NV_OOB_PATHS]:
2795 self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
2797 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2798 """Verifies and updates the node volume data.
2800 This function will update a L{NodeImage}'s internal structures
2801 with data from the remote call.
2803 @type ninfo: L{objects.Node}
2804 @param ninfo: the node to check
2805 @param nresult: the remote results for the node
2806 @param nimg: the node image object
2807 @param vg_name: the configured VG name
2811 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2813 nimg.lvm_fail = True
2814 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2817 elif isinstance(lvdata, basestring):
2818 _ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s",
2819 utils.SafeEncode(lvdata))
2820 elif not isinstance(lvdata, dict):
2821 _ErrorIf(True, constants.CV_ENODELVM, node,
2822 "rpc call to node failed (lvlist)")
2824 nimg.volumes = lvdata
2825 nimg.lvm_fail = False
2827 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2828 """Verifies and updates the node instance list.
2830 If the listing was successful, then updates this node's instance
2831 list. Otherwise, it marks the RPC call as failed for the instance
2834 @type ninfo: L{objects.Node}
2835 @param ninfo: the node to check
2836 @param nresult: the remote results for the node
2837 @param nimg: the node image object
2840 idata = nresult.get(constants.NV_INSTANCELIST, None)
2841 test = not isinstance(idata, list)
2842 self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
2843 "rpc call to node failed (instancelist): %s",
2844 utils.SafeEncode(str(idata)))
2846 nimg.hyp_fail = True
2848 nimg.instances = idata
2850 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2851 """Verifies and computes a node information map
2853 @type ninfo: L{objects.Node}
2854 @param ninfo: the node to check
2855 @param nresult: the remote results for the node
2856 @param nimg: the node image object
2857 @param vg_name: the configured VG name
2861 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2863 # try to read free memory (from the hypervisor)
2864 hv_info = nresult.get(constants.NV_HVINFO, None)
2865 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2866 _ErrorIf(test, constants.CV_ENODEHV, node,
2867 "rpc call to node failed (hvinfo)")
2870 nimg.mfree = int(hv_info["memory_free"])
2871 except (ValueError, TypeError):
2872 _ErrorIf(True, constants.CV_ENODERPC, node,
2873 "node returned invalid nodeinfo, check hypervisor")
2875 # FIXME: devise a free space model for file based instances as well
2876 if vg_name is not None:
2877 test = (constants.NV_VGLIST not in nresult or
2878 vg_name not in nresult[constants.NV_VGLIST])
2879 _ErrorIf(test, constants.CV_ENODELVM, node,
2880 "node didn't return data for the volume group '%s'"
2881 " - it is either missing or broken", vg_name)
2884 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2885 except (ValueError, TypeError):
2886 _ErrorIf(True, constants.CV_ENODERPC, node,
2887 "node returned invalid LVM info, check LVM status")
2889 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2890 """Gets per-disk status information for all instances.
2892 @type nodelist: list of strings
2893 @param nodelist: Node names
2894 @type node_image: dict of (name, L{objects.Node})
2895 @param node_image: Node objects
2896 @type instanceinfo: dict of (name, L{objects.Instance})
2897 @param instanceinfo: Instance objects
2898 @rtype: {instance: {node: [(success, payload)]}}
2899 @return: a dictionary of per-instance dictionaries with nodes as
2900 keys and disk information as values; the disk information is a
2901 list of tuples (success, payload)
2904 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2907 node_disks_devonly = {}
2908 diskless_instances = set()
2909 diskless = constants.DT_DISKLESS
2911 for nname in nodelist:
2912 node_instances = list(itertools.chain(node_image[nname].pinst,
2913 node_image[nname].sinst))
2914 diskless_instances.update(inst for inst in node_instances
2915 if instanceinfo[inst].disk_template == diskless)
2916 disks = [(inst, disk)
2917 for inst in node_instances
2918 for disk in instanceinfo[inst].disks]
2921 # No need to collect data
2924 node_disks[nname] = disks
2926 # _AnnotateDiskParams makes already copies of the disks
2928 for (inst, dev) in disks:
2929 (anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
2930 self.cfg.SetDiskID(anno_disk, nname)
2931 devonly.append(anno_disk)
2933 node_disks_devonly[nname] = devonly
2935 assert len(node_disks) == len(node_disks_devonly)
2937 # Collect data from all nodes with disks
2938 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2941 assert len(result) == len(node_disks)
2945 for (nname, nres) in result.items():
2946 disks = node_disks[nname]
2949 # No data from this node
2950 data = len(disks) * [(False, "node offline")]
2953 _ErrorIf(msg, constants.CV_ENODERPC, nname,
2954 "while getting disk information: %s", msg)
2956 # No data from this node
2957 data = len(disks) * [(False, msg)]
2960 for idx, i in enumerate(nres.payload):
2961 if isinstance(i, (tuple, list)) and len(i) == 2:
2964 logging.warning("Invalid result from node %s, entry %d: %s",
2966 data.append((False, "Invalid result from the remote node"))
2968 for ((inst, _), status) in zip(disks, data):
2969 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2971 # Add empty entries for diskless instances.
2972 for inst in diskless_instances:
2973 assert inst not in instdisk
2976 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2977 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2978 compat.all(isinstance(s, (tuple, list)) and
2979 len(s) == 2 for s in statuses)
2980 for inst, nnames in instdisk.items()
2981 for nname, statuses in nnames.items())
2982 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
2987 def _SshNodeSelector(group_uuid, all_nodes):
2988 """Create endless iterators for all potential SSH check hosts.
2991 nodes = [node for node in all_nodes
2992 if (node.group != group_uuid and
2994 keyfunc = operator.attrgetter("group")
2996 return map(itertools.cycle,
2997 [sorted(map(operator.attrgetter("name"), names))
2998 for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
3002 def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
3003 """Choose which nodes should talk to which other nodes.
3005 We will make nodes contact all nodes in their group, and one node from
3008 @warning: This algorithm has a known issue if one node group is much
3009 smaller than others (e.g. just one node). In such a case all other
3010 nodes will talk to the single node.
3013 online_nodes = sorted(node.name for node in group_nodes if not node.offline)
3014 sel = cls._SshNodeSelector(group_uuid, all_nodes)
3016 return (online_nodes,
3017 dict((name, sorted([i.next() for i in sel]))
3018 for name in online_nodes))
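# Illustration (hypothetical node names): for online group nodes
# ["n1", "n2"] and two foreign groups with nodes ["m1"] and ["p1", "p2"],
# the cycling selectors would yield roughly
#   (["n1", "n2"], {"n1": ["m1", "p1"], "n2": ["m1", "p2"]})
# i.e. every online node is asked to check one node from each other group.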
3020 def BuildHooksEnv(self):
3023 Cluster-Verify hooks are run only in the post phase; their failure is
3024 logged in the verify output and makes the verification fail.
3028 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
3031 env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
3032 for node in self.my_node_info.values())
3036 def BuildHooksNodes(self):
3037 """Build hooks nodes.
3040 return ([], self.my_node_names)
3042 def Exec(self, feedback_fn):
3043 """Verify integrity of the node group, performing various test on nodes.
3046 # This method has too many local variables. pylint: disable=R0914
3047 feedback_fn("* Verifying group '%s'" % self.group_info.name)
3049 if not self.my_node_names:
3051 feedback_fn("* Empty node group, skipping verification")
3055 _ErrorIf = self._ErrorIf # pylint: disable=C0103
3056 verbose = self.op.verbose
3057 self._feedback_fn = feedback_fn
3059 vg_name = self.cfg.GetVGName()
3060 drbd_helper = self.cfg.GetDRBDHelper()
3061 cluster = self.cfg.GetClusterInfo()
3062 groupinfo = self.cfg.GetAllNodeGroupsInfo()
3063 hypervisors = cluster.enabled_hypervisors
3064 node_data_list = [self.my_node_info[name] for name in self.my_node_names]
3066 i_non_redundant = [] # Non redundant instances
3067 i_non_a_balanced = [] # Non auto-balanced instances
3068 i_offline = 0 # Count of offline instances
3069 n_offline = 0 # Count of offline nodes
3070 n_drained = 0 # Count of nodes being drained
3071 node_vol_should = {}
3073 # FIXME: verify OS list
3076 filemap = _ComputeAncillaryFiles(cluster, False)
3078 # do local checksums
3079 master_node = self.master_node = self.cfg.GetMasterNode()
3080 master_ip = self.cfg.GetMasterIP()
3082 feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
3085 if self.cfg.GetUseExternalMipScript():
3086 user_scripts.append(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
3088 node_verify_param = {
3089 constants.NV_FILELIST:
3090 utils.UniqueSequence(filename
3091 for files in filemap
3092 for filename in files),
3093 constants.NV_NODELIST:
3094 self._SelectSshCheckNodes(node_data_list, self.group_uuid,
3095 self.all_node_info.values()),
3096 constants.NV_HYPERVISOR: hypervisors,
3097 constants.NV_HVPARAMS:
3098 _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
3099 constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
3100 for node in node_data_list
3101 if not node.offline],
3102 constants.NV_INSTANCELIST: hypervisors,
3103 constants.NV_VERSION: None,
3104 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
3105 constants.NV_NODESETUP: None,
3106 constants.NV_TIME: None,
3107 constants.NV_MASTERIP: (master_node, master_ip),
3108 constants.NV_OSLIST: None,
3109 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
3110 constants.NV_USERSCRIPTS: user_scripts,
3113 if vg_name is not None:
3114 node_verify_param[constants.NV_VGLIST] = None
3115 node_verify_param[constants.NV_LVLIST] = vg_name
3116 node_verify_param[constants.NV_PVLIST] = [vg_name]
3117 node_verify_param[constants.NV_DRBDLIST] = None
3120 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
3123 # FIXME: this needs to be changed per node-group, not cluster-wide
3125 default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
3126 if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3127 bridges.add(default_nicpp[constants.NIC_LINK])
3128 for instance in self.my_inst_info.values():
3129 for nic in instance.nics:
3130 full_nic = cluster.SimpleFillNIC(nic.nicparams)
3131 if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3132 bridges.add(full_nic[constants.NIC_LINK])
3135 node_verify_param[constants.NV_BRIDGES] = list(bridges)
3137 # Build our expected cluster state
3138 node_image = dict((node.name, self.NodeImage(offline=node.offline,
3140 vm_capable=node.vm_capable))
3141 for node in node_data_list)
3145 for node in self.all_node_info.values():
3146 path = _SupportsOob(self.cfg, node)
3147 if path and path not in oob_paths:
3148 oob_paths.append(path)
3151 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
3153 for instance in self.my_inst_names:
3154 inst_config = self.my_inst_info[instance]
3155 if inst_config.admin_state == constants.ADMINST_OFFLINE:
3158 for nname in inst_config.all_nodes:
3159 if nname not in node_image:
3160 gnode = self.NodeImage(name=nname)
3161 gnode.ghost = (nname not in self.all_node_info)
3162 node_image[nname] = gnode
3164 inst_config.MapLVsByNode(node_vol_should)
3166 pnode = inst_config.primary_node
3167 node_image[pnode].pinst.append(instance)
3169 for snode in inst_config.secondary_nodes:
3170 nimg = node_image[snode]
3171 nimg.sinst.append(instance)
3172 if pnode not in nimg.sbp:
3173 nimg.sbp[pnode] = []
3174 nimg.sbp[pnode].append(instance)
3176 # At this point, we have the in-memory data structures complete,
3177 # except for the runtime information, which we'll gather next
3179 # Due to the way our RPC system works, exact response times cannot be
3180 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
3181 # time before and after executing the request, we can at least have a time
3183 nvinfo_starttime = time.time()
3184 all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
3186 self.cfg.GetClusterName())
3187 nvinfo_endtime = time.time()
3189 if self.extra_lv_nodes and vg_name is not None:
3191 self.rpc.call_node_verify(self.extra_lv_nodes,
3192 {constants.NV_LVLIST: vg_name},
3193 self.cfg.GetClusterName())
3195 extra_lv_nvinfo = {}
3197 all_drbd_map = self.cfg.ComputeDRBDMap()
3199 feedback_fn("* Gathering disk information (%s nodes)" %
3200 len(self.my_node_names))
3201 instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
3204 feedback_fn("* Verifying configuration file consistency")
3206 # If not all nodes are being checked, we need to make sure the master node
3207 # and a non-checked vm_capable node are in the list.
3208 absent_nodes = set(self.all_node_info).difference(self.my_node_info)
3210 vf_nvinfo = all_nvinfo.copy()
3211 vf_node_info = list(self.my_node_info.values())
3212 additional_nodes = []
3213 if master_node not in self.my_node_info:
3214 additional_nodes.append(master_node)
3215 vf_node_info.append(self.all_node_info[master_node])
3216 # Add the first vm_capable node we find which is not included,
3217 # excluding the master node (which we already have)
3218 for node in absent_nodes:
3219 nodeinfo = self.all_node_info[node]
3220 if (nodeinfo.vm_capable and not nodeinfo.offline and
3221 node != master_node):
3222 additional_nodes.append(node)
3223 vf_node_info.append(self.all_node_info[node])
3225 key = constants.NV_FILELIST
3226 vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
3227 {key: node_verify_param[key]},
3228 self.cfg.GetClusterName()))
3230 vf_nvinfo = all_nvinfo
3231 vf_node_info = self.my_node_info.values()
3233 self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
3235 feedback_fn("* Verifying node status")
3239 for node_i in node_data_list:
3241 nimg = node_image[node]
3245 feedback_fn("* Skipping offline node %s" % (node,))
3249 if node == master_node:
3251 elif node_i.master_candidate:
3252 ntype = "master candidate"
3253 elif node_i.drained:
3259 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
3261 msg = all_nvinfo[node].fail_msg
3262 _ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
3265 nimg.rpc_fail = True
3268 nresult = all_nvinfo[node].payload
3270 nimg.call_ok = self._VerifyNode(node_i, nresult)
3271 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
3272 self._VerifyNodeNetwork(node_i, nresult)
3273 self._VerifyNodeUserScripts(node_i, nresult)
3274 self._VerifyOob(node_i, nresult)
3277 self._VerifyNodeLVM(node_i, nresult, vg_name)
3278 self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
3281 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
3282 self._UpdateNodeInstances(node_i, nresult, nimg)
3283 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
3284 self._UpdateNodeOS(node_i, nresult, nimg)
3286 if not nimg.os_fail:
3287 if refos_img is None:
3289 self._VerifyNodeOS(node_i, nimg, refos_img)
3290 self._VerifyNodeBridges(node_i, nresult, bridges)
3292 # Check whether all running instances are primary for the node. (This
3293 # can no longer be done from _VerifyInstance below, since some of the
3294 # wrong instances could be from other node groups.)
3295 non_primary_inst = set(nimg.instances).difference(nimg.pinst)
3297 for inst in non_primary_inst:
3298 test = inst in self.all_inst_info
3299 _ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
3300 "instance should not run on node %s", node_i.name)
3301 _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
3302 "node is running unknown instance %s", inst)
3304 for node, result in extra_lv_nvinfo.items():
3305 self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
3306 node_image[node], vg_name)
3308 feedback_fn("* Verifying instance status")
3309 for instance in self.my_inst_names:
3311 feedback_fn("* Verifying instance %s" % instance)
3312 inst_config = self.my_inst_info[instance]
3313 self._VerifyInstance(instance, inst_config, node_image,
3315 inst_nodes_offline = []
3317 pnode = inst_config.primary_node
3318 pnode_img = node_image[pnode]
3319 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
3320 constants.CV_ENODERPC, pnode, "instance %s, connection to"
3321 " primary node failed", instance)
3323 _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
3325 constants.CV_EINSTANCEBADNODE, instance,
3326 "instance is marked as running and lives on offline node %s",
3327 inst_config.primary_node)
3329 # If the instance is non-redundant we cannot survive losing its primary
3330 # node, so we are not N+1 compliant. On the other hand we have no disk
3331 # templates with more than one secondary so that situation is not well
3333 # FIXME: does not support file-backed instances
3334 if not inst_config.secondary_nodes:
3335 i_non_redundant.append(instance)
3337 _ErrorIf(len(inst_config.secondary_nodes) > 1,
3338 constants.CV_EINSTANCELAYOUT,
3339 instance, "instance has multiple secondary nodes: %s",
3340 utils.CommaJoin(inst_config.secondary_nodes),
3341 code=self.ETYPE_WARNING)
3343 if inst_config.disk_template in constants.DTS_INT_MIRROR:
3344 pnode = inst_config.primary_node
3345 instance_nodes = utils.NiceSort(inst_config.all_nodes)
3346 instance_groups = {}
3348 for node in instance_nodes:
3349 instance_groups.setdefault(self.all_node_info[node].group,
3353 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
3354 # Sort so that we always list the primary node first.
3355 for group, nodes in sorted(instance_groups.items(),
3356 key=lambda (_, nodes): pnode in nodes,
3359 self._ErrorIf(len(instance_groups) > 1,
3360 constants.CV_EINSTANCESPLITGROUPS,
3361 instance, "instance has primary and secondary nodes in"
3362 " different groups: %s", utils.CommaJoin(pretty_list),
3363 code=self.ETYPE_WARNING)
3365 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
3366 i_non_a_balanced.append(instance)
3368 for snode in inst_config.secondary_nodes:
3369 s_img = node_image[snode]
3370 _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
3371 snode, "instance %s, connection to secondary node failed",
3375 inst_nodes_offline.append(snode)
3377 # warn that the instance lives on offline nodes
3378 _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
3379 "instance has offline secondary node(s) %s",
3380 utils.CommaJoin(inst_nodes_offline))
3381 # ... or ghost/non-vm_capable nodes
3382 for node in inst_config.all_nodes:
3383 _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
3384 instance, "instance lives on ghost node %s", node)
3385 _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
3386 instance, "instance lives on non-vm_capable node %s", node)
3388 feedback_fn("* Verifying orphan volumes")
3389 reserved = utils.FieldSet(*cluster.reserved_lvs)
3391 # We will get spurious "unknown volume" warnings if any node of this group
3392 # is secondary for an instance whose primary is in another group. To avoid
3393 # them, we find these instances and add their volumes to node_vol_should.
3394 for inst in self.all_inst_info.values():
3395 for secondary in inst.secondary_nodes:
3396 if (secondary in self.my_node_info
3397 and inst.name not in self.my_inst_info):
3398 inst.MapLVsByNode(node_vol_should)
3401 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
3403 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
3404 feedback_fn("* Verifying N+1 Memory redundancy")
3405 self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
3407 feedback_fn("* Other Notes")
3409 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
3410 % len(i_non_redundant))
3412 if i_non_a_balanced:
3413 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
3414 % len(i_non_a_balanced))
3417 feedback_fn(" - NOTICE: %d offline instance(s) found." % i_offline)
3420 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
3423 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
3427 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
3428 """Analyze the post-hooks' result
3430 This method analyses the hook result, handles it, and sends some
3431 nicely-formatted feedback back to the user.
3433 @param phase: one of L{constants.HOOKS_PHASE_POST} or
3434 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
3435 @param hooks_results: the results of the multi-node hooks rpc call
3436 @param feedback_fn: function used to send feedback back to the caller
3437 @param lu_result: previous Exec result
3438 @return: the new Exec result, based on the previous result
3442 # We only really run POST phase hooks, only for non-empty groups,
3443 # and are only interested in their results
3444 if not self.my_node_names:
3447 elif phase == constants.HOOKS_PHASE_POST:
3448 # Used to change hooks' output to proper indentation
3449 feedback_fn("* Hooks Results")
3450 assert hooks_results, "invalid result from hooks"
3452 for node_name in hooks_results:
3453 res = hooks_results[node_name]
3455 test = msg and not res.offline
3456 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3457 "Communication failure in hooks execution: %s", msg)
3458 if res.offline or msg:
3459 # No need to investigate payload if node is offline or gave
3462 for script, hkr, output in res.payload:
3463 test = hkr == constants.HKR_FAIL
3464 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3465 "Script %s failed, output:", script)
3467 output = self._HOOKS_INDENT_RE.sub(" ", output)
3468 feedback_fn("%s" % output)
3474 class LUClusterVerifyDisks(NoHooksLU):
3475 """Verifies the cluster disks status.
3480 def ExpandNames(self):
3481 self.share_locks = _ShareAll()
3482 self.needed_locks = {
3483 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3486 def Exec(self, feedback_fn):
3487 group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
3489 # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
3490 return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
3491 for group in group_names])
3494 class LUGroupVerifyDisks(NoHooksLU):
3495 """Verifies the status of all disks in a node group.
3500 def ExpandNames(self):
3501 # Raises errors.OpPrereqError on its own if group can't be found
3502 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
3504 self.share_locks = _ShareAll()
3505 self.needed_locks = {
3506 locking.LEVEL_INSTANCE: [],
3507 locking.LEVEL_NODEGROUP: [],
3508 locking.LEVEL_NODE: [],
3511 def DeclareLocks(self, level):
3512 if level == locking.LEVEL_INSTANCE:
3513 assert not self.needed_locks[locking.LEVEL_INSTANCE]
3515 # Lock instances optimistically, needs verification once node and group
3516 # locks have been acquired
3517 self.needed_locks[locking.LEVEL_INSTANCE] = \
3518 self.cfg.GetNodeGroupInstances(self.group_uuid)
3520 elif level == locking.LEVEL_NODEGROUP:
3521 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3523 self.needed_locks[locking.LEVEL_NODEGROUP] = \
3524 set([self.group_uuid] +
3525 # Lock all groups used by instances optimistically; this requires
3526 # going via the node before it's locked, requiring verification
3529 for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3530 for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
3532 elif level == locking.LEVEL_NODE:
3533 # This will only lock the nodes in the group to be verified which contain
3535 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3536 self._LockInstancesNodes()
3538 # Lock all nodes in group to be verified
3539 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
3540 member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
3541 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3543 def CheckPrereq(self):
3544 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3545 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3546 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3548 assert self.group_uuid in owned_groups
3550 # Check if locked instances are still correct
3551 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
3553 # Get instance information
3554 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
3556 # Check if node groups for locked instances are still correct
3557 _CheckInstancesNodeGroups(self.cfg, self.instances,
3558 owned_groups, owned_nodes, self.group_uuid)
3560 def Exec(self, feedback_fn):
3561 """Verify integrity of cluster disks.
3563 @rtype: tuple of three items
3564 @return: a tuple of (dict of node-to-node_error, list of instances
3565 which need activate-disks, dict of instance: (node, volume) for
3570 res_instances = set()
3573 nv_dict = _MapInstanceDisksToNodes([inst
3574 for inst in self.instances.values()
3575 if inst.admin_state == constants.ADMINST_UP])
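# Note: nv_dict maps (node name, LV name) to the owning instance for every
# disk of the running instances selected above; entries popped while walking
# the call_lv_list results exist on their node, and whatever remains
# afterwards is reported as missing.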
3578 nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
3579 set(self.cfg.GetVmCapableNodeList()))
3581 node_lvs = self.rpc.call_lv_list(nodes, [])
3583 for (node, node_res) in node_lvs.items():
3584 if node_res.offline:
3587 msg = node_res.fail_msg
3589 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
3590 res_nodes[node] = msg
3593 for lv_name, (_, _, lv_online) in node_res.payload.items():
3594 inst = nv_dict.pop((node, lv_name), None)
3595 if not (lv_online or inst is None):
3596 res_instances.add(inst)
3598 # any leftover items in nv_dict are missing LVs, let's arrange the data
3600 for key, inst in nv_dict.iteritems():
3601 res_missing.setdefault(inst, []).append(list(key))
3603 return (res_nodes, list(res_instances), res_missing)
3606 class LUClusterRepairDiskSizes(NoHooksLU):
3607 """Verifies the cluster disks sizes.
3612 def ExpandNames(self):
3613 if self.op.instances:
3614 self.wanted_names = _GetWantedInstances(self, self.op.instances)
3615 self.needed_locks = {
3616 locking.LEVEL_NODE_RES: [],
3617 locking.LEVEL_INSTANCE: self.wanted_names,
3619 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
3621 self.wanted_names = None
3622 self.needed_locks = {
3623 locking.LEVEL_NODE_RES: locking.ALL_SET,
3624 locking.LEVEL_INSTANCE: locking.ALL_SET,
3626 self.share_locks = {
3627 locking.LEVEL_NODE_RES: 1,
3628 locking.LEVEL_INSTANCE: 0,
3631 def DeclareLocks(self, level):
3632 if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
3633 self._LockInstancesNodes(primary_only=True, level=level)
3635 def CheckPrereq(self):
3636 """Check prerequisites.
3638 This only checks the optional instance list against the existing names.
3641 if self.wanted_names is None:
3642 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
3644 self.wanted_instances = \
3645 map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
3647 def _EnsureChildSizes(self, disk):
3648 """Ensure children of the disk have the needed disk size.
3650 This is valid mainly for DRBD8 and fixes an issue where the
3651 children have a smaller disk size than the parent.
3653 @param disk: an L{ganeti.objects.Disk} object
3656 if disk.dev_type == constants.LD_DRBD8:
3657 assert disk.children, "Empty children for DRBD8?"
3658 fchild = disk.children[0]
3659 mismatch = fchild.size < disk.size
3661 self.LogInfo("Child disk has size %d, parent %d, fixing",
3662 fchild.size, disk.size)
3663 fchild.size = disk.size
3665 # and we recurse on this child only, not on the metadev
3666 return self._EnsureChildSizes(fchild) or mismatch
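# Illustration (hypothetical sizes): for a 10240 MiB DRBD8 disk whose data
# child only records 10200 MiB, the child is bumped to 10240 and True is
# returned, which makes Exec() below write the corrected configuration back
# via cfg.Update.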
3670 def Exec(self, feedback_fn):
3671 """Verify the size of cluster disks.
3674 # TODO: check child disks too
3675 # TODO: check differences in size between primary/secondary nodes
3677 for instance in self.wanted_instances:
3678 pnode = instance.primary_node
3679 if pnode not in per_node_disks:
3680 per_node_disks[pnode] = []
3681 for idx, disk in enumerate(instance.disks):
3682 per_node_disks[pnode].append((instance, idx, disk))
3684 assert not (frozenset(per_node_disks.keys()) -
3685 self.owned_locks(locking.LEVEL_NODE_RES)), \
3686 "Not owning correct locks"
3687 assert not self.owned_locks(locking.LEVEL_NODE)
3690 for node, dskl in per_node_disks.items():
3691 newl = [v[2].Copy() for v in dskl]
3693 self.cfg.SetDiskID(dsk, node)
3694 result = self.rpc.call_blockdev_getsize(node, newl)
3696 self.LogWarning("Failure in blockdev_getsize call to node"
3697 " %s, ignoring", node)
3699 if len(result.payload) != len(dskl):
3700 logging.warning("Invalid result from node %s: len(dksl)=%d,"
3701 " result.payload=%s", node, len(dskl), result.payload)
3702 self.LogWarning("Invalid result from node %s, ignoring node results",
3705 for ((instance, idx, disk), size) in zip(dskl, result.payload):
3707 self.LogWarning("Disk %d of instance %s did not return size"
3708 " information, ignoring", idx, instance.name)
3710 if not isinstance(size, (int, long)):
3711 self.LogWarning("Disk %d of instance %s did not return valid"
3712 " size information, ignoring", idx, instance.name)
3715 if size != disk.size:
3716 self.LogInfo("Disk %d of instance %s has mismatched size,"
3717 " correcting: recorded %d, actual %d", idx,
3718 instance.name, disk.size, size)
3720 self.cfg.Update(instance, feedback_fn)
3721 changed.append((instance.name, idx, size))
3722 if self._EnsureChildSizes(disk):
3723 self.cfg.Update(instance, feedback_fn)
3724 changed.append((instance.name, idx, disk.size))
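# The "changed" list accumulated above holds (instance name, disk index,
# new size) tuples; it is presumably what this LU returns to the caller.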
3728 class LUClusterRename(LogicalUnit):
3729 """Rename the cluster.
3732 HPATH = "cluster-rename"
3733 HTYPE = constants.HTYPE_CLUSTER
3735 def BuildHooksEnv(self):
3740 "OP_TARGET": self.cfg.GetClusterName(),
3741 "NEW_NAME": self.op.name,
3744 def BuildHooksNodes(self):
3745 """Build hooks nodes.
3748 return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
3750 def CheckPrereq(self):
3751 """Verify that the passed name is a valid one.
3754 hostname = netutils.GetHostname(name=self.op.name,
3755 family=self.cfg.GetPrimaryIPFamily())
3757 new_name = hostname.name
3758 self.ip = new_ip = hostname.ip
3759 old_name = self.cfg.GetClusterName()
3760 old_ip = self.cfg.GetMasterIP()
3761 if new_name == old_name and new_ip == old_ip:
3762 raise errors.OpPrereqError("Neither the name nor the IP address of the"
3763 " cluster has changed",
3765 if new_ip != old_ip:
3766 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
3767 raise errors.OpPrereqError("The given cluster IP address (%s) is"
3768 " reachable on the network" %
3769 new_ip, errors.ECODE_NOTUNIQUE)
3771 self.op.name = new_name
3773 def Exec(self, feedback_fn):
3774 """Rename the cluster.
3777 clustername = self.op.name
3780 # shutdown the master IP
3781 master_params = self.cfg.GetMasterNetworkParameters()
3782 ems = self.cfg.GetUseExternalMipScript()
3783 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
3785 result.Raise("Could not disable the master role")
3788 cluster = self.cfg.GetClusterInfo()
3789 cluster.cluster_name = clustername
3790 cluster.master_ip = new_ip
3791 self.cfg.Update(cluster, feedback_fn)
3793 # update the known hosts file
3794 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
3795 node_list = self.cfg.GetOnlineNodeList()
3797 node_list.remove(master_params.name)
3800 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
3802 master_params.ip = new_ip
3803 result = self.rpc.call_node_activate_master_ip(master_params.name,
3805 msg = result.fail_msg
3807 self.LogWarning("Could not re-enable the master role on"
3808 " the master, please restart manually: %s", msg)
3813 def _ValidateNetmask(cfg, netmask):
3814 """Checks if a netmask is valid.
3816 @type cfg: L{config.ConfigWriter}
3817 @param cfg: The cluster configuration
3819 @param netmask: the netmask to be verified
3820 @raise errors.OpPrereqError: if the validation fails
3823 ip_family = cfg.GetPrimaryIPFamily()
3825 ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
3826 except errors.ProgrammerError:
3827 raise errors.OpPrereqError("Invalid primary ip family: %s." %
3829 if not ipcls.ValidateNetmask(netmask):
3830 raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
3834 class LUClusterSetParams(LogicalUnit):
3835 """Change the parameters of the cluster.
3838 HPATH = "cluster-modify"
3839 HTYPE = constants.HTYPE_CLUSTER
3842 def CheckArguments(self):
3846 if self.op.uid_pool:
3847 uidpool.CheckUidPool(self.op.uid_pool)
3849 if self.op.add_uids:
3850 uidpool.CheckUidPool(self.op.add_uids)
3852 if self.op.remove_uids:
3853 uidpool.CheckUidPool(self.op.remove_uids)
3855 if self.op.master_netmask is not None:
3856 _ValidateNetmask(self.cfg, self.op.master_netmask)
3858 if self.op.diskparams:
3859 for dt_params in self.op.diskparams.values():
3860 utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
3862 utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
3863 except errors.OpPrereqError, err:
3864 raise errors.OpPrereqError("While verify diskparams options: %s" % err,
3867 def ExpandNames(self):
3868 # FIXME: in the future, modifying other cluster params may not require
3869 # checking on (and locking) all nodes.
3870 self.needed_locks = {
3871 locking.LEVEL_NODE: locking.ALL_SET,
3872 locking.LEVEL_INSTANCE: locking.ALL_SET,
3873 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3875 self.share_locks = {
3876 locking.LEVEL_NODE: 1,
3877 locking.LEVEL_INSTANCE: 1,
3878 locking.LEVEL_NODEGROUP: 1,
3881 def BuildHooksEnv(self):
3886 "OP_TARGET": self.cfg.GetClusterName(),
3887 "NEW_VG_NAME": self.op.vg_name,
3890 def BuildHooksNodes(self):
3891 """Build hooks nodes.
3894 mn = self.cfg.GetMasterNode()
3897 def CheckPrereq(self):
3898 """Check prerequisites.
3900 This checks that the given parameters do not conflict and
3901 that the given volume group is valid.
3904 if self.op.vg_name is not None and not self.op.vg_name:
3905 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
3906 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
3907 " instances exist", errors.ECODE_INVAL)
3909 if self.op.drbd_helper is not None and not self.op.drbd_helper:
3910 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
3911 raise errors.OpPrereqError("Cannot disable drbd helper while"
3912 " drbd-based instances exist",
3915 node_list = self.owned_locks(locking.LEVEL_NODE)
3917 # if vg_name is not None, check the given volume group on all nodes
3919 vglist = self.rpc.call_vg_list(node_list)
3920 for node in node_list:
3921 msg = vglist[node].fail_msg
3923 # ignoring down node
3924 self.LogWarning("Error while gathering data on node %s"
3925 " (ignoring node): %s", node, msg)
3927 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
3929 constants.MIN_VG_SIZE)
3931 raise errors.OpPrereqError("Error on node '%s': %s" %
3932 (node, vgstatus), errors.ECODE_ENVIRON)
3934 if self.op.drbd_helper:
3935 # checks given drbd helper on all nodes
3936 helpers = self.rpc.call_drbd_helper(node_list)
3937 for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
3939 self.LogInfo("Not checking drbd helper on offline node %s", node)
3941 msg = helpers[node].fail_msg
3943 raise errors.OpPrereqError("Error checking drbd helper on node"
3944 " '%s': %s" % (node, msg),
3945 errors.ECODE_ENVIRON)
3946 node_helper = helpers[node].payload
3947 if node_helper != self.op.drbd_helper:
3948 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
3949 (node, node_helper), errors.ECODE_ENVIRON)
3951 self.cluster = cluster = self.cfg.GetClusterInfo()
3952 # validate params changes
3953 if self.op.beparams:
3954 objects.UpgradeBeParams(self.op.beparams)
3955 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
3956 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
3958 if self.op.ndparams:
3959 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
3960 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
3962 # TODO: we need a more general way to handle resetting
3963 # cluster-level parameters to default values
3964 if self.new_ndparams["oob_program"] == "":
3965 self.new_ndparams["oob_program"] = \
3966 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
3968 if self.op.hv_state:
3969 new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
3970 self.cluster.hv_state_static)
3971 self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
3972 for hv, values in new_hv_state.items())
3974 if self.op.disk_state:
3975 new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
3976 self.cluster.disk_state_static)
3977 self.new_disk_state = \
3978 dict((storage, dict((name, cluster.SimpleFillDiskState(values))
3979 for name, values in svalues.items()))
3980 for storage, svalues in new_disk_state.items())
3983 self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
3986 all_instances = self.cfg.GetAllInstancesInfo().values()
3988 for group in self.cfg.GetAllNodeGroupsInfo().values():
3989 instances = frozenset([inst for inst in all_instances
3990 if compat.any(node in group.members
3991 for node in inst.all_nodes)])
3992 new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
3993 new = _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
3995 new_ipolicy, instances)
3997 violations.update(new)
4000 self.LogWarning("After the ipolicy change the following instances"
4001 " violate them: %s",
4002 utils.CommaJoin(utils.NiceSort(violations)))
4004 if self.op.nicparams:
4005 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
4006 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
4007 objects.NIC.CheckParameterSyntax(self.new_nicparams)
4010 # check all instances for consistency
4011 for instance in self.cfg.GetAllInstancesInfo().values():
4012 for nic_idx, nic in enumerate(instance.nics):
4013 params_copy = copy.deepcopy(nic.nicparams)
4014 params_filled = objects.FillDict(self.new_nicparams, params_copy)
4016 # check parameter syntax
4018 objects.NIC.CheckParameterSyntax(params_filled)
4019 except errors.ConfigurationError, err:
4020 nic_errors.append("Instance %s, nic/%d: %s" %
4021 (instance.name, nic_idx, err))
4023 # if we're moving instances to routed, check that they have an ip
4024 target_mode = params_filled[constants.NIC_MODE]
4025 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
4026 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
4027 " address" % (instance.name, nic_idx))
4029 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
4030 "\n".join(nic_errors))
4032 # hypervisor list/parameters
4033 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
4034 if self.op.hvparams:
4035 for hv_name, hv_dict in self.op.hvparams.items():
4036 if hv_name not in self.new_hvparams:
4037 self.new_hvparams[hv_name] = hv_dict
4039 self.new_hvparams[hv_name].update(hv_dict)
4041 # disk template parameters
4042 self.new_diskparams = objects.FillDict(cluster.diskparams, {})
4043 if self.op.diskparams:
4044 for dt_name, dt_params in self.op.diskparams.items():
4045 if dt_name not in self.new_diskparams:
4046 self.new_diskparams[dt_name] = dt_params
4048 self.new_diskparams[dt_name].update(dt_params)
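# The per-template merge above mirrors the hvparams handling: only the keys
# named in self.op.diskparams for a given disk template are overridden, while
# the template's remaining parameters keep their current cluster-level values.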
4050 # os hypervisor parameters
4051 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
4053 for os_name, hvs in self.op.os_hvp.items():
4054 if os_name not in self.new_os_hvp:
4055 self.new_os_hvp[os_name] = hvs
4057 for hv_name, hv_dict in hvs.items():
4058 if hv_name not in self.new_os_hvp[os_name]:
4059 self.new_os_hvp[os_name][hv_name] = hv_dict
4061 self.new_os_hvp[os_name][hv_name].update(hv_dict)
4064 self.new_osp = objects.FillDict(cluster.osparams, {})
4065 if self.op.osparams:
4066 for os_name, osp in self.op.osparams.items():
4067 if os_name not in self.new_osp:
4068 self.new_osp[os_name] = {}
4070 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
4073 if not self.new_osp[os_name]:
4074 # we removed all parameters
4075 del self.new_osp[os_name]
4077 # check the parameter validity (remote check)
4078 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
4079 os_name, self.new_osp[os_name])
4081 # changes to the hypervisor list
4082 if self.op.enabled_hypervisors is not None:
4083 self.hv_list = self.op.enabled_hypervisors
4084 for hv in self.hv_list:
4085 # if the hypervisor doesn't already exist in the cluster
4086 # hvparams, we initialize it to empty, and then (in both
4087 # cases) we make sure to fill the defaults, as we might not
4088 # have a complete defaults list if the hypervisor wasn't enabled before
4090 if hv not in new_hvp:
4092 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
4093 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
4095 self.hv_list = cluster.enabled_hypervisors
4097 if self.op.hvparams or self.op.enabled_hypervisors is not None:
4098 # either the enabled list has changed, or the parameters have, validate
4099 for hv_name, hv_params in self.new_hvparams.items():
4100 if ((self.op.hvparams and hv_name in self.op.hvparams) or
4101 (self.op.enabled_hypervisors and
4102 hv_name in self.op.enabled_hypervisors)):
4103 # either this is a new hypervisor, or its parameters have changed
4104 hv_class = hypervisor.GetHypervisor(hv_name)
4105 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4106 hv_class.CheckParameterSyntax(hv_params)
4107 _CheckHVParams(self, node_list, hv_name, hv_params)
4110 # no need to check any newly-enabled hypervisors, since the
4111 # defaults have already been checked in the above code-block
4112 for os_name, os_hvp in self.new_os_hvp.items():
4113 for hv_name, hv_params in os_hvp.items():
4114 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4115 # we need to fill in the new os_hvp on top of the actual hv_p
4116 cluster_defaults = self.new_hvparams.get(hv_name, {})
4117 new_osp = objects.FillDict(cluster_defaults, hv_params)
4118 hv_class = hypervisor.GetHypervisor(hv_name)
4119 hv_class.CheckParameterSyntax(new_osp)
4120 _CheckHVParams(self, node_list, hv_name, new_osp)
4122 if self.op.default_iallocator:
4123 alloc_script = utils.FindFile(self.op.default_iallocator,
4124 constants.IALLOCATOR_SEARCH_PATH,
4126 if alloc_script is None:
4127 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
4128 " specified" % self.op.default_iallocator,
4131 def Exec(self, feedback_fn):
4132 """Change the parameters of the cluster.
4135 if self.op.vg_name is not None:
4136 new_volume = self.op.vg_name
4139 if new_volume != self.cfg.GetVGName():
4140 self.cfg.SetVGName(new_volume)
4142 feedback_fn("Cluster LVM configuration already in desired"
4143 " state, not changing")
4144 if self.op.drbd_helper is not None:
4145 new_helper = self.op.drbd_helper
4148 if new_helper != self.cfg.GetDRBDHelper():
4149 self.cfg.SetDRBDHelper(new_helper)
4151 feedback_fn("Cluster DRBD helper already in desired state,"
4153 if self.op.hvparams:
4154 self.cluster.hvparams = self.new_hvparams
4156 self.cluster.os_hvp = self.new_os_hvp
4157 if self.op.enabled_hypervisors is not None:
4158 self.cluster.hvparams = self.new_hvparams
4159 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
4160 if self.op.beparams:
4161 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
4162 if self.op.nicparams:
4163 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
4165 self.cluster.ipolicy = self.new_ipolicy
4166 if self.op.osparams:
4167 self.cluster.osparams = self.new_osp
4168 if self.op.ndparams:
4169 self.cluster.ndparams = self.new_ndparams
4170 if self.op.diskparams:
4171 self.cluster.diskparams = self.new_diskparams
4172 if self.op.hv_state:
4173 self.cluster.hv_state_static = self.new_hv_state
4174 if self.op.disk_state:
4175 self.cluster.disk_state_static = self.new_disk_state
4177 if self.op.candidate_pool_size is not None:
4178 self.cluster.candidate_pool_size = self.op.candidate_pool_size
4179 # we need to update the pool size here, otherwise the save will fail
4180 _AdjustCandidatePool(self, [])
4182 if self.op.maintain_node_health is not None:
4183 if self.op.maintain_node_health and not constants.ENABLE_CONFD:
4184 feedback_fn("Note: CONFD was disabled at build time, node health"
4185 " maintenance is not useful (still enabling it)")
4186 self.cluster.maintain_node_health = self.op.maintain_node_health
4188 if self.op.prealloc_wipe_disks is not None:
4189 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
4191 if self.op.add_uids is not None:
4192 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
4194 if self.op.remove_uids is not None:
4195 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
4197 if self.op.uid_pool is not None:
4198 self.cluster.uid_pool = self.op.uid_pool
4200 if self.op.default_iallocator is not None:
4201 self.cluster.default_iallocator = self.op.default_iallocator
4203 if self.op.reserved_lvs is not None:
4204 self.cluster.reserved_lvs = self.op.reserved_lvs
4206 if self.op.use_external_mip_script is not None:
4207 self.cluster.use_external_mip_script = self.op.use_external_mip_script
4209 def helper_os(aname, mods, desc):
4211 lst = getattr(self.cluster, aname)
4212 for key, val in mods:
4213 if key == constants.DDM_ADD:
4215 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
4218 elif key == constants.DDM_REMOVE:
4222 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
4224 raise errors.ProgrammerError("Invalid modification '%s'" % key)
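# helper_os expects "mods" as a list of (action, os_name) pairs, e.g.
# [(constants.DDM_ADD, "lenny-image")] adds the (illustrative) OS name to the
# given list, while constants.DDM_REMOVE takes it out again.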
4226 if self.op.hidden_os:
4227 helper_os("hidden_os", self.op.hidden_os, "hidden")
4229 if self.op.blacklisted_os:
4230 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
4232 if self.op.master_netdev:
4233 master_params = self.cfg.GetMasterNetworkParameters()
4234 ems = self.cfg.GetUseExternalMipScript()
4235 feedback_fn("Shutting down master ip on the current netdev (%s)" %
4236 self.cluster.master_netdev)
4237 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4239 result.Raise("Could not disable the master ip")
4240 feedback_fn("Changing master_netdev from %s to %s" %
4241 (master_params.netdev, self.op.master_netdev))
4242 self.cluster.master_netdev = self.op.master_netdev
4244 if self.op.master_netmask:
4245 master_params = self.cfg.GetMasterNetworkParameters()
4246 feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
4247 result = self.rpc.call_node_change_master_netmask(master_params.name,
4248 master_params.netmask,
4249 self.op.master_netmask,
4251 master_params.netdev)
4253 msg = "Could not change the master IP netmask: %s" % result.fail_msg
4256 self.cluster.master_netmask = self.op.master_netmask
4258 self.cfg.Update(self.cluster, feedback_fn)
4260 if self.op.master_netdev:
4261 master_params = self.cfg.GetMasterNetworkParameters()
4262 feedback_fn("Starting the master ip on the new master netdev (%s)" %
4263 self.op.master_netdev)
4264 ems = self.cfg.GetUseExternalMipScript()
4265 result = self.rpc.call_node_activate_master_ip(master_params.name,
4268 self.LogWarning("Could not re-enable the master ip on"
4269 " the master, please restart manually: %s",
4273 def _UploadHelper(lu, nodes, fname):
4274 """Helper for uploading a file and showing warnings.
4277 if os.path.exists(fname):
4278 result = lu.rpc.call_upload_file(nodes, fname)
4279 for to_node, to_result in result.items():
4280 msg = to_result.fail_msg
4282 msg = ("Copy of file %s to node %s failed: %s" %
4283 (fname, to_node, msg))
4284 lu.proc.LogWarning(msg)
4287 def _ComputeAncillaryFiles(cluster, redist):
4288 """Compute files external to Ganeti which need to be consistent.
4290 @type redist: boolean
4291 @param redist: Whether to include files which need to be redistributed
4294 # Compute files for all nodes
4296 constants.SSH_KNOWN_HOSTS_FILE,
4297 constants.CONFD_HMAC_KEY,
4298 constants.CLUSTER_DOMAIN_SECRET_FILE,
4299 constants.SPICE_CERT_FILE,
4300 constants.SPICE_CACERT_FILE,
4301 constants.RAPI_USERS_FILE,
4305 files_all.update(constants.ALL_CERT_FILES)
4306 files_all.update(ssconf.SimpleStore().GetFileList())
4308 # we need to ship at least the RAPI certificate
4309 files_all.add(constants.RAPI_CERT_FILE)
4311 if cluster.modify_etc_hosts:
4312 files_all.add(constants.ETC_HOSTS)
4314 if cluster.use_external_mip_script:
4315 files_all.add(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
4317 # Files which are optional; these must:
4318 # - be present in one other category as well
4319 # - either exist or not exist on all nodes of that category (mc, vm all)
4321 constants.RAPI_USERS_FILE,
4324 # Files which should only be on master candidates
4328 files_mc.add(constants.CLUSTER_CONF_FILE)
4330 # Files which should only be on VM-capable nodes
4331 files_vm = set(filename
4332 for hv_name in cluster.enabled_hypervisors
4333 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
4335 files_opt |= set(filename
4336 for hv_name in cluster.enabled_hypervisors
4337 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
4339 # Filenames in each category must be unique
4340 all_files_set = files_all | files_mc | files_vm
4341 assert (len(all_files_set) ==
4342 sum(map(len, [files_all, files_mc, files_vm]))), \
4343 "Found file listed in more than one file list"
4345 # Optional files must be present in one other category
4346 assert all_files_set.issuperset(files_opt), \
4347 "Optional file not in a different required list"
4349 return (files_all, files_opt, files_mc, files_vm)
4352 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
4353 """Distribute additional files which are part of the cluster configuration.
4355 ConfigWriter takes care of distributing the config and ssconf files, but
4356 there are more files which should be distributed to all nodes. This function
4357 makes sure those are copied.
4359 @param lu: calling logical unit
4360 @param additional_nodes: list of nodes not in the config to distribute to
4361 @type additional_vm: boolean
4362 @param additional_vm: whether the additional nodes are vm-capable or not
4365 # Gather target nodes
4366 cluster = lu.cfg.GetClusterInfo()
4367 master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
4369 online_nodes = lu.cfg.GetOnlineNodeList()
4370 online_set = frozenset(online_nodes)
4371 vm_nodes = list(online_set.intersection(lu.cfg.GetVmCapableNodeList()))
4373 if additional_nodes is not None:
4374 online_nodes.extend(additional_nodes)
4376 vm_nodes.extend(additional_nodes)
4378 # Never distribute to master node
4379 for nodelist in [online_nodes, vm_nodes]:
4380 if master_info.name in nodelist:
4381 nodelist.remove(master_info.name)
4384 (files_all, _, files_mc, files_vm) = \
4385 _ComputeAncillaryFiles(cluster, True)
4388 # Never re-distribute the configuration file from here
4388 assert not (constants.CLUSTER_CONF_FILE in files_all or
4389 constants.CLUSTER_CONF_FILE in files_vm)
4390 assert not files_mc, "Master candidates not handled in this function"
4393 (online_nodes, files_all),
4394 (vm_nodes, files_vm),
4398 for (node_list, files) in filemap:
4400 _UploadHelper(lu, node_list, fname)
4403 class LUClusterRedistConf(NoHooksLU):
4404 """Force the redistribution of cluster configuration.
4406 This is a very simple LU.
4411 def ExpandNames(self):
4412 self.needed_locks = {
4413 locking.LEVEL_NODE: locking.ALL_SET,
4415 self.share_locks[locking.LEVEL_NODE] = 1
4417 def Exec(self, feedback_fn):
4418 """Redistribute the configuration.
4421 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
4422 _RedistributeAncillaryFiles(self)
4425 class LUClusterActivateMasterIp(NoHooksLU):
4426 """Activate the master IP on the master node.
4429 def Exec(self, feedback_fn):
4430 """Activate the master IP.
4433 master_params = self.cfg.GetMasterNetworkParameters()
4434 ems = self.cfg.GetUseExternalMipScript()
4435 result = self.rpc.call_node_activate_master_ip(master_params.name,
4437 result.Raise("Could not activate the master IP")
4440 class LUClusterDeactivateMasterIp(NoHooksLU):
4441 """Deactivate the master IP on the master node.
4444 def Exec(self, feedback_fn):
4445 """Deactivate the master IP.
4448 master_params = self.cfg.GetMasterNetworkParameters()
4449 ems = self.cfg.GetUseExternalMipScript()
4450 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4452 result.Raise("Could not deactivate the master IP")
4455 def _WaitForSync(lu, instance, disks=None, oneshot=False):
4456 """Sleep and poll for an instance's disk to sync.
4459 if not instance.disks or (disks is not None and not disks):
4462 disks = _ExpandCheckDisks(instance, disks)
4465 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
4467 node = instance.primary_node
4470 lu.cfg.SetDiskID(dev, node)
4472 # TODO: Convert to utils.Retry
4475 degr_retries = 10 # in seconds, as we sleep 1 second each time
4479 cumul_degraded = False
4480 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, (disks, instance))
4481 msg = rstats.fail_msg
4483 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
4486 raise errors.RemoteError("Can't contact node %s for mirror data,"
4487 " aborting." % node)
4490 rstats = rstats.payload
4492 for i, mstat in enumerate(rstats):
4494 lu.LogWarning("Can't compute data for node %s/%s",
4495 node, disks[i].iv_name)
4498 cumul_degraded = (cumul_degraded or
4499 (mstat.is_degraded and mstat.sync_percent is None))
4500 if mstat.sync_percent is not None:
4502 if mstat.estimated_time is not None:
4503 rem_time = ("%s remaining (estimated)" %
4504 utils.FormatSeconds(mstat.estimated_time))
4505 max_time = mstat.estimated_time
4507 rem_time = "no time estimate"
4508 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
4509 (disks[i].iv_name, mstat.sync_percent, rem_time))
4511 # if we're done but degraded, let's do a few small retries, to
4512 # make sure we see a stable and not transient situation; therefore
4513 # we force restart of the loop
4514 if (done or oneshot) and cumul_degraded and degr_retries > 0:
4515 logging.info("Degraded disks found, %d retries left", degr_retries)
4523 time.sleep(min(60, max_time))
4526 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
4527 return not cumul_degraded
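# The return value above is True only if no disk was left degraded once the
# wait loop finished, so callers can treat a False result as "sync did not
# complete cleanly".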
4530 def _BlockdevFind(lu, node, dev, instance):
4531 """Wrapper around call_blockdev_find to annotate diskparams.
4533 @param lu: A reference to the lu object
4534 @param node: The node to call out to
4535 @param dev: The device to find
4536 @param instance: The instance object the device belongs to
4537 @returns: The result of the rpc call
4540 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4541 return lu.rpc.call_blockdev_find(node, disk)
4544 def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
4545 """Wrapper around L{_CheckDiskConsistencyInner}.
4548 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4549 return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
4553 def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
4555 """Check that mirrors are not degraded.
4557 @attention: The device has to be annotated already.
4559 The ldisk parameter, if True, will change the test from the
4560 is_degraded attribute (which represents overall non-ok status for
4561 the device(s)) to the ldisk (representing the local storage status).
4564 lu.cfg.SetDiskID(dev, node)
4568 if on_primary or dev.AssembleOnSecondary():
4569 rstats = lu.rpc.call_blockdev_find(node, dev)
4570 msg = rstats.fail_msg
4572 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
4574 elif not rstats.payload:
4575 lu.LogWarning("Can't find disk on node %s", node)
4579 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
4581 result = result and not rstats.payload.is_degraded
4584 for child in dev.children:
4585 result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
4591 class LUOobCommand(NoHooksLU):
4592 """Logical unit for OOB handling.
4596 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
4598 def ExpandNames(self):
4599 """Gather locks we need.
4602 if self.op.node_names:
4603 self.op.node_names = _GetWantedNodes(self, self.op.node_names)
4604 lock_names = self.op.node_names
4606 lock_names = locking.ALL_SET
4608 self.needed_locks = {
4609 locking.LEVEL_NODE: lock_names,
4612 def CheckPrereq(self):
4613 """Check prerequisites.
4616 - the node exists in the configuration
4619 Any errors are signaled by raising errors.OpPrereqError.
4623 self.master_node = self.cfg.GetMasterNode()
4625 assert self.op.power_delay >= 0.0
4627 if self.op.node_names:
4628 if (self.op.command in self._SKIP_MASTER and
4629 self.master_node in self.op.node_names):
4630 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
4631 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
4633 if master_oob_handler:
4634 additional_text = ("run '%s %s %s' if you want to operate on the"
4635 " master regardless") % (master_oob_handler,
4639 additional_text = "it does not support out-of-band operations"
4641 raise errors.OpPrereqError(("Operating on the master node %s is not"
4642 " allowed for %s; %s") %
4643 (self.master_node, self.op.command,
4644 additional_text), errors.ECODE_INVAL)
4646 self.op.node_names = self.cfg.GetNodeList()
4647 if self.op.command in self._SKIP_MASTER:
4648 self.op.node_names.remove(self.master_node)
4650 if self.op.command in self._SKIP_MASTER:
4651 assert self.master_node not in self.op.node_names
4653 for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
4655 raise errors.OpPrereqError("Node %s not found" % node_name,
4658 self.nodes.append(node)
4660 if (not self.op.ignore_status and
4661 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
4662 raise errors.OpPrereqError(("Cannot power off node %s because it is"
4663 " not marked offline") % node_name,
4666 def Exec(self, feedback_fn):
4667 """Execute OOB and return result if we expect any.
4670 master_node = self.master_node
4673 for idx, node in enumerate(utils.NiceSort(self.nodes,
4674 key=lambda node: node.name)):
4675 node_entry = [(constants.RS_NORMAL, node.name)]
4676 ret.append(node_entry)
4678 oob_program = _SupportsOob(self.cfg, node)
4681 node_entry.append((constants.RS_UNAVAIL, None))
4684 logging.info("Executing out-of-band command '%s' using '%s' on %s",
4685 self.op.command, oob_program, node.name)
4686 result = self.rpc.call_run_oob(master_node, oob_program,
4687 self.op.command, node.name,
4691 self.LogWarning("Out-of-band RPC failed on node '%s': %s",
4692 node.name, result.fail_msg)
4693 node_entry.append((constants.RS_NODATA, None))
4696 self._CheckPayload(result)
4697 except errors.OpExecError, err:
4698 self.LogWarning("Payload returned by node '%s' is not valid: %s",
4700 node_entry.append((constants.RS_NODATA, None))
4702 if self.op.command == constants.OOB_HEALTH:
4703 # For health we should log important events
4704 for item, status in result.payload:
4705 if status in [constants.OOB_STATUS_WARNING,
4706 constants.OOB_STATUS_CRITICAL]:
4707 self.LogWarning("Item '%s' on node '%s' has status '%s'",
4708 item, node.name, status)
4710 if self.op.command == constants.OOB_POWER_ON:
4712 elif self.op.command == constants.OOB_POWER_OFF:
4713 node.powered = False
4714 elif self.op.command == constants.OOB_POWER_STATUS:
4715 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
4716 if powered != node.powered:
4717 logging.warning(("Recorded power state (%s) of node '%s' does not"
4718 " match actual power state (%s)"), node.powered,
4721 # For configuration changing commands we should update the node
4722 if self.op.command in (constants.OOB_POWER_ON,
4723 constants.OOB_POWER_OFF):
4724 self.cfg.Update(node, feedback_fn)
4726 node_entry.append((constants.RS_NORMAL, result.payload))
4728 if (self.op.command == constants.OOB_POWER_ON and
4729 idx < len(self.nodes) - 1):
4730 time.sleep(self.op.power_delay)
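# Power-on commands are deliberately staggered: after every node except the
# last one the LU sleeps for self.op.power_delay seconds, presumably to spread
# the load of powering many machines back on at once.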
4734 def _CheckPayload(self, result):
4735 """Checks if the payload is valid.
4737 @param result: RPC result
4738 @raises errors.OpExecError: If payload is not valid
4742 if self.op.command == constants.OOB_HEALTH:
4743 if not isinstance(result.payload, list):
4744 errs.append("command 'health' is expected to return a list but got %s" %
4745 type(result.payload))
4747 for item, status in result.payload:
4748 if status not in constants.OOB_STATUSES:
4749 errs.append("health item '%s' has invalid status '%s'" %
4752 if self.op.command == constants.OOB_POWER_STATUS:
4753 if not isinstance(result.payload, dict):
4754 errs.append("power-status is expected to return a dict but got %s" %
4755 type(result.payload))
4757 if self.op.command in [
4758 constants.OOB_POWER_ON,
4759 constants.OOB_POWER_OFF,
4760 constants.OOB_POWER_CYCLE,
4762 if result.payload is not None:
4763 errs.append("%s is expected to not return payload but got '%s'" %
4764 (self.op.command, result.payload))
4767 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
4768 utils.CommaJoin(errs))
4771 class _OsQuery(_QueryBase):
4772 FIELDS = query.OS_FIELDS
4774 def ExpandNames(self, lu):
4775 # Lock all nodes in shared mode
4776 # Temporary removal of locks, should be reverted later
4777 # TODO: reintroduce locks when they are lighter-weight
4778 lu.needed_locks = {}
4779 #self.share_locks[locking.LEVEL_NODE] = 1
4780 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4782 # The following variables interact with _QueryBase._GetNames
4784 self.wanted = self.names
4786 self.wanted = locking.ALL_SET
4788 self.do_locking = self.use_locking
4790 def DeclareLocks(self, lu, level):
4794 def _DiagnoseByOS(rlist):
4795 """Remaps a per-node return list into an a per-os per-node dictionary
4797 @param rlist: a map with node names as keys and OS objects as values
4800 @return: a dictionary with osnames as keys and as value another
4801 map, with nodes as keys and tuples of (path, status, diagnose,
4802 variants, parameters, api_versions) as values, eg::
4804 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
4805 (/srv/..., False, "invalid api")],
4806 "node2": [(/srv/..., True, "", [], [])]}
4811 # we build here the list of nodes that didn't fail the RPC (at RPC
4812 # level), so that nodes with a non-responding node daemon don't
4813 # make all OSes invalid
4814 good_nodes = [node_name for node_name in rlist
4815 if not rlist[node_name].fail_msg]
4816 for node_name, nr in rlist.items():
4817 if nr.fail_msg or not nr.payload:
4819 for (name, path, status, diagnose, variants,
4820 params, api_versions) in nr.payload:
4821 if name not in all_os:
4822 # build a list of nodes for this os containing empty lists
4823 # for each node in node_list
4825 for nname in good_nodes:
4826 all_os[name][nname] = []
4827 # convert params from [name, help] to (name, help)
4828 params = [tuple(v) for v in params]
4829 all_os[name][node_name].append((path, status, diagnose,
4830 variants, params, api_versions))
4833 def _GetQueryData(self, lu):
4834 """Computes the list of nodes and their attributes.
4837 # Locking is not used
4838 assert not (compat.any(lu.glm.is_owned(level)
4839 for level in locking.LEVELS
4840 if level != locking.LEVEL_CLUSTER) or
4841 self.do_locking or self.use_locking)
4843 valid_nodes = [node.name
4844 for node in lu.cfg.GetAllNodesInfo().values()
4845 if not node.offline and node.vm_capable]
4846 pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
4847 cluster = lu.cfg.GetClusterInfo()
4851 for (os_name, os_data) in pol.items():
4852 info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
4853 hidden=(os_name in cluster.hidden_os),
4854 blacklisted=(os_name in cluster.blacklisted_os))
4858 api_versions = set()
4860 for idx, osl in enumerate(os_data.values()):
4861 info.valid = bool(info.valid and osl and osl[0][1])
4865 (node_variants, node_params, node_api) = osl[0][3:6]
4868 variants.update(node_variants)
4869 parameters.update(node_params)
4870 api_versions.update(node_api)
4872 # Filter out inconsistent values
4873 variants.intersection_update(node_variants)
4874 parameters.intersection_update(node_params)
4875 api_versions.intersection_update(node_api)
4877 info.variants = list(variants)
4878 info.parameters = list(parameters)
4879 info.api_versions = list(api_versions)
4881 data[os_name] = info
4883 # Prepare data in requested order
4884 return [data[name] for name in self._GetNames(lu, pol.keys(), None)
4888 class LUOsDiagnose(NoHooksLU):
4889 """Logical unit for OS diagnose/query.
4895 def _BuildFilter(fields, names):
4896 """Builds a filter for querying OSes.
4899 name_filter = qlang.MakeSimpleFilter("name", names)
4901 # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
4902 # respective field is not requested
4903 status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
4904 for fname in ["hidden", "blacklisted"]
4905 if fname not in fields]
4906 if "valid" not in fields:
4907 status_filter.append([qlang.OP_TRUE, "valid"])
4910 status_filter.insert(0, qlang.OP_AND)
4912 status_filter = None
4914 if name_filter and status_filter:
4915 return [qlang.OP_AND, name_filter, status_filter]
4919 return status_filter
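# As a sketch of what the code above produces: querying only the "name" field
# yields a status filter along the lines of
#   [qlang.OP_AND,
#    [qlang.OP_NOT, [qlang.OP_TRUE, "hidden"]],
#    [qlang.OP_NOT, [qlang.OP_TRUE, "blacklisted"]],
#    [qlang.OP_TRUE, "valid"]]
# which is then AND-ed with the name filter when one was requested.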
4921 def CheckArguments(self):
4922 self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
4923 self.op.output_fields, False)
4925 def ExpandNames(self):
4926 self.oq.ExpandNames(self)
4928 def Exec(self, feedback_fn):
4929 return self.oq.OldStyleQuery(self)
4932 class LUNodeRemove(LogicalUnit):
4933 """Logical unit for removing a node.
4936 HPATH = "node-remove"
4937 HTYPE = constants.HTYPE_NODE
4939 def BuildHooksEnv(self):
4944 "OP_TARGET": self.op.node_name,
4945 "NODE_NAME": self.op.node_name,
4948 def BuildHooksNodes(self):
4949 """Build hooks nodes.
4951 This doesn't run on the target node in the pre phase as a failed
4952 node would then be impossible to remove.
4955 all_nodes = self.cfg.GetNodeList()
4957 all_nodes.remove(self.op.node_name)
4960 return (all_nodes, all_nodes)
4962 def CheckPrereq(self):
4963 """Check prerequisites.
4966 - the node exists in the configuration
4967 - it does not have primary or secondary instances
4968 - it's not the master
4970 Any errors are signaled by raising errors.OpPrereqError.
4973 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4974 node = self.cfg.GetNodeInfo(self.op.node_name)
4975 assert node is not None
4977 masternode = self.cfg.GetMasterNode()
4978 if node.name == masternode:
4979 raise errors.OpPrereqError("Node is the master node, failover to another"
4980 " node is required", errors.ECODE_INVAL)
4982 for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
4983 if node.name in instance.all_nodes:
4984 raise errors.OpPrereqError("Instance %s is still running on the node,"
4985 " please remove first" % instance_name,
4987 self.op.node_name = node.name
4990 def Exec(self, feedback_fn):
4991 """Removes the node from the cluster.
4995 logging.info("Stopping the node daemon and removing configs from node %s",
4998 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
5000 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5003 # Promote nodes to master candidate as needed
5004 _AdjustCandidatePool(self, exceptions=[node.name])
5005 self.context.RemoveNode(node.name)
5007 # Run post hooks on the node before it's removed
5008 _RunPostHook(self, node.name)
5010 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
5011 msg = result.fail_msg
5013 self.LogWarning("Errors encountered on the remote node while leaving"
5014 " the cluster: %s", msg)
5016 # Remove node from our /etc/hosts
5017 if self.cfg.GetClusterInfo().modify_etc_hosts:
5018 master_node = self.cfg.GetMasterNode()
5019 result = self.rpc.call_etc_hosts_modify(master_node,
5020 constants.ETC_HOSTS_REMOVE,
5022 result.Raise("Can't update hosts file with new host data")
5023 _RedistributeAncillaryFiles(self)
5026 class _NodeQuery(_QueryBase):
5027 FIELDS = query.NODE_FIELDS
5029 def ExpandNames(self, lu):
5030 lu.needed_locks = {}
5031 lu.share_locks = _ShareAll()
5034 self.wanted = _GetWantedNodes(lu, self.names)
5036 self.wanted = locking.ALL_SET
5038 self.do_locking = (self.use_locking and
5039 query.NQ_LIVE in self.requested_data)
5042 # If any non-static field is requested we need to lock the nodes
5043 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
5045 def DeclareLocks(self, lu, level):
5048 def _GetQueryData(self, lu):
5049 """Computes the list of nodes and their attributes.
5052 all_info = lu.cfg.GetAllNodesInfo()
5054 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
5056 # Gather data as requested
5057 if query.NQ_LIVE in self.requested_data:
5058 # filter out non-vm_capable nodes
5059 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
5061 node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
5062 [lu.cfg.GetHypervisorType()])
5063 live_data = dict((name, _MakeLegacyNodeInfo(nresult.payload))
5064 for (name, nresult) in node_data.items()
5065 if not nresult.fail_msg and nresult.payload)
5069 if query.NQ_INST in self.requested_data:
5070 node_to_primary = dict([(name, set()) for name in nodenames])
5071 node_to_secondary = dict([(name, set()) for name in nodenames])
5073 inst_data = lu.cfg.GetAllInstancesInfo()
5075 for inst in inst_data.values():
5076 if inst.primary_node in node_to_primary:
5077 node_to_primary[inst.primary_node].add(inst.name)
5078 for secnode in inst.secondary_nodes:
5079 if secnode in node_to_secondary:
5080 node_to_secondary[secnode].add(inst.name)
5082 node_to_primary = None
5083 node_to_secondary = None
5085 if query.NQ_OOB in self.requested_data:
5086 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
5087 for name, node in all_info.iteritems())
5091 if query.NQ_GROUP in self.requested_data:
5092 groups = lu.cfg.GetAllNodeGroupsInfo()
5096 return query.NodeQueryData([all_info[name] for name in nodenames],
5097 live_data, lu.cfg.GetMasterNode(),
5098 node_to_primary, node_to_secondary, groups,
5099 oob_support, lu.cfg.GetClusterInfo())
5102 class LUNodeQuery(NoHooksLU):
5103 """Logical unit for querying nodes.
5106 # pylint: disable=W0142
5109 def CheckArguments(self):
5110 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
5111 self.op.output_fields, self.op.use_locking)
5113 def ExpandNames(self):
5114 self.nq.ExpandNames(self)
5116 def DeclareLocks(self, level):
5117 self.nq.DeclareLocks(self, level)
5119 def Exec(self, feedback_fn):
5120 return self.nq.OldStyleQuery(self)
5123 class LUNodeQueryvols(NoHooksLU):
5124 """Logical unit for getting volumes on node(s).
5128 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
5129 _FIELDS_STATIC = utils.FieldSet("node")
5131 def CheckArguments(self):
5132 _CheckOutputFields(static=self._FIELDS_STATIC,
5133 dynamic=self._FIELDS_DYNAMIC,
5134 selected=self.op.output_fields)
5136 def ExpandNames(self):
5137 self.share_locks = _ShareAll()
5138 self.needed_locks = {}
5140 if not self.op.nodes:
5141 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5143 self.needed_locks[locking.LEVEL_NODE] = \
5144 _GetWantedNodes(self, self.op.nodes)
5146 def Exec(self, feedback_fn):
5147 """Computes the list of nodes and their attributes.
5150 nodenames = self.owned_locks(locking.LEVEL_NODE)
5151 volumes = self.rpc.call_node_volumes(nodenames)
5153 ilist = self.cfg.GetAllInstancesInfo()
5154 vol2inst = _MapInstanceDisksToNodes(ilist.values())
5157 for node in nodenames:
5158 nresult = volumes[node]
5161 msg = nresult.fail_msg
5163 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
5166 node_vols = sorted(nresult.payload,
5167 key=operator.itemgetter("dev"))
5169 for vol in node_vols:
5171 for field in self.op.output_fields:
5174 elif field == "phys":
5178 elif field == "name":
5180 elif field == "size":
5181 val = int(float(vol["size"]))
5182 elif field == "instance":
5183 val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
5185 raise errors.ParameterError(field)
5186 node_output.append(str(val))
5188 output.append(node_output)
5193 class LUNodeQueryStorage(NoHooksLU):
5194 """Logical unit for getting information on storage units on node(s).
5197 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
5200 def CheckArguments(self):
5201 _CheckOutputFields(static=self._FIELDS_STATIC,
5202 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
5203 selected=self.op.output_fields)
5205 def ExpandNames(self):
5206 self.share_locks = _ShareAll()
5207 self.needed_locks = {}
5210 self.needed_locks[locking.LEVEL_NODE] = \
5211 _GetWantedNodes(self, self.op.nodes)
5213 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5215 def Exec(self, feedback_fn):
5216 """Computes the list of nodes and their attributes.
5219 self.nodes = self.owned_locks(locking.LEVEL_NODE)
5221 # Always get name to sort by
5222 if constants.SF_NAME in self.op.output_fields:
5223 fields = self.op.output_fields[:]
5225 fields = [constants.SF_NAME] + self.op.output_fields
5227 # Never ask for node or type as it's only known to the LU
5228 for extra in [constants.SF_NODE, constants.SF_TYPE]:
5229 while extra in fields:
5230 fields.remove(extra)
5232 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
5233 name_idx = field_idx[constants.SF_NAME]
5235 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5236 data = self.rpc.call_storage_list(self.nodes,
5237 self.op.storage_type, st_args,
5238 self.op.name, fields)
5242 for node in utils.NiceSort(self.nodes):
5243 nresult = data[node]
5247 msg = nresult.fail_msg
5249 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
5252 rows = dict([(row[name_idx], row) for row in nresult.payload])
5254 for name in utils.NiceSort(rows.keys()):
5259 for field in self.op.output_fields:
5260 if field == constants.SF_NODE:
5262 elif field == constants.SF_TYPE:
5263 val = self.op.storage_type
5264 elif field in field_idx:
5265 val = row[field_idx[field]]
5267 raise errors.ParameterError(field)
5276 class _InstanceQuery(_QueryBase):
5277 FIELDS = query.INSTANCE_FIELDS
5279 def ExpandNames(self, lu):
5280 lu.needed_locks = {}
5281 lu.share_locks = _ShareAll()
5284 self.wanted = _GetWantedInstances(lu, self.names)
5286 self.wanted = locking.ALL_SET
5288 self.do_locking = (self.use_locking and
5289 query.IQ_LIVE in self.requested_data)
5291 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5292 lu.needed_locks[locking.LEVEL_NODEGROUP] = []
5293 lu.needed_locks[locking.LEVEL_NODE] = []
5294 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5296 self.do_grouplocks = (self.do_locking and
5297 query.IQ_NODES in self.requested_data)
5299 def DeclareLocks(self, lu, level):
5301 if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
5302 assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
5304 # Lock all groups used by instances optimistically; this requires going
5305 # via the node before it's locked, requiring verification later on
5306 lu.needed_locks[locking.LEVEL_NODEGROUP] = \
5308 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
5309 for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
5310 elif level == locking.LEVEL_NODE:
5311 lu._LockInstancesNodes() # pylint: disable=W0212
5314 def _CheckGroupLocks(lu):
5315 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
5316 owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
5318 # Check if node groups for locked instances are still correct
5319 for instance_name in owned_instances:
5320 _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
5322 def _GetQueryData(self, lu):
5323 """Computes the list of instances and their attributes.
5326 if self.do_grouplocks:
5327 self._CheckGroupLocks(lu)
5329 cluster = lu.cfg.GetClusterInfo()
5330 all_info = lu.cfg.GetAllInstancesInfo()
5332 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
5334 instance_list = [all_info[name] for name in instance_names]
5335 nodes = frozenset(itertools.chain(*(inst.all_nodes
5336 for inst in instance_list)))
5337 hv_list = list(set([inst.hypervisor for inst in instance_list]))
5340 wrongnode_inst = set()
5342 # Gather data as requested
5343 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
5345 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
5347 result = node_data[name]
5349 # offline nodes will be in both lists
5350 assert result.fail_msg
5351 offline_nodes.append(name)
5353 bad_nodes.append(name)
5354 elif result.payload:
5355 for inst in result.payload:
5356 if inst in all_info:
5357 if all_info[inst].primary_node == name:
5358 live_data.update(result.payload)
5360 wrongnode_inst.add(inst)
5362 # orphan instance; we don't list it here as we don't
5363 # handle this case yet in the output of instance listing
5364 logging.warning("Orphan instance '%s' found on node %s",
5366 # else no instance is alive
5370 if query.IQ_DISKUSAGE in self.requested_data:
5371 disk_usage = dict((inst.name,
5372 _ComputeDiskSize(inst.disk_template,
5373 [{constants.IDISK_SIZE: disk.size}
5374 for disk in inst.disks]))
5375 for inst in instance_list)
5379 if query.IQ_CONSOLE in self.requested_data:
5381 for inst in instance_list:
5382 if inst.name in live_data:
5383 # Instance is running
5384 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
5386 consinfo[inst.name] = None
5387 assert set(consinfo.keys()) == set(instance_names)
5391 if query.IQ_NODES in self.requested_data:
5392 node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
5394 nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
5395 groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
5396 for uuid in set(map(operator.attrgetter("group"),
5402 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
5403 disk_usage, offline_nodes, bad_nodes,
5404 live_data, wrongnode_inst, consinfo,
5408 class LUQuery(NoHooksLU):
5409 """Query for resources/items of a certain kind.
5412 # pylint: disable=W0142
5415 def CheckArguments(self):
5416 qcls = _GetQueryImplementation(self.op.what)
5418 self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
5420 def ExpandNames(self):
5421 self.impl.ExpandNames(self)
5423 def DeclareLocks(self, level):
5424 self.impl.DeclareLocks(self, level)
5426 def Exec(self, feedback_fn):
5427 return self.impl.NewStyleQuery(self)
5430 class LUQueryFields(NoHooksLU):
5431 """Query for resources/items of a certain kind.
5434 # pylint: disable=W0142
5437 def CheckArguments(self):
5438 self.qcls = _GetQueryImplementation(self.op.what)
5440 def ExpandNames(self):
5441 self.needed_locks = {}
5443 def Exec(self, feedback_fn):
5444 return query.QueryFields(self.qcls.FIELDS, self.op.fields)
5447 class LUNodeModifyStorage(NoHooksLU):
5448 """Logical unit for modifying a storage volume on a node.
5453 def CheckArguments(self):
5454 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5456 storage_type = self.op.storage_type
5459 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
5461 raise errors.OpPrereqError("Storage units of type '%s' can not be"
5462 " modified" % storage_type,
5465 diff = set(self.op.changes.keys()) - modifiable
5467 raise errors.OpPrereqError("The following fields can not be modified for"
5468 " storage units of type '%s': %r" %
5469 (storage_type, list(diff)),
5472 def ExpandNames(self):
5473 self.needed_locks = {
5474 locking.LEVEL_NODE: self.op.node_name,
5477 def Exec(self, feedback_fn):
5478 """Computes the list of nodes and their attributes.
5481 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5482 result = self.rpc.call_storage_modify(self.op.node_name,
5483 self.op.storage_type, st_args,
5484 self.op.name, self.op.changes)
5485 result.Raise("Failed to modify storage unit '%s' on %s" %
5486 (self.op.name, self.op.node_name))
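# self.op.changes is a dict of field -> new value, restricted to the
# modifiable fields checked in CheckArguments; e.g. (assuming LVM PV storage,
# where "allocatable" is the modifiable field) something like
# {constants.SF_ALLOCATABLE: False} to stop allocating from a physical volume.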
5489 class LUNodeAdd(LogicalUnit):
5490 """Logical unit for adding node to the cluster.
5494 HTYPE = constants.HTYPE_NODE
5495 _NFLAGS = ["master_capable", "vm_capable"]
5497 def CheckArguments(self):
5498 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
5499 # validate/normalize the node name
5500 self.hostname = netutils.GetHostname(name=self.op.node_name,
5501 family=self.primary_ip_family)
5502 self.op.node_name = self.hostname.name
5504 if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
5505 raise errors.OpPrereqError("Cannot readd the master node",
5508 if self.op.readd and self.op.group:
5509 raise errors.OpPrereqError("Cannot pass a node group when a node is"
5510 " being readded", errors.ECODE_INVAL)
5512 def BuildHooksEnv(self):
5515 This will run on all nodes before, and on all nodes + the new node after.
5519 "OP_TARGET": self.op.node_name,
5520 "NODE_NAME": self.op.node_name,
5521 "NODE_PIP": self.op.primary_ip,
5522 "NODE_SIP": self.op.secondary_ip,
5523 "MASTER_CAPABLE": str(self.op.master_capable),
5524 "VM_CAPABLE": str(self.op.vm_capable),
5527 def BuildHooksNodes(self):
5528 """Build hooks nodes.
5531 # Exclude added node
5532 pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
5533 post_nodes = pre_nodes + [self.op.node_name, ]
5535 return (pre_nodes, post_nodes)
5537 def CheckPrereq(self):
5538 """Check prerequisites.
5541 - the new node is not already in the config
5543 - its parameters (single/dual homed) match the cluster
5545 Any errors are signaled by raising errors.OpPrereqError.
5549 hostname = self.hostname
5550 node = hostname.name
5551 primary_ip = self.op.primary_ip = hostname.ip
5552 if self.op.secondary_ip is None:
5553 if self.primary_ip_family == netutils.IP6Address.family:
5554 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
5555 " IPv4 address must be given as secondary",
5557 self.op.secondary_ip = primary_ip
5559 secondary_ip = self.op.secondary_ip
5560 if not netutils.IP4Address.IsValid(secondary_ip):
5561 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5562 " address" % secondary_ip, errors.ECODE_INVAL)
5564 node_list = cfg.GetNodeList()
5565 if not self.op.readd and node in node_list:
5566 raise errors.OpPrereqError("Node %s is already in the configuration" %
5567 node, errors.ECODE_EXISTS)
5568 elif self.op.readd and node not in node_list:
5569 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
5572 self.changed_primary_ip = False
5574 for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
5575 if self.op.readd and node == existing_node_name:
5576 if existing_node.secondary_ip != secondary_ip:
5577 raise errors.OpPrereqError("Readded node doesn't have the same IP"
5578 " address configuration as before",
5580 if existing_node.primary_ip != primary_ip:
5581 self.changed_primary_ip = True
5585 if (existing_node.primary_ip == primary_ip or
5586 existing_node.secondary_ip == primary_ip or
5587 existing_node.primary_ip == secondary_ip or
5588 existing_node.secondary_ip == secondary_ip):
5589 raise errors.OpPrereqError("New node ip address(es) conflict with"
5590 " existing node %s" % existing_node.name,
5591 errors.ECODE_NOTUNIQUE)
5593 # After this 'if' block, None is no longer a valid value for the
5594 # _capable op attributes
5596 old_node = self.cfg.GetNodeInfo(node)
5597 assert old_node is not None, "Can't retrieve locked node %s" % node
5598 for attr in self._NFLAGS:
5599 if getattr(self.op, attr) is None:
5600 setattr(self.op, attr, getattr(old_node, attr))
5602 for attr in self._NFLAGS:
5603 if getattr(self.op, attr) is None:
5604 setattr(self.op, attr, True)
5606 if self.op.readd and not self.op.vm_capable:
5607 pri, sec = cfg.GetNodeInstances(node)
5609 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
5610 " flag set to false, but it already holds"
5611 " instances" % node,
5614 # check that the type of the node (single versus dual homed) is the
5615 # same as for the master
5616 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
5617 master_singlehomed = myself.secondary_ip == myself.primary_ip
5618 newbie_singlehomed = secondary_ip == primary_ip
5619 if master_singlehomed != newbie_singlehomed:
5620 if master_singlehomed:
5621 raise errors.OpPrereqError("The master has no secondary ip but the"
5622 " new node has one",
5625 raise errors.OpPrereqError("The master has a secondary ip but the"
5626 " new node doesn't have one",
5629 # checks reachability
5630 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
5631 raise errors.OpPrereqError("Node not reachable by ping",
5632 errors.ECODE_ENVIRON)
5634 if not newbie_singlehomed:
5635 # check reachability from my secondary ip to newbie's secondary ip
5636 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
5637 source=myself.secondary_ip):
5638 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5639 " based ping to node daemon port",
5640 errors.ECODE_ENVIRON)
5647 if self.op.master_capable:
5648 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
5650 self.master_candidate = False
5653 self.new_node = old_node
5655 node_group = cfg.LookupNodeGroup(self.op.group)
5656 self.new_node = objects.Node(name=node,
5657 primary_ip=primary_ip,
5658 secondary_ip=secondary_ip,
5659 master_candidate=self.master_candidate,
5660 offline=False, drained=False,
5663 if self.op.ndparams:
5664 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
5666 if self.op.hv_state:
5667 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
5669 if self.op.disk_state:
5670 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
5672 # TODO: If we need to have multiple DnsOnlyRunner we probably should make
5673 # it a property on the base class.
5674 result = rpc.DnsOnlyRunner().call_version([node])[node]
5675 result.Raise("Can't get version information from node %s" % node)
5676 if constants.PROTOCOL_VERSION == result.payload:
5677 logging.info("Communication to node %s fine, sw version %s match",
5678 node, result.payload)
5680 raise errors.OpPrereqError("Version mismatch master version %s,"
5681 " node version %s" %
5682 (constants.PROTOCOL_VERSION, result.payload),
5683 errors.ECODE_ENVIRON)
5685 def Exec(self, feedback_fn):
5686 """Adds the new node to the cluster.
5689 new_node = self.new_node
5690 node = new_node.name
5692 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5695 # We are adding a new node, so we assume it's powered
5696 new_node.powered = True
5698 # for re-adds, reset the offline/drained/master-candidate flags;
5699 # we need to reset here, otherwise offline would prevent RPC calls
5700 # later in the procedure; this also means that if the re-add
5701 # fails, we are left with a non-offlined, broken node
5703 new_node.drained = new_node.offline = False # pylint: disable=W0201
5704 self.LogInfo("Readding a node, the offline/drained flags were reset")
5705 # if we demote the node, we do cleanup later in the procedure
5706 new_node.master_candidate = self.master_candidate
5707 if self.changed_primary_ip:
5708 new_node.primary_ip = self.op.primary_ip
5710 # copy the master/vm_capable flags
5711 for attr in self._NFLAGS:
5712 setattr(new_node, attr, getattr(self.op, attr))
5714 # notify the user about any possible mc promotion
5715 if new_node.master_candidate:
5716 self.LogInfo("Node will be a master candidate")
5718 if self.op.ndparams:
5719 new_node.ndparams = self.op.ndparams
5721 new_node.ndparams = {}
5723 if self.op.hv_state:
5724 new_node.hv_state_static = self.new_hv_state
5726 if self.op.disk_state:
5727 new_node.disk_state_static = self.new_disk_state
5729 # Add node to our /etc/hosts, and add key to known_hosts
5730 if self.cfg.GetClusterInfo().modify_etc_hosts:
5731 master_node = self.cfg.GetMasterNode()
5732 result = self.rpc.call_etc_hosts_modify(master_node,
5733 constants.ETC_HOSTS_ADD,
5736 result.Raise("Can't update hosts file with new host data")
5738 if new_node.secondary_ip != new_node.primary_ip:
5739 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
5742 node_verify_list = [self.cfg.GetMasterNode()]
5743 node_verify_param = {
5744 constants.NV_NODELIST: ([node], {}),
5745 # TODO: do a node-net-test as well?
5748 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
5749 self.cfg.GetClusterName())
5750 for verifier in node_verify_list:
5751 result[verifier].Raise("Cannot communicate with node %s" % verifier)
5752 nl_payload = result[verifier].payload[constants.NV_NODELIST]
5754 for failed in nl_payload:
5755 feedback_fn("ssh/hostname verification failed"
5756 " (checking from %s): %s" %
5757 (verifier, nl_payload[failed]))
5758 raise errors.OpExecError("ssh/hostname verification failed")
5761 _RedistributeAncillaryFiles(self)
5762 self.context.ReaddNode(new_node)
5763 # make sure we redistribute the config
5764 self.cfg.Update(new_node, feedback_fn)
5765 # and make sure the new node will not have old files around
5766 if not new_node.master_candidate:
5767 result = self.rpc.call_node_demote_from_mc(new_node.name)
5768 msg = result.fail_msg
5770 self.LogWarning("Node failed to demote itself from master"
5771 " candidate status: %s" % msg)
5773 _RedistributeAncillaryFiles(self, additional_nodes=[node],
5774 additional_vm=self.op.vm_capable)
5775 self.context.AddNode(new_node, self.proc.GetECId())
5778 class LUNodeSetParams(LogicalUnit):
5779 """Modifies the parameters of a node.
5781 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5782 to the node role (as _ROLE_*)
5783 @cvar _R2F: a dictionary from node role to tuples of flags
5784 @cvar _FLAGS: a list of attribute names corresponding to the flags
5787 HPATH = "node-modify"
5788 HTYPE = constants.HTYPE_NODE
5790 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
5792 (True, False, False): _ROLE_CANDIDATE,
5793 (False, True, False): _ROLE_DRAINED,
5794 (False, False, True): _ROLE_OFFLINE,
5795 (False, False, False): _ROLE_REGULAR,
5797 _R2F = dict((v, k) for k, v in _F2R.items())
5798 _FLAGS = ["master_candidate", "drained", "offline"]
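  # Illustration of the mapping above: flags (master_candidate=True,
  # drained=False, offline=False) map to _ROLE_CANDIDATE, and
  # _R2F[_ROLE_OFFLINE] == (False, False, True)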
5800 def CheckArguments(self):
5801 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5802 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
5803 self.op.master_capable, self.op.vm_capable,
5804 self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
5806 if all_mods.count(None) == len(all_mods):
5807 raise errors.OpPrereqError("Please pass at least one modification",
5809 if all_mods.count(True) > 1:
5810 raise errors.OpPrereqError("Can't set the node into more than one"
5811 " state at the same time",
5814 # Boolean value that tells us whether we might be demoting from MC
5815 self.might_demote = (self.op.master_candidate == False or
5816 self.op.offline == True or
5817 self.op.drained == True or
5818 self.op.master_capable == False)
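    # The explicit "== False"/"== True" comparisons are deliberate: None means
    # "flag not given in the opcode", so only actual requests to change a flag
    # should count towards a possible demotion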
5820 if self.op.secondary_ip:
5821 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
5822 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5823 " address" % self.op.secondary_ip,
5826 self.lock_all = self.op.auto_promote and self.might_demote
5827 self.lock_instances = self.op.secondary_ip is not None
5829 def _InstanceFilter(self, instance):
5830 """Filter for getting affected instances.
5833 return (instance.disk_template in constants.DTS_INT_MIRROR and
5834 self.op.node_name in instance.all_nodes)
5836 def ExpandNames(self):
5838 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
5840 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
5842 # Since modifying a node can have severe effects on currently running
5843 # operations the resource lock is at least acquired in shared mode
5844 self.needed_locks[locking.LEVEL_NODE_RES] = \
5845 self.needed_locks[locking.LEVEL_NODE]
5847 # Get node resource and instance locks in shared mode; they are not used
5848 # for anything but read-only access
5849 self.share_locks[locking.LEVEL_NODE_RES] = 1
5850 self.share_locks[locking.LEVEL_INSTANCE] = 1
5852 if self.lock_instances:
5853 self.needed_locks[locking.LEVEL_INSTANCE] = \
5854 frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))
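      # Only instances using an internally mirrored disk template and having
      # this node among their nodes can be affected by a secondary IP change,
      # so _InstanceFilter above restricts the instance locks to exactly those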
5856 def BuildHooksEnv(self):
5859 This runs on the master node.
5863 "OP_TARGET": self.op.node_name,
5864 "MASTER_CANDIDATE": str(self.op.master_candidate),
5865 "OFFLINE": str(self.op.offline),
5866 "DRAINED": str(self.op.drained),
5867 "MASTER_CAPABLE": str(self.op.master_capable),
5868 "VM_CAPABLE": str(self.op.vm_capable),
5871 def BuildHooksNodes(self):
5872 """Build hooks nodes.
5875 nl = [self.cfg.GetMasterNode(), self.op.node_name]
5878 def CheckPrereq(self):
5879 """Check prerequisites.
5881 This only checks the instance list against the existing names.
5884 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
5886 if self.lock_instances:
5887 affected_instances = \
5888 self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
5890 # Verify instance locks
5891 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
5892 wanted_instances = frozenset(affected_instances.keys())
5893 if wanted_instances - owned_instances:
5894 raise errors.OpPrereqError("Instances affected by changing node %s's"
5895 " secondary IP address have changed since"
5896 " locks were acquired, wanted '%s', have"
5897 " '%s'; retry the operation" %
5899 utils.CommaJoin(wanted_instances),
5900 utils.CommaJoin(owned_instances)),
5903 affected_instances = None
5905 if (self.op.master_candidate is not None or
5906 self.op.drained is not None or
5907 self.op.offline is not None):
5908 # we can't change the master's node flags
5909 if self.op.node_name == self.cfg.GetMasterNode():
5910 raise errors.OpPrereqError("The master role can be changed"
5911 " only via master-failover",
5914 if self.op.master_candidate and not node.master_capable:
5915 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
5916 " it a master candidate" % node.name,
5919 if self.op.vm_capable == False:
5920 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
5922 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
5923 " the vm_capable flag" % node.name,
5926 if node.master_candidate and self.might_demote and not self.lock_all:
5927 assert not self.op.auto_promote, "auto_promote set but lock_all not"
5928 # check if after removing the current node, we're missing master candidates
5930 (mc_remaining, mc_should, _) = \
5931 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
5932 if mc_remaining < mc_should:
5933 raise errors.OpPrereqError("Not enough master candidates, please"
5934 " pass auto promote option to allow"
5935 " promotion (--auto-promote or RAPI"
5936 " auto_promote=True)", errors.ECODE_STATE)
5938 self.old_flags = old_flags = (node.master_candidate,
5939 node.drained, node.offline)
5940 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
5941 self.old_role = old_role = self._F2R[old_flags]
5943 # Check for ineffective changes
5944 for attr in self._FLAGS:
5945 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5946 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5947 setattr(self.op, attr, None)
5949 # Past this point, any flag change to False means a transition
5950 # away from the respective state, as only real changes are kept
5952 # TODO: We might query the real power state if it supports OOB
5953 if _SupportsOob(self.cfg, node):
5954 if self.op.offline is False and not (node.powered or
5955 self.op.powered == True):
5956 raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5957 " offline status can be reset") %
5959 elif self.op.powered is not None:
5960 raise errors.OpPrereqError(("Unable to change powered state for node %s"
5961 " as it does not support out-of-band"
5962 " handling") % self.op.node_name)
5964 # If we're being de-offlined or un-drained, we'll promote ourselves to MC if needed
5965 if (self.op.drained == False or self.op.offline == False or
5966 (self.op.master_capable and not node.master_capable)):
5967 if _DecideSelfPromotion(self):
5968 self.op.master_candidate = True
5969 self.LogInfo("Auto-promoting node to master candidate")
5971 # If we're no longer master capable, we'll demote ourselves from MC
5972 if self.op.master_capable == False and node.master_candidate:
5973 self.LogInfo("Demoting from master candidate")
5974 self.op.master_candidate = False
5977 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
5978 if self.op.master_candidate:
5979 new_role = self._ROLE_CANDIDATE
5980 elif self.op.drained:
5981 new_role = self._ROLE_DRAINED
5982 elif self.op.offline:
5983 new_role = self._ROLE_OFFLINE
5984 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
5985 # False is still in new flags, which means we're un-setting (the current) flags
5987 new_role = self._ROLE_REGULAR
5988 else: # no new flags, nothing, keep old role
5991 self.new_role = new_role
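    # Resolution examples: offline=True yields _ROLE_OFFLINE; a lone
    # master_candidate=False (un-setting the flag) falls through to
    # _ROLE_REGULAR; with no flags given the old role is kept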
5993 if old_role == self._ROLE_OFFLINE and new_role != old_role:
5994 # Trying to transition out of offline status
5995 result = self.rpc.call_version([node.name])[node.name]
5997 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
5998 " to report its version: %s" %
5999 (node.name, result.fail_msg),
6002 self.LogWarning("Transitioning node from offline to online state"
6003 " without using re-add. Please make sure the node"
6006 if self.op.secondary_ip:
6007 # Ok even without locking, because this can't be changed by any LU
6008 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
6009 master_singlehomed = master.secondary_ip == master.primary_ip
6010 if master_singlehomed and self.op.secondary_ip:
6011 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
6012 " homed cluster", errors.ECODE_INVAL)
6014 assert not (frozenset(affected_instances) -
6015 self.owned_locks(locking.LEVEL_INSTANCE))
6018 if affected_instances:
6019 raise errors.OpPrereqError("Cannot change secondary IP address:"
6020 " offline node has instances (%s)"
6021 " configured to use it" %
6022 utils.CommaJoin(affected_instances.keys()))
6024 # On online nodes, check that no instances are running, and that
6025 # the node has the new ip and we can reach it.
6026 for instance in affected_instances.values():
6027 _CheckInstanceState(self, instance, INSTANCE_DOWN,
6028 msg="cannot change secondary ip")
6030 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
6031 if master.name != node.name:
6032 # check reachability from master secondary ip to new secondary ip
6033 if not netutils.TcpPing(self.op.secondary_ip,
6034 constants.DEFAULT_NODED_PORT,
6035 source=master.secondary_ip):
6036 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
6037 " based ping to node daemon port",
6038 errors.ECODE_ENVIRON)
6040 if self.op.ndparams:
6041 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
6042 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
6043 self.new_ndparams = new_ndparams
6045 if self.op.hv_state:
6046 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
6047 self.node.hv_state_static)
6049 if self.op.disk_state:
6050 self.new_disk_state = \
6051 _MergeAndVerifyDiskState(self.op.disk_state,
6052 self.node.disk_state_static)
6054 def Exec(self, feedback_fn):
6059 old_role = self.old_role
6060 new_role = self.new_role
6064 if self.op.ndparams:
6065 node.ndparams = self.new_ndparams
6067 if self.op.powered is not None:
6068 node.powered = self.op.powered
6070 if self.op.hv_state:
6071 node.hv_state_static = self.new_hv_state
6073 if self.op.disk_state:
6074 node.disk_state_static = self.new_disk_state
6076 for attr in ["master_capable", "vm_capable"]:
6077 val = getattr(self.op, attr)
6079 setattr(node, attr, val)
6080 result.append((attr, str(val)))
6082 if new_role != old_role:
6083 # Tell the node to demote itself, if no longer MC and not offline
6084 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
6085 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
6087 self.LogWarning("Node failed to demote itself: %s", msg)
6089 new_flags = self._R2F[new_role]
6090 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
6092 result.append((desc, str(nf)))
6093 (node.master_candidate, node.drained, node.offline) = new_flags
6095 # we locked all nodes, so we adjust the candidate pool before updating this node
6097 _AdjustCandidatePool(self, [node.name])
6099 if self.op.secondary_ip:
6100 node.secondary_ip = self.op.secondary_ip
6101 result.append(("secondary_ip", self.op.secondary_ip))
6103 # this will trigger configuration file update, if needed
6104 self.cfg.Update(node, feedback_fn)
6106 # this will trigger job queue propagation or cleanup if the mc flag changed
6108 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
6109 self.context.ReaddNode(node)
6114 class LUNodePowercycle(NoHooksLU):
6115 """Powercycles a node.
6120 def CheckArguments(self):
6121 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6122 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
6123 raise errors.OpPrereqError("The node is the master and the force"
6124 " parameter was not set",
6127 def ExpandNames(self):
6128 """Locking for PowercycleNode.
6130 This is a last-resort option and shouldn't block on other
6131 jobs. Therefore, we grab no locks.
6134 self.needed_locks = {}
6136 def Exec(self, feedback_fn):
6140 result = self.rpc.call_node_powercycle(self.op.node_name,
6141 self.cfg.GetHypervisorType())
6142 result.Raise("Failed to schedule the reboot")
6143 return result.payload
6146 class LUClusterQuery(NoHooksLU):
6147 """Query cluster configuration.
6152 def ExpandNames(self):
6153 self.needed_locks = {}
6155 def Exec(self, feedback_fn):
6156 """Return cluster config.
6159 cluster = self.cfg.GetClusterInfo()
6162 # Filter just for enabled hypervisors
6163 for os_name, hv_dict in cluster.os_hvp.items():
6164 os_hvp[os_name] = {}
6165 for hv_name, hv_params in hv_dict.items():
6166 if hv_name in cluster.enabled_hypervisors:
6167 os_hvp[os_name][hv_name] = hv_params
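    # Example (OS and hypervisor names purely illustrative): with
    # enabled_hypervisors == ["xen-pvm"], an os_hvp entry
    # {"debian": {"xen-pvm": {...}, "kvm": {...}}} is reduced to
    # {"debian": {"xen-pvm": {...}}}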
6169 # Convert ip_family to ip_version
6170 primary_ip_version = constants.IP4_VERSION
6171 if cluster.primary_ip_family == netutils.IP6Address.family:
6172 primary_ip_version = constants.IP6_VERSION
6175 "software_version": constants.RELEASE_VERSION,
6176 "protocol_version": constants.PROTOCOL_VERSION,
6177 "config_version": constants.CONFIG_VERSION,
6178 "os_api_version": max(constants.OS_API_VERSIONS),
6179 "export_version": constants.EXPORT_VERSION,
6180 "architecture": runtime.GetArchInfo(),
6181 "name": cluster.cluster_name,
6182 "master": cluster.master_node,
6183 "default_hypervisor": cluster.primary_hypervisor,
6184 "enabled_hypervisors": cluster.enabled_hypervisors,
6185 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
6186 for hypervisor_name in cluster.enabled_hypervisors]),
6188 "beparams": cluster.beparams,
6189 "osparams": cluster.osparams,
6190 "ipolicy": cluster.ipolicy,
6191 "nicparams": cluster.nicparams,
6192 "ndparams": cluster.ndparams,
6193 "diskparams": cluster.diskparams,
6194 "candidate_pool_size": cluster.candidate_pool_size,
6195 "master_netdev": cluster.master_netdev,
6196 "master_netmask": cluster.master_netmask,
6197 "use_external_mip_script": cluster.use_external_mip_script,
6198 "volume_group_name": cluster.volume_group_name,
6199 "drbd_usermode_helper": cluster.drbd_usermode_helper,
6200 "file_storage_dir": cluster.file_storage_dir,
6201 "shared_file_storage_dir": cluster.shared_file_storage_dir,
6202 "maintain_node_health": cluster.maintain_node_health,
6203 "ctime": cluster.ctime,
6204 "mtime": cluster.mtime,
6205 "uuid": cluster.uuid,
6206 "tags": list(cluster.GetTags()),
6207 "uid_pool": cluster.uid_pool,
6208 "default_iallocator": cluster.default_iallocator,
6209 "reserved_lvs": cluster.reserved_lvs,
6210 "primary_ip_version": primary_ip_version,
6211 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
6212 "hidden_os": cluster.hidden_os,
6213 "blacklisted_os": cluster.blacklisted_os,
6219 class LUClusterConfigQuery(NoHooksLU):
6220 """Return configuration values.
6225 def CheckArguments(self):
6226 self.cq = _ClusterQuery(None, self.op.output_fields, False)
6228 def ExpandNames(self):
6229 self.cq.ExpandNames(self)
6231 def DeclareLocks(self, level):
6232 self.cq.DeclareLocks(self, level)
6234 def Exec(self, feedback_fn):
6235 result = self.cq.OldStyleQuery(self)
6237 assert len(result) == 1
6242 class _ClusterQuery(_QueryBase):
6243 FIELDS = query.CLUSTER_FIELDS
6245 #: Do not sort (there is only one item)
6248 def ExpandNames(self, lu):
6249 lu.needed_locks = {}
6251 # The following variables interact with _QueryBase._GetNames
6252 self.wanted = locking.ALL_SET
6253 self.do_locking = self.use_locking
6256 raise errors.OpPrereqError("Can not use locking for cluster queries",
6259 def DeclareLocks(self, lu, level):
6262 def _GetQueryData(self, lu):
6263 """Collects the requested cluster data.
6266 # Locking is not used
6267 assert not (compat.any(lu.glm.is_owned(level)
6268 for level in locking.LEVELS
6269 if level != locking.LEVEL_CLUSTER) or
6270 self.do_locking or self.use_locking)
6272 if query.CQ_CONFIG in self.requested_data:
6273 cluster = lu.cfg.GetClusterInfo()
6275 cluster = NotImplemented
6277 if query.CQ_QUEUE_DRAINED in self.requested_data:
6278 drain_flag = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
6280 drain_flag = NotImplemented
6282 if query.CQ_WATCHER_PAUSE in self.requested_data:
6283 watcher_pause = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
6285 watcher_pause = NotImplemented
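    # Data that was not requested is passed on as NotImplemented, letting the
    # query layer distinguish "not collected" from a real value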
6287 return query.ClusterQueryData(cluster, drain_flag, watcher_pause)
6290 class LUInstanceActivateDisks(NoHooksLU):
6291 """Bring up an instance's disks.
6296 def ExpandNames(self):
6297 self._ExpandAndLockInstance()
6298 self.needed_locks[locking.LEVEL_NODE] = []
6299 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6301 def DeclareLocks(self, level):
6302 if level == locking.LEVEL_NODE:
6303 self._LockInstancesNodes()
6305 def CheckPrereq(self):
6306 """Check prerequisites.
6308 This checks that the instance is in the cluster.
6311 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6312 assert self.instance is not None, \
6313 "Cannot retrieve locked instance %s" % self.op.instance_name
6314 _CheckNodeOnline(self, self.instance.primary_node)
6316 def Exec(self, feedback_fn):
6317 """Activate the disks.
6320 disks_ok, disks_info = \
6321 _AssembleInstanceDisks(self, self.instance,
6322 ignore_size=self.op.ignore_size)
6324 raise errors.OpExecError("Cannot activate block devices")
6329 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
6331 """Prepare the block devices for an instance.
6333 This sets up the block devices on all nodes.
6335 @type lu: L{LogicalUnit}
6336 @param lu: the logical unit on whose behalf we execute
6337 @type instance: L{objects.Instance}
6338 @param instance: the instance for whose disks we assemble
6339 @type disks: list of L{objects.Disk} or None
6340 @param disks: which disks to assemble (or all, if None)
6341 @type ignore_secondaries: boolean
6342 @param ignore_secondaries: if true, errors on secondary nodes
6343 won't result in an error return from the function
6344 @type ignore_size: boolean
6345 @param ignore_size: if true, the current known size of the disk
6346 will not be used during the disk activation, useful for cases
6347 when the size is wrong
6348 @return: False if the operation failed, otherwise a list of
6349 (host, instance_visible_name, node_visible_name)
6350 with the mapping from node devices to instance devices
6355 iname = instance.name
6356 disks = _ExpandCheckDisks(instance, disks)
6358 # With the two-pass mechanism we try to reduce the window of
6359 # opportunity for the race condition of switching DRBD to primary
6360 # before handshaking has occurred, but we do not eliminate it
6362 # The proper fix would be to wait (with some limits) until the
6363 # connection has been made and drbd transitions from WFConnection
6364 # into any other network-connected state (Connected, SyncTarget, SyncSource, etc.)
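  # In short: pass 1 assembles every disk on all nodes in secondary mode, and
  # only pass 2, restricted to the primary node, promotes the device, keeping
  # the window in which the primary could talk to a not-yet-assembled peer
  # small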
6367 # 1st pass, assemble on all nodes in secondary mode
6368 for idx, inst_disk in enumerate(disks):
6369 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6371 node_disk = node_disk.Copy()
6372 node_disk.UnsetSize()
6373 lu.cfg.SetDiskID(node_disk, node)
6374 result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6376 msg = result.fail_msg
6378 is_offline_secondary = (node in instance.secondary_nodes and
6380 lu.proc.LogWarning("Could not prepare block device %s on node %s"
6381 " (is_primary=False, pass=1): %s",
6382 inst_disk.iv_name, node, msg)
6383 if not (ignore_secondaries or is_offline_secondary):
6386 # FIXME: race condition on drbd migration to primary
6388 # 2nd pass, do only the primary node
6389 for idx, inst_disk in enumerate(disks):
6392 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6393 if node != instance.primary_node:
6396 node_disk = node_disk.Copy()
6397 node_disk.UnsetSize()
6398 lu.cfg.SetDiskID(node_disk, node)
6399 result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6401 msg = result.fail_msg
6403 lu.proc.LogWarning("Could not prepare block device %s on node %s"
6404 " (is_primary=True, pass=2): %s",
6405 inst_disk.iv_name, node, msg)
6408 dev_path = result.payload
6410 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
6412 # leave the disks configured for the primary node
6413 # this is a workaround that would be better fixed by
6414 # improving the logical/physical id handling
6416 lu.cfg.SetDiskID(disk, instance.primary_node)
6418 return disks_ok, device_info
6421 def _StartInstanceDisks(lu, instance, force):
6422 """Start the disks of an instance.
6425 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
6426 ignore_secondaries=force)
6428 _ShutdownInstanceDisks(lu, instance)
6429 if force is not None and not force:
6430 lu.proc.LogWarning("", hint="If the message above refers to a"
6432 " you can retry the operation using '--force'.")
6433 raise errors.OpExecError("Disk consistency error")
6436 class LUInstanceDeactivateDisks(NoHooksLU):
6437 """Shutdown an instance's disks.
6442 def ExpandNames(self):
6443 self._ExpandAndLockInstance()
6444 self.needed_locks[locking.LEVEL_NODE] = []
6445 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6447 def DeclareLocks(self, level):
6448 if level == locking.LEVEL_NODE:
6449 self._LockInstancesNodes()
6451 def CheckPrereq(self):
6452 """Check prerequisites.
6454 This checks that the instance is in the cluster.
6457 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6458 assert self.instance is not None, \
6459 "Cannot retrieve locked instance %s" % self.op.instance_name
6461 def Exec(self, feedback_fn):
6462 """Deactivate the disks
6465 instance = self.instance
6467 _ShutdownInstanceDisks(self, instance)
6469 _SafeShutdownInstanceDisks(self, instance)
6472 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
6473 """Shutdown block devices of an instance.
6475 This function checks if an instance is running, before calling
6476 _ShutdownInstanceDisks.
6479 _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
6480 _ShutdownInstanceDisks(lu, instance, disks=disks)
6483 def _ExpandCheckDisks(instance, disks):
6484 """Return the instance disks selected by the disks list
6486 @type disks: list of L{objects.Disk} or None
6487 @param disks: selected disks
6488 @rtype: list of L{objects.Disk}
6489 @return: selected instance disks to act on
6493 return instance.disks
6495 if not set(disks).issubset(instance.disks):
6496 raise errors.ProgrammerError("Can only act on disks belonging to the"
6501 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
6502 """Shutdown block devices of an instance.
6504 This does the shutdown on all nodes of the instance.
6506 If ignore_primary is false, errors on the primary node cause the shutdown to be reported as failed; otherwise such errors are only logged.
6511 disks = _ExpandCheckDisks(instance, disks)
6514 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
6515 lu.cfg.SetDiskID(top_disk, node)
6516 result = lu.rpc.call_blockdev_shutdown(node, (top_disk, instance))
6517 msg = result.fail_msg
6519 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
6520 disk.iv_name, node, msg)
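      # an error counts as a failure if it happened on the primary node (and
      # ignore_primary is not set) or on a secondary node that is not offline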
6521 if ((node == instance.primary_node and not ignore_primary) or
6522 (node != instance.primary_node and not result.offline)):
6527 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
6528 """Checks if a node has enough free memory.
6530 This function checks if a given node has the needed amount of free
6531 memory. In case the node has less memory or we cannot get the
6532 information from the node, this function raises an OpPrereqError
6535 @type lu: C{LogicalUnit}
6536 @param lu: a logical unit from which we get configuration data
6538 @param node: the node to check
6539 @type reason: C{str}
6540 @param reason: string to use in the error message
6541 @type requested: C{int}
6542 @param requested: the amount of memory in MiB to check for
6543 @type hypervisor_name: C{str}
6544 @param hypervisor_name: the hypervisor to ask for memory stats
6546 @return: node current free memory
6547 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
6548 we cannot check the node
6551 nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
6552 nodeinfo[node].Raise("Can't get data from node %s" % node,
6553 prereq=True, ecode=errors.ECODE_ENVIRON)
6554 (_, _, (hv_info, )) = nodeinfo[node].payload
6556 free_mem = hv_info.get("memory_free", None)
6557 if not isinstance(free_mem, int):
6558 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
6559 " was '%s'" % (node, free_mem),
6560 errors.ECODE_ENVIRON)
6561 if requested > free_mem:
6562 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
6563 " needed %s MiB, available %s MiB" %
6564 (node, reason, requested, free_mem),
6569 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
6570 """Checks if nodes have enough free disk space in all the VGs.
6572 This function checks if all given nodes have the needed amount of
6573 free disk. In case any node has less disk or we cannot get the
6574 information from the node, this function raises an OpPrereqError
6577 @type lu: C{LogicalUnit}
6578 @param lu: a logical unit from which we get configuration data
6579 @type nodenames: C{list}
6580 @param nodenames: the list of node names to check
6581 @type req_sizes: C{dict}
6582 @param req_sizes: the hash of vg and corresponding amount of disk in
6584 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6585 or we cannot check the node
6588 for vg, req_size in req_sizes.items():
6589 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
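  # req_sizes maps VG name to required MiB, e.g. (names illustrative)
  # {"xenvg": 10240, "sharedvg": 2048}; each VG is checked independently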
6592 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
6593 """Checks if nodes have enough free disk space in the specified VG.
6595 This function checks if all given nodes have the needed amount of
6596 free disk. In case any node has less disk or we cannot get the
6597 information from the node, this function raises an OpPrereqError
6600 @type lu: C{LogicalUnit}
6601 @param lu: a logical unit from which we get configuration data
6602 @type nodenames: C{list}
6603 @param nodenames: the list of node names to check
6605 @param vg: the volume group to check
6606 @type requested: C{int}
6607 @param requested: the amount of disk in MiB to check for
6608 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6609 or we cannot check the node
6612 nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
6613 for node in nodenames:
6614 info = nodeinfo[node]
6615 info.Raise("Cannot get current information from node %s" % node,
6616 prereq=True, ecode=errors.ECODE_ENVIRON)
6617 (_, (vg_info, ), _) = info.payload
6618 vg_free = vg_info.get("vg_free", None)
6619 if not isinstance(vg_free, int):
6620 raise errors.OpPrereqError("Can't compute free disk space on node"
6621 " %s for vg %s, result was '%s'" %
6622 (node, vg, vg_free), errors.ECODE_ENVIRON)
6623 if requested > vg_free:
6624 raise errors.OpPrereqError("Not enough disk space on target node %s"
6625 " vg %s: required %d MiB, available %d MiB" %
6626 (node, vg, requested, vg_free),
6630 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
6631 """Checks if nodes have enough physical CPUs.
6633 This function checks if all given nodes have the needed number of
6634 physical CPUs. In case any node has fewer CPUs or we cannot get the
6635 information from the node, this function raises an OpPrereqError
6638 @type lu: C{LogicalUnit}
6639 @param lu: a logical unit from which we get configuration data
6640 @type nodenames: C{list}
6641 @param nodenames: the list of node names to check
6642 @type requested: C{int}
6643 @param requested: the minimum acceptable number of physical CPUs
6644 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
6645 or we cannot check the node
6648 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
6649 for node in nodenames:
6650 info = nodeinfo[node]
6651 info.Raise("Cannot get current information from node %s" % node,
6652 prereq=True, ecode=errors.ECODE_ENVIRON)
6653 (_, _, (hv_info, )) = info.payload
6654 num_cpus = hv_info.get("cpu_total", None)
6655 if not isinstance(num_cpus, int):
6656 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
6657 " on node %s, result was '%s'" %
6658 (node, num_cpus), errors.ECODE_ENVIRON)
6659 if requested > num_cpus:
6660 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
6661 "required" % (node, num_cpus, requested),
6665 class LUInstanceStartup(LogicalUnit):
6666 """Starts an instance.
6669 HPATH = "instance-start"
6670 HTYPE = constants.HTYPE_INSTANCE
6673 def CheckArguments(self):
6675 if self.op.beparams:
6676 # fill the beparams dict
6677 objects.UpgradeBeParams(self.op.beparams)
6678 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6680 def ExpandNames(self):
6681 self._ExpandAndLockInstance()
6682 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
6684 def DeclareLocks(self, level):
6685 if level == locking.LEVEL_NODE_RES:
6686 self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
6688 def BuildHooksEnv(self):
6691 This runs on master, primary and secondary nodes of the instance.
6695 "FORCE": self.op.force,
6698 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6702 def BuildHooksNodes(self):
6703 """Build hooks nodes.
6706 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6709 def CheckPrereq(self):
6710 """Check prerequisites.
6712 This checks that the instance is in the cluster.
6715 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6716 assert self.instance is not None, \
6717 "Cannot retrieve locked instance %s" % self.op.instance_name
6720 if self.op.hvparams:
6721 # check hypervisor parameter syntax (locally)
6722 cluster = self.cfg.GetClusterInfo()
6723 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6724 filled_hvp = cluster.FillHV(instance)
6725 filled_hvp.update(self.op.hvparams)
6726 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
6727 hv_type.CheckParameterSyntax(filled_hvp)
6728 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
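      # Layering sketch (values illustrative): cluster-level defaults from
      # FillHV(instance), e.g. {"kernel_path": "/boot/vmlinuz"}, are overridden
      # by the per-opcode hvparams, e.g. {"kernel_path": "/boot/vmlinuz-test"},
      # before the merged dict is syntax-checked and verified on the nodes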
6730 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6732 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
6734 if self.primary_offline and self.op.ignore_offline_nodes:
6735 self.proc.LogWarning("Ignoring offline primary node")
6737 if self.op.hvparams or self.op.beparams:
6738 self.proc.LogWarning("Overridden parameters are ignored")
6740 _CheckNodeOnline(self, instance.primary_node)
6742 bep = self.cfg.GetClusterInfo().FillBE(instance)
6743 bep.update(self.op.beparams)
6745 # check bridges existence
6746 _CheckInstanceBridgesExist(self, instance)
6748 remote_info = self.rpc.call_instance_info(instance.primary_node,
6750 instance.hypervisor)
6751 remote_info.Raise("Error checking node %s" % instance.primary_node,
6752 prereq=True, ecode=errors.ECODE_ENVIRON)
6753 if not remote_info.payload: # not running already
6754 _CheckNodeFreeMemory(self, instance.primary_node,
6755 "starting instance %s" % instance.name,
6756 bep[constants.BE_MINMEM], instance.hypervisor)
6758 def Exec(self, feedback_fn):
6759 """Start the instance.
6762 instance = self.instance
6763 force = self.op.force
6765 if not self.op.no_remember:
6766 self.cfg.MarkInstanceUp(instance.name)
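      # assumption behind the ordering: recording the "up" state before the
      # actual start means that, should the start below fail, the cluster
      # still considers the instance as meant to be running (e.g. for the
      # watcher to retry)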
6768 if self.primary_offline:
6769 assert self.op.ignore_offline_nodes
6770 self.proc.LogInfo("Primary node offline, marked instance as started")
6772 node_current = instance.primary_node
6774 _StartInstanceDisks(self, instance, force)
6777 self.rpc.call_instance_start(node_current,
6778 (instance, self.op.hvparams,
6780 self.op.startup_paused)
6781 msg = result.fail_msg
6783 _ShutdownInstanceDisks(self, instance)
6784 raise errors.OpExecError("Could not start instance: %s" % msg)
6787 class LUInstanceReboot(LogicalUnit):
6788 """Reboot an instance.
6791 HPATH = "instance-reboot"
6792 HTYPE = constants.HTYPE_INSTANCE
6795 def ExpandNames(self):
6796 self._ExpandAndLockInstance()
6798 def BuildHooksEnv(self):
6801 This runs on master, primary and secondary nodes of the instance.
6805 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
6806 "REBOOT_TYPE": self.op.reboot_type,
6807 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6810 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6814 def BuildHooksNodes(self):
6815 """Build hooks nodes.
6818 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6821 def CheckPrereq(self):
6822 """Check prerequisites.
6824 This checks that the instance is in the cluster.
6827 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6828 assert self.instance is not None, \
6829 "Cannot retrieve locked instance %s" % self.op.instance_name
6830 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6831 _CheckNodeOnline(self, instance.primary_node)
6833 # check bridges existence
6834 _CheckInstanceBridgesExist(self, instance)
6836 def Exec(self, feedback_fn):
6837 """Reboot the instance.
6840 instance = self.instance
6841 ignore_secondaries = self.op.ignore_secondaries
6842 reboot_type = self.op.reboot_type
6844 remote_info = self.rpc.call_instance_info(instance.primary_node,
6846 instance.hypervisor)
6847 remote_info.Raise("Error checking node %s" % instance.primary_node)
6848 instance_running = bool(remote_info.payload)
6850 node_current = instance.primary_node
6852 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
6853 constants.INSTANCE_REBOOT_HARD]:
6854 for disk in instance.disks:
6855 self.cfg.SetDiskID(disk, node_current)
6856 result = self.rpc.call_instance_reboot(node_current, instance,
6858 self.op.shutdown_timeout)
6859 result.Raise("Could not reboot instance")
6861 if instance_running:
6862 result = self.rpc.call_instance_shutdown(node_current, instance,
6863 self.op.shutdown_timeout)
6864 result.Raise("Could not shutdown instance for full reboot")
6865 _ShutdownInstanceDisks(self, instance)
6867 self.LogInfo("Instance %s was already stopped, starting now",
6869 _StartInstanceDisks(self, instance, ignore_secondaries)
6870 result = self.rpc.call_instance_start(node_current,
6871 (instance, None, None), False)
6872 msg = result.fail_msg
6874 _ShutdownInstanceDisks(self, instance)
6875 raise errors.OpExecError("Could not start instance for"
6876 " full reboot: %s" % msg)
6878 self.cfg.MarkInstanceUp(instance.name)
6881 class LUInstanceShutdown(LogicalUnit):
6882 """Shutdown an instance.
6885 HPATH = "instance-stop"
6886 HTYPE = constants.HTYPE_INSTANCE
6889 def ExpandNames(self):
6890 self._ExpandAndLockInstance()
6892 def BuildHooksEnv(self):
6895 This runs on master, primary and secondary nodes of the instance.
6898 env = _BuildInstanceHookEnvByObject(self, self.instance)
6899 env["TIMEOUT"] = self.op.timeout
6902 def BuildHooksNodes(self):
6903 """Build hooks nodes.
6906 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6909 def CheckPrereq(self):
6910 """Check prerequisites.
6912 This checks that the instance is in the cluster.
6915 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6916 assert self.instance is not None, \
6917 "Cannot retrieve locked instance %s" % self.op.instance_name
6919 _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
6921 self.primary_offline = \
6922 self.cfg.GetNodeInfo(self.instance.primary_node).offline
6924 if self.primary_offline and self.op.ignore_offline_nodes:
6925 self.proc.LogWarning("Ignoring offline primary node")
6927 _CheckNodeOnline(self, self.instance.primary_node)
6929 def Exec(self, feedback_fn):
6930 """Shutdown the instance.
6933 instance = self.instance
6934 node_current = instance.primary_node
6935 timeout = self.op.timeout
6937 if not self.op.no_remember:
6938 self.cfg.MarkInstanceDown(instance.name)
6940 if self.primary_offline:
6941 assert self.op.ignore_offline_nodes
6942 self.proc.LogInfo("Primary node offline, marked instance as stopped")
6944 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
6945 msg = result.fail_msg
6947 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
6949 _ShutdownInstanceDisks(self, instance)
6952 class LUInstanceReinstall(LogicalUnit):
6953 """Reinstall an instance.
6956 HPATH = "instance-reinstall"
6957 HTYPE = constants.HTYPE_INSTANCE
6960 def ExpandNames(self):
6961 self._ExpandAndLockInstance()
6963 def BuildHooksEnv(self):
6966 This runs on master, primary and secondary nodes of the instance.
6969 return _BuildInstanceHookEnvByObject(self, self.instance)
6971 def BuildHooksNodes(self):
6972 """Build hooks nodes.
6975 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6978 def CheckPrereq(self):
6979 """Check prerequisites.
6981 This checks that the instance is in the cluster and is not running.
6984 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6985 assert instance is not None, \
6986 "Cannot retrieve locked instance %s" % self.op.instance_name
6987 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
6988 " offline, cannot reinstall")
6990 if instance.disk_template == constants.DT_DISKLESS:
6991 raise errors.OpPrereqError("Instance '%s' has no disks" %
6992 self.op.instance_name,
6994 _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
6996 if self.op.os_type is not None:
6998 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
6999 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
7000 instance_os = self.op.os_type
7002 instance_os = instance.os
7004 nodelist = list(instance.all_nodes)
7006 if self.op.osparams:
7007 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
7008 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
7009 self.os_inst = i_osdict # the new dict (without defaults)
7013 self.instance = instance
7015 def Exec(self, feedback_fn):
7016 """Reinstall the instance.
7019 inst = self.instance
7021 if self.op.os_type is not None:
7022 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
7023 inst.os = self.op.os_type
7024 # Write to configuration
7025 self.cfg.Update(inst, feedback_fn)
7027 _StartInstanceDisks(self, inst, None)
7029 feedback_fn("Running the instance OS create scripts...")
7030 # FIXME: pass debug option from opcode to backend
7031 result = self.rpc.call_instance_os_add(inst.primary_node,
7032 (inst, self.os_inst), True,
7033 self.op.debug_level)
7034 result.Raise("Could not install OS for instance %s on node %s" %
7035 (inst.name, inst.primary_node))
7037 _ShutdownInstanceDisks(self, inst)
7040 class LUInstanceRecreateDisks(LogicalUnit):
7041 """Recreate an instance's missing disks.
7044 HPATH = "instance-recreate-disks"
7045 HTYPE = constants.HTYPE_INSTANCE
7048 _MODIFYABLE = frozenset([
7049 constants.IDISK_SIZE,
7050 constants.IDISK_MODE,
7053 # New or changed disk parameters may have different semantics
7054 assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
7055 constants.IDISK_ADOPT,
7057 # TODO: Implement support changing VG while recreating
7059 constants.IDISK_METAVG,
7062 def CheckArguments(self):
7063 if self.op.disks and ht.TPositiveInt(self.op.disks[0]):
7064 # Normalize and convert deprecated list of disk indices
7065 self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
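      # Normalization example: a legacy value such as [2, 0] becomes
      # [(0, {}), (2, {})], i.e. sorted indices paired with empty parameter
      # dicts (indices purely illustrative)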
7067 duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
7069 raise errors.OpPrereqError("Some disks have been specified more than"
7070 " once: %s" % utils.CommaJoin(duplicates),
7073 for (idx, params) in self.op.disks:
7074 utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
7075 unsupported = frozenset(params.keys()) - self._MODIFYABLE
7077 raise errors.OpPrereqError("Parameters for disk %s try to change"
7078 " unmodifyable parameter(s): %s" %
7079 (idx, utils.CommaJoin(unsupported)),
7082 def ExpandNames(self):
7083 self._ExpandAndLockInstance()
7084 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7086 self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
7087 self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
7089 self.needed_locks[locking.LEVEL_NODE] = []
7090 self.needed_locks[locking.LEVEL_NODE_RES] = []
7092 def DeclareLocks(self, level):
7093 if level == locking.LEVEL_NODE:
7094 # if we replace the nodes, we only need to lock the old primary,
7095 # otherwise we need to lock all nodes for disk re-creation
7096 primary_only = bool(self.op.nodes)
7097 self._LockInstancesNodes(primary_only=primary_only)
7098 elif level == locking.LEVEL_NODE_RES:
7100 self.needed_locks[locking.LEVEL_NODE_RES] = \
7101 self.needed_locks[locking.LEVEL_NODE][:]
7103 def BuildHooksEnv(self):
7106 This runs on master, primary and secondary nodes of the instance.
7109 return _BuildInstanceHookEnvByObject(self, self.instance)
7111 def BuildHooksNodes(self):
7112 """Build hooks nodes.
7115 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7118 def CheckPrereq(self):
7119 """Check prerequisites.
7121 This checks that the instance is in the cluster and is not running.
7124 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7125 assert instance is not None, \
7126 "Cannot retrieve locked instance %s" % self.op.instance_name
7128 if len(self.op.nodes) != len(instance.all_nodes):
7129 raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
7130 " %d replacement nodes were specified" %
7131 (instance.name, len(instance.all_nodes),
7132 len(self.op.nodes)),
7134 assert instance.disk_template != constants.DT_DRBD8 or \
7135 len(self.op.nodes) == 2
7136 assert instance.disk_template != constants.DT_PLAIN or \
7137 len(self.op.nodes) == 1
7138 primary_node = self.op.nodes[0]
7140 primary_node = instance.primary_node
7141 _CheckNodeOnline(self, primary_node)
7143 if instance.disk_template == constants.DT_DISKLESS:
7144 raise errors.OpPrereqError("Instance '%s' has no disks" %
7145 self.op.instance_name, errors.ECODE_INVAL)
7147 # if we replace nodes *and* the old primary is offline, we don't check the instance state
7149 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
7150 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE_RES)
7151 old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
7152 if not (self.op.nodes and old_pnode.offline):
7153 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7154 msg="cannot recreate disks")
7157 self.disks = dict(self.op.disks)
7159 self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
7161 maxidx = max(self.disks.keys())
7162 if maxidx >= len(instance.disks):
7163 raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
7166 if (self.op.nodes and
7167 sorted(self.disks.keys()) != range(len(instance.disks))):
7168 raise errors.OpPrereqError("Can't recreate disks partially and"
7169 " change the nodes at the same time",
7172 self.instance = instance
7174 def Exec(self, feedback_fn):
7175 """Recreate the disks.
7178 instance = self.instance
7180 assert (self.owned_locks(locking.LEVEL_NODE) ==
7181 self.owned_locks(locking.LEVEL_NODE_RES))
7184 mods = [] # keeps track of needed changes
7186 for idx, disk in enumerate(instance.disks):
7188 changes = self.disks[idx]
7190 # Disk should not be recreated
7194 # update secondaries for disks, if needed
7195 if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
7196 # need to update the nodes and minors
7197 assert len(self.op.nodes) == 2
7198 assert len(disk.logical_id) == 6 # otherwise disk internals
7200 (_, _, old_port, _, _, old_secret) = disk.logical_id
7201 new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
7202 new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
7203 new_minors[0], new_minors[1], old_secret)
7204 assert len(disk.logical_id) == len(new_id)
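        # the DRBD8 logical_id is the 6-tuple
        # (node_a, node_b, port, minor_a, minor_b, secret); only the nodes and
        # minors are replaced here, port and shared secret are kept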
7208 mods.append((idx, new_id, changes))
7210 # now that we have passed all asserts above, we can apply the mods
7211 # in a single run (to avoid partial changes)
7212 for idx, new_id, changes in mods:
7213 disk = instance.disks[idx]
7214 if new_id is not None:
7215 assert disk.dev_type == constants.LD_DRBD8
7216 disk.logical_id = new_id
7218 disk.Update(size=changes.get(constants.IDISK_SIZE, None),
7219 mode=changes.get(constants.IDISK_MODE, None))
7221 # change primary node, if needed
7223 instance.primary_node = self.op.nodes[0]
7224 self.LogWarning("Changing the instance's nodes, you will have to"
7225 " remove any disks left on the older nodes manually")
7228 self.cfg.Update(instance, feedback_fn)
7230 _CreateDisks(self, instance, to_skip=to_skip)
7233 class LUInstanceRename(LogicalUnit):
7234 """Rename an instance.
7237 HPATH = "instance-rename"
7238 HTYPE = constants.HTYPE_INSTANCE
7240 def CheckArguments(self):
7244 if self.op.ip_check and not self.op.name_check:
7245 # TODO: make the ip check more flexible and not depend on the name check
7246 raise errors.OpPrereqError("IP address check requires a name check",
7249 def BuildHooksEnv(self):
7252 This runs on master, primary and secondary nodes of the instance.
7255 env = _BuildInstanceHookEnvByObject(self, self.instance)
7256 env["INSTANCE_NEW_NAME"] = self.op.new_name
7259 def BuildHooksNodes(self):
7260 """Build hooks nodes.
7263 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7266 def CheckPrereq(self):
7267 """Check prerequisites.
7269 This checks that the instance is in the cluster and is not running.
7272 self.op.instance_name = _ExpandInstanceName(self.cfg,
7273 self.op.instance_name)
7274 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7275 assert instance is not None
7276 _CheckNodeOnline(self, instance.primary_node)
7277 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7278 msg="cannot rename")
7279 self.instance = instance
7281 new_name = self.op.new_name
7282 if self.op.name_check:
7283 hostname = netutils.GetHostname(name=new_name)
7284 if hostname.name != new_name:
7285 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
7287 if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
7288 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
7289 " same as given hostname '%s'") %
7290 (hostname.name, self.op.new_name),
7292 new_name = self.op.new_name = hostname.name
7293 if (self.op.ip_check and
7294 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
7295 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7296 (hostname.ip, new_name),
7297 errors.ECODE_NOTUNIQUE)
7299 instance_list = self.cfg.GetInstanceList()
7300 if new_name in instance_list and new_name != instance.name:
7301 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7302 new_name, errors.ECODE_EXISTS)
7304 def Exec(self, feedback_fn):
7305 """Rename the instance.
7308 inst = self.instance
7309 old_name = inst.name
7311 rename_file_storage = False
7312 if (inst.disk_template in constants.DTS_FILEBASED and
7313 self.op.new_name != inst.name):
7314 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7315 rename_file_storage = True
7317 self.cfg.RenameInstance(inst.name, self.op.new_name)
7318 # Change the instance lock. This is definitely safe while we hold the BGL.
7319 # Otherwise the new lock would have to be added in acquired mode.
7321 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
7322 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
7324 # re-read the instance from the configuration after rename
7325 inst = self.cfg.GetInstanceInfo(self.op.new_name)
7327 if rename_file_storage:
7328 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7329 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
7330 old_file_storage_dir,
7331 new_file_storage_dir)
7332 result.Raise("Could not rename on node %s directory '%s' to '%s'"
7333 " (but the instance has been renamed in Ganeti)" %
7334 (inst.primary_node, old_file_storage_dir,
7335 new_file_storage_dir))
7337 _StartInstanceDisks(self, inst, None)
7339 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
7340 old_name, self.op.debug_level)
7341 msg = result.fail_msg
7343 msg = ("Could not run OS rename script for instance %s on node %s"
7344 " (but the instance has been renamed in Ganeti): %s" %
7345 (inst.name, inst.primary_node, msg))
7346 self.proc.LogWarning(msg)
7348 _ShutdownInstanceDisks(self, inst)
7353 class LUInstanceRemove(LogicalUnit):
7354 """Remove an instance.
7357 HPATH = "instance-remove"
7358 HTYPE = constants.HTYPE_INSTANCE
7361 def ExpandNames(self):
7362 self._ExpandAndLockInstance()
7363 self.needed_locks[locking.LEVEL_NODE] = []
7364 self.needed_locks[locking.LEVEL_NODE_RES] = []
7365 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7367 def DeclareLocks(self, level):
7368 if level == locking.LEVEL_NODE:
7369 self._LockInstancesNodes()
7370 elif level == locking.LEVEL_NODE_RES:
7372 self.needed_locks[locking.LEVEL_NODE_RES] = \
7373 self.needed_locks[locking.LEVEL_NODE][:]
7375 def BuildHooksEnv(self):
7378 This runs on master, primary and secondary nodes of the instance.
7381 env = _BuildInstanceHookEnvByObject(self, self.instance)
7382 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
7385 def BuildHooksNodes(self):
7386 """Build hooks nodes.
7389 nl = [self.cfg.GetMasterNode()]
7390 nl_post = list(self.instance.all_nodes) + nl
7391 return (nl, nl_post)
7393 def CheckPrereq(self):
7394 """Check prerequisites.
7396 This checks that the instance is in the cluster.
7399 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7400 assert self.instance is not None, \
7401 "Cannot retrieve locked instance %s" % self.op.instance_name
7403 def Exec(self, feedback_fn):
7404 """Remove the instance.
7407 instance = self.instance
7408 logging.info("Shutting down instance %s on node %s",
7409 instance.name, instance.primary_node)
7411 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
7412 self.op.shutdown_timeout)
7413 msg = result.fail_msg
7415 if self.op.ignore_failures:
7416 feedback_fn("Warning: can't shutdown instance: %s" % msg)
7418 raise errors.OpExecError("Could not shutdown instance %s on"
7420 (instance.name, instance.primary_node, msg))
7422 assert (self.owned_locks(locking.LEVEL_NODE) ==
7423 self.owned_locks(locking.LEVEL_NODE_RES))
7424 assert not (set(instance.all_nodes) -
7425 self.owned_locks(locking.LEVEL_NODE)), \
7426 "Not owning correct locks"
7428 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
7431 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
7432 """Utility function to remove an instance.
7435 logging.info("Removing block devices for instance %s", instance.name)
7437 if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
7438 if not ignore_failures:
7439 raise errors.OpExecError("Can't remove instance's disks")
7440 feedback_fn("Warning: can't remove instance's disks")
7442 logging.info("Removing instance %s out of cluster config", instance.name)
7444 lu.cfg.RemoveInstance(instance.name)
7446 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
7447 "Instance lock removal conflict"
7449 # Remove lock for the instance
7450 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
7453 class LUInstanceQuery(NoHooksLU):
7454 """Logical unit for querying instances.
7457 # pylint: disable=W0142
7460 def CheckArguments(self):
7461 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
7462 self.op.output_fields, self.op.use_locking)
7464 def ExpandNames(self):
7465 self.iq.ExpandNames(self)
7467 def DeclareLocks(self, level):
7468 self.iq.DeclareLocks(self, level)
7470 def Exec(self, feedback_fn):
7471 return self.iq.OldStyleQuery(self)
7474 class LUInstanceFailover(LogicalUnit):
7475 """Failover an instance.
7478 HPATH = "instance-failover"
7479 HTYPE = constants.HTYPE_INSTANCE
7482 def CheckArguments(self):
7483 """Check the arguments.
7486 self.iallocator = getattr(self.op, "iallocator", None)
7487 self.target_node = getattr(self.op, "target_node", None)
7489 def ExpandNames(self):
7490 self._ExpandAndLockInstance()
7492 if self.op.target_node is not None:
7493 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7495 self.needed_locks[locking.LEVEL_NODE] = []
7496 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7498 self.needed_locks[locking.LEVEL_NODE_RES] = []
7499 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
7501 ignore_consistency = self.op.ignore_consistency
7502 shutdown_timeout = self.op.shutdown_timeout
7503 self._migrater = TLMigrateInstance(self, self.op.instance_name,
7506 ignore_consistency=ignore_consistency,
7507 shutdown_timeout=shutdown_timeout,
7508 ignore_ipolicy=self.op.ignore_ipolicy)
7509 self.tasklets = [self._migrater]
7511 def DeclareLocks(self, level):
7512 if level == locking.LEVEL_NODE:
7513 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
7514 if instance.disk_template in constants.DTS_EXT_MIRROR:
7515 if self.op.target_node is None:
7516 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7518 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7519 self.op.target_node]
7520 del self.recalculate_locks[locking.LEVEL_NODE]
7522 self._LockInstancesNodes()
7523 elif level == locking.LEVEL_NODE_RES:
7525 self.needed_locks[locking.LEVEL_NODE_RES] = \
7526 self.needed_locks[locking.LEVEL_NODE][:]
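# Note: the node resource locks are kept as a copy of the node locks so that
# changes to node resources on the involved nodes are serialized with other
# jobs at the LEVEL_NODE_RES locking level.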
7528 def BuildHooksEnv(self):
7531 This runs on master, primary and secondary nodes of the instance.
7534 instance = self._migrater.instance
7535 source_node = instance.primary_node
7536 target_node = self.op.target_node
7538 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
7539 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7540 "OLD_PRIMARY": source_node,
7541 "NEW_PRIMARY": target_node,
7544 if instance.disk_template in constants.DTS_INT_MIRROR:
7545 env["OLD_SECONDARY"] = instance.secondary_nodes[0]
7546 env["NEW_SECONDARY"] = source_node
7548 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
7550 env.update(_BuildInstanceHookEnvByObject(self, instance))
7554 def BuildHooksNodes(self):
7555 """Build hooks nodes.
7558 instance = self._migrater.instance
7559 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
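# BuildHooksNodes returns a (pre-hook nodes, post-hook nodes) pair; here the
# instance's primary node is added only to the post-hook list.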
7560 return (nl, nl + [instance.primary_node])
7563 class LUInstanceMigrate(LogicalUnit):
7564 """Migrate an instance.
7566 This is migration without shutting the instance down, as opposed to
7567 failover, which requires shutting it down first.
7570 HPATH = "instance-migrate"
7571 HTYPE = constants.HTYPE_INSTANCE
7574 def ExpandNames(self):
7575 self._ExpandAndLockInstance()
7577 if self.op.target_node is not None:
7578 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7580 self.needed_locks[locking.LEVEL_NODE] = []
7581 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7583 self.needed_locks[locking.LEVEL_NODE_RES] = []
7584 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
7586 self._migrater = \
7587 TLMigrateInstance(self, self.op.instance_name,
7588 cleanup=self.op.cleanup,
7590 fallback=self.op.allow_failover,
7591 allow_runtime_changes=self.op.allow_runtime_changes,
7592 ignore_ipolicy=self.op.ignore_ipolicy)
7593 self.tasklets = [self._migrater]
7595 def DeclareLocks(self, level):
7596 if level == locking.LEVEL_NODE:
7597 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
7598 if instance.disk_template in constants.DTS_EXT_MIRROR:
7599 if self.op.target_node is None:
7600 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7602 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7603 self.op.target_node]
7604 del self.recalculate_locks[locking.LEVEL_NODE]
7606 self._LockInstancesNodes()
7607 elif level == locking.LEVEL_NODE_RES:
7609 self.needed_locks[locking.LEVEL_NODE_RES] = \
7610 self.needed_locks[locking.LEVEL_NODE][:]
7612 def BuildHooksEnv(self):
7615 This runs on master, primary and secondary nodes of the instance.
7618 instance = self._migrater.instance
7619 source_node = instance.primary_node
7620 target_node = self.op.target_node
7621 env = _BuildInstanceHookEnvByObject(self, instance)
7623 "MIGRATE_LIVE": self._migrater.live,
7624 "MIGRATE_CLEANUP": self.op.cleanup,
7625 "OLD_PRIMARY": source_node,
7626 "NEW_PRIMARY": target_node,
7627 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7630 if instance.disk_template in constants.DTS_INT_MIRROR:
7631 env["OLD_SECONDARY"] = target_node
7632 env["NEW_SECONDARY"] = source_node
7634 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
7638 def BuildHooksNodes(self):
7639 """Build hooks nodes.
7642 instance = self._migrater.instance
7643 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
7644 return (nl, nl + [instance.primary_node])
7647 class LUInstanceMove(LogicalUnit):
7648 """Move an instance by data-copying.
7651 HPATH = "instance-move"
7652 HTYPE = constants.HTYPE_INSTANCE
7655 def ExpandNames(self):
7656 self._ExpandAndLockInstance()
7657 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7658 self.op.target_node = target_node
7659 self.needed_locks[locking.LEVEL_NODE] = [target_node]
7660 self.needed_locks[locking.LEVEL_NODE_RES] = []
7661 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7663 def DeclareLocks(self, level):
7664 if level == locking.LEVEL_NODE:
7665 self._LockInstancesNodes(primary_only=True)
7666 elif level == locking.LEVEL_NODE_RES:
7668 self.needed_locks[locking.LEVEL_NODE_RES] = \
7669 self.needed_locks[locking.LEVEL_NODE][:]
7671 def BuildHooksEnv(self):
7674 This runs on master, primary and secondary nodes of the instance.
7678 "TARGET_NODE": self.op.target_node,
7679 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7681 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7684 def BuildHooksNodes(self):
7685 """Build hooks nodes.
7689 self.cfg.GetMasterNode(),
7690 self.instance.primary_node,
7691 self.op.target_node,
7695 def CheckPrereq(self):
7696 """Check prerequisites.
7698 This checks that the instance is in the cluster.
7701 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7702 assert self.instance is not None, \
7703 "Cannot retrieve locked instance %s" % self.op.instance_name
7705 node = self.cfg.GetNodeInfo(self.op.target_node)
7706 assert node is not None, \
7707 "Cannot retrieve locked node %s" % self.op.target_node
7709 self.target_node = target_node = node.name
7711 if target_node == instance.primary_node:
7712 raise errors.OpPrereqError("Instance %s is already on the node %s" %
7713 (instance.name, target_node),
7716 bep = self.cfg.GetClusterInfo().FillBE(instance)
7718 for idx, dsk in enumerate(instance.disks):
7719 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
7720 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
7721 " cannot copy" % idx, errors.ECODE_STATE)
7723 _CheckNodeOnline(self, target_node)
7724 _CheckNodeNotDrained(self, target_node)
7725 _CheckNodeVmCapable(self, target_node)
7726 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
7727 self.cfg.GetNodeGroup(node.group))
7728 _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
7729 ignore=self.op.ignore_ipolicy)
7731 if instance.admin_state == constants.ADMINST_UP:
7732 # check memory requirements on the secondary node
7733 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
7734 instance.name, bep[constants.BE_MAXMEM],
7735 instance.hypervisor)
7737 self.LogInfo("Not checking memory on the secondary node as"
7738 " instance will not be started")
7740 # check bridge existence
7741 _CheckInstanceBridgesExist(self, instance, node=target_node)
7743 def Exec(self, feedback_fn):
7744 """Move an instance.
7746 The move is done by shutting it down on its present node, copying
7747 the data over (slow) and starting it on the new node.
7750 instance = self.instance
7752 source_node = instance.primary_node
7753 target_node = self.target_node
7755 self.LogInfo("Shutting down instance %s on source node %s",
7756 instance.name, source_node)
7758 assert (self.owned_locks(locking.LEVEL_NODE) ==
7759 self.owned_locks(locking.LEVEL_NODE_RES))
7761 result = self.rpc.call_instance_shutdown(source_node, instance,
7762 self.op.shutdown_timeout)
7763 msg = result.fail_msg
7764 if msg:
7765 if self.op.ignore_consistency:
7766 self.proc.LogWarning("Could not shutdown instance %s on node %s."
7767 " Proceeding anyway. Please make sure node"
7768 " %s is down. Error details: %s",
7769 instance.name, source_node, source_node, msg)
7770 else:
7771 raise errors.OpExecError("Could not shutdown instance %s on"
7772 " node %s: %s" %
7773 (instance.name, source_node, msg))
7775 # create the target disks
7777 _CreateDisks(self, instance, target_node=target_node)
7778 except errors.OpExecError:
7779 self.LogWarning("Device creation failed, reverting...")
7781 _RemoveDisks(self, instance, target_node=target_node)
7783 self.cfg.ReleaseDRBDMinors(instance.name)
7786 cluster_name = self.cfg.GetClusterInfo().cluster_name
7789 # activate, get path, copy the data over
7790 for idx, disk in enumerate(instance.disks):
7791 self.LogInfo("Copying data for disk %d", idx)
7792 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
7793 instance.name, True, idx)
7795 self.LogWarning("Can't assemble newly created disk %d: %s",
7796 idx, result.fail_msg)
7797 errs.append(result.fail_msg)
7799 dev_path = result.payload
7800 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
7801 target_node, dev_path,
7804 self.LogWarning("Can't copy data over for disk %d: %s",
7805 idx, result.fail_msg)
7806 errs.append(result.fail_msg)
7810 self.LogWarning("Some disks failed to copy, aborting")
7812 _RemoveDisks(self, instance, target_node=target_node)
7814 self.cfg.ReleaseDRBDMinors(instance.name)
7815 raise errors.OpExecError("Errors during disk copy: %s" %
7818 instance.primary_node = target_node
7819 self.cfg.Update(instance, feedback_fn)
7821 self.LogInfo("Removing the disks on the original node")
7822 _RemoveDisks(self, instance, target_node=source_node)
7824 # Only start the instance if it's marked as up
7825 if instance.admin_state == constants.ADMINST_UP:
7826 self.LogInfo("Starting instance %s on node %s",
7827 instance.name, target_node)
7829 disks_ok, _ = _AssembleInstanceDisks(self, instance,
7830 ignore_secondaries=True)
7831 if not disks_ok:
7832 _ShutdownInstanceDisks(self, instance)
7833 raise errors.OpExecError("Can't activate the instance's disks")
7835 result = self.rpc.call_instance_start(target_node,
7836 (instance, None, None), False)
7837 msg = result.fail_msg
7838 if msg:
7839 _ShutdownInstanceDisks(self, instance)
7840 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
7841 (instance.name, target_node, msg))
7844 class LUNodeMigrate(LogicalUnit):
7845 """Migrate all instances from a node.
7848 HPATH = "node-migrate"
7849 HTYPE = constants.HTYPE_NODE
7852 def CheckArguments(self):
7855 def ExpandNames(self):
7856 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7858 self.share_locks = _ShareAll()
7859 self.needed_locks = {
7860 locking.LEVEL_NODE: [self.op.node_name],
7863 def BuildHooksEnv(self):
7866 This runs on the master, the primary and all the secondaries.
7870 "NODE_NAME": self.op.node_name,
7871 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7874 def BuildHooksNodes(self):
7875 """Build hooks nodes.
7878 nl = [self.cfg.GetMasterNode()]
7881 def CheckPrereq(self):
7884 def Exec(self, feedback_fn):
7885 # Prepare jobs for migration instances
7886 allow_runtime_changes = self.op.allow_runtime_changes
7888 [opcodes.OpInstanceMigrate(instance_name=inst.name,
7891 iallocator=self.op.iallocator,
7892 target_node=self.op.target_node,
7893 allow_runtime_changes=allow_runtime_changes,
7894 ignore_ipolicy=self.op.ignore_ipolicy)]
7895 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
7898 # TODO: Run iallocator in this opcode and pass correct placement options to
7899 # OpInstanceMigrate. Since other jobs can modify the cluster between
7900 # running the iallocator and the actual migration, a good consistency model
7901 # will have to be found.
7903 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
7904 frozenset([self.op.node_name]))
7906 return ResultWithJobs(jobs)
7909 class TLMigrateInstance(Tasklet):
7910 """Tasklet class for instance migration.
7913 @ivar live: whether the migration will be done live or non-live;
7914 this variable is initialized only after CheckPrereq has run
7915 @type cleanup: boolean
7916 @ivar cleanup: Whether we clean up from a failed migration
7917 @type iallocator: string
7918 @ivar iallocator: The iallocator used to determine target_node
7919 @type target_node: string
7920 @ivar target_node: If given, the target_node to reallocate the instance to
7921 @type failover: boolean
7922 @ivar failover: Whether operation results in failover or migration
7923 @type fallback: boolean
7924 @ivar fallback: Whether fallback to failover is allowed if migration not
7925 possible
7926 @type ignore_consistency: boolean
7927 @ivar ignore_consistency: Whether we should ignore consistency between source
7928 and target node
7929 @type shutdown_timeout: int
7930 @ivar shutdown_timeout: In case of failover timeout of the shutdown
7931 @type ignore_ipolicy: bool
7932 @ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
7937 _MIGRATION_POLL_INTERVAL = 1 # seconds
7938 _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
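# These two constants drive the polling loop in _ExecMigration below: the
# migration status is queried every _MIGRATION_POLL_INTERVAL seconds and a
# progress message is emitted roughly every _MIGRATION_FEEDBACK_INTERVAL
# seconds.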
7940 def __init__(self, lu, instance_name, cleanup=False,
7941 failover=False, fallback=False,
7942 ignore_consistency=False,
7943 allow_runtime_changes=True,
7944 shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
7945 ignore_ipolicy=False):
7946 """Initializes this class.
7949 Tasklet.__init__(self, lu)
7952 self.instance_name = instance_name
7953 self.cleanup = cleanup
7954 self.live = False # will be overridden later
7955 self.failover = failover
7956 self.fallback = fallback
7957 self.ignore_consistency = ignore_consistency
7958 self.shutdown_timeout = shutdown_timeout
7959 self.ignore_ipolicy = ignore_ipolicy
7960 self.allow_runtime_changes = allow_runtime_changes
7962 def CheckPrereq(self):
7963 """Check prerequisites.
7965 This checks that the instance is in the cluster.
7968 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
7969 instance = self.cfg.GetInstanceInfo(instance_name)
7970 assert instance is not None
7971 self.instance = instance
7972 cluster = self.cfg.GetClusterInfo()
7974 if (not self.cleanup and
7975 not instance.admin_state == constants.ADMINST_UP and
7976 not self.failover and self.fallback):
7977 self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
7978 " switching to failover")
7979 self.failover = True
7981 if instance.disk_template not in constants.DTS_MIRRORED:
7982 if self.failover:
7983 text = "failover"
7984 else:
7985 text = "migration"
7986 raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
7987 " %s" % (instance.disk_template, text),
7990 if instance.disk_template in constants.DTS_EXT_MIRROR:
7991 _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
7993 if self.lu.op.iallocator:
7994 self._RunAllocator()
7996 # We set self.target_node as it is required by
7998 self.target_node = self.lu.op.target_node
8000 # Check that the target node is correct in terms of instance policy
8001 nodeinfo = self.cfg.GetNodeInfo(self.target_node)
8002 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8003 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
8004 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8005 ignore=self.ignore_ipolicy)
8007 # self.target_node is already populated, either directly or by the
8009 target_node = self.target_node
8010 if self.target_node == instance.primary_node:
8011 raise errors.OpPrereqError("Cannot migrate instance %s"
8012 " to its primary (%s)" %
8013 (instance.name, instance.primary_node))
8015 if len(self.lu.tasklets) == 1:
8016 # It is safe to release locks only when we're the only tasklet
8018 _ReleaseLocks(self.lu, locking.LEVEL_NODE,
8019 keep=[instance.primary_node, self.target_node])
8022 secondary_nodes = instance.secondary_nodes
8023 if not secondary_nodes:
8024 raise errors.ConfigurationError("No secondary node but using"
8025 " %s disk template" %
8026 instance.disk_template)
8027 target_node = secondary_nodes[0]
8028 if self.lu.op.iallocator or (self.lu.op.target_node and
8029 self.lu.op.target_node != target_node):
8030 if self.failover:
8031 text = "failed over"
8032 else:
8033 text = "migrated"
8034 raise errors.OpPrereqError("Instances with disk template %s cannot"
8035 " be %s to arbitrary nodes"
8036 " (neither an iallocator nor a target"
8037 " node can be passed)" %
8038 (instance.disk_template, text),
8040 nodeinfo = self.cfg.GetNodeInfo(target_node)
8041 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8042 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
8043 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8044 ignore=self.ignore_ipolicy)
8046 i_be = cluster.FillBE(instance)
8048 # check memory requirements on the secondary node
8049 if (not self.cleanup and
8050 (not self.failover or instance.admin_state == constants.ADMINST_UP)):
8051 self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
8052 "migrating instance %s" %
8054 i_be[constants.BE_MINMEM],
8055 instance.hypervisor)
8057 self.lu.LogInfo("Not checking memory on the secondary node as"
8058 " instance will not be started")
8060 # check if failover must be forced instead of migration
8061 if (not self.cleanup and not self.failover and
8062 i_be[constants.BE_ALWAYS_FAILOVER]):
8064 self.lu.LogInfo("Instance configured to always failover; fallback"
8066 self.failover = True
8068 raise errors.OpPrereqError("This instance has been configured to"
8069 " always failover, please allow failover",
8072 # check bridge existence
8073 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
8075 if not self.cleanup:
8076 _CheckNodeNotDrained(self.lu, target_node)
8077 if not self.failover:
8078 result = self.rpc.call_instance_migratable(instance.primary_node,
8080 if result.fail_msg and self.fallback:
8081 self.lu.LogInfo("Can't migrate, instance offline, fallback to"
8083 self.failover = True
8085 result.Raise("Can't migrate, please use failover",
8086 prereq=True, ecode=errors.ECODE_STATE)
8088 assert not (self.failover and self.cleanup)
8090 if not self.failover:
8091 if self.lu.op.live is not None and self.lu.op.mode is not None:
8092 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
8093 " parameters are accepted",
8095 if self.lu.op.live is not None:
8097 self.lu.op.mode = constants.HT_MIGRATION_LIVE
8099 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
8100 # reset the 'live' parameter to None so that repeated
8101 # invocations of CheckPrereq do not raise an exception
8102 self.lu.op.live = None
8103 elif self.lu.op.mode is None:
8104 # read the default value from the hypervisor
8105 i_hv = cluster.FillHV(self.instance, skip_globals=False)
8106 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
8108 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
8109 else:
8110 # Failover is never live
8111 self.live = False
8113 if not (self.failover or self.cleanup):
8114 remote_info = self.rpc.call_instance_info(instance.primary_node,
8116 instance.hypervisor)
8117 remote_info.Raise("Error checking instance on node %s" %
8118 instance.primary_node)
8119 instance_running = bool(remote_info.payload)
8120 if instance_running:
8121 self.current_mem = int(remote_info.payload["memory"])
8123 def _RunAllocator(self):
8124 """Run the allocator based on input opcode.
8127 # FIXME: add a self.ignore_ipolicy option
8128 ial = IAllocator(self.cfg, self.rpc,
8129 mode=constants.IALLOCATOR_MODE_RELOC,
8130 name=self.instance_name,
8131 relocate_from=[self.instance.primary_node],
8134 ial.Run(self.lu.op.iallocator)
8137 raise errors.OpPrereqError("Can't compute nodes using"
8138 " iallocator '%s': %s" %
8139 (self.lu.op.iallocator, ial.info),
8141 if len(ial.result) != ial.required_nodes:
8142 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8143 " of nodes (%s), required %s" %
8144 (self.lu.op.iallocator, len(ial.result),
8145 ial.required_nodes), errors.ECODE_FAULT)
8146 self.target_node = ial.result[0]
8147 self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8148 self.instance_name, self.lu.op.iallocator,
8149 utils.CommaJoin(ial.result))
8151 def _WaitUntilSync(self):
8152 """Poll with custom rpc for disk sync.
8154 This uses our own step-based rpc call.
8157 self.feedback_fn("* wait until resync is done")
8161 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
8163 (self.instance.disks,
8166 for node, nres in result.items():
8167 nres.Raise("Cannot resync disks on node %s" % node)
8168 node_done, node_percent = nres.payload
8169 all_done = all_done and node_done
8170 if node_percent is not None:
8171 min_percent = min(min_percent, node_percent)
8173 if min_percent < 100:
8174 self.feedback_fn(" - progress: %.1f%%" % min_percent)
8177 def _EnsureSecondary(self, node):
8178 """Demote a node to secondary.
8181 self.feedback_fn("* switching node %s to secondary mode" % node)
8183 for dev in self.instance.disks:
8184 self.cfg.SetDiskID(dev, node)
8186 result = self.rpc.call_blockdev_close(node, self.instance.name,
8187 self.instance.disks)
8188 result.Raise("Cannot change disk to secondary on node %s" % node)
8190 def _GoStandalone(self):
8191 """Disconnect from the network.
8194 self.feedback_fn("* changing into standalone mode")
8195 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
8196 self.instance.disks)
8197 for node, nres in result.items():
8198 nres.Raise("Cannot disconnect disks node %s" % node)
8200 def _GoReconnect(self, multimaster):
8201 """Reconnect to the network.
8207 msg = "single-master"
8208 self.feedback_fn("* changing disks into %s mode" % msg)
8209 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
8210 (self.instance.disks, self.instance),
8211 self.instance.name, multimaster)
8212 for node, nres in result.items():
8213 nres.Raise("Cannot change disks config on node %s" % node)
8215 def _ExecCleanup(self):
8216 """Try to cleanup after a failed migration.
8218 The cleanup is done by:
8219 - check that the instance is running only on one node
8220 (and update the config if needed)
8221 - change disks on its secondary node to secondary
8222 - wait until disks are fully synchronized
8223 - disconnect from the network
8224 - change disks into single-master mode
8225 - wait again until disks are fully synchronized
8228 instance = self.instance
8229 target_node = self.target_node
8230 source_node = self.source_node
8232 # check running on only one node
8233 self.feedback_fn("* checking where the instance actually runs"
8234 " (if this hangs, the hypervisor might be in"
8236 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
8237 for node, result in ins_l.items():
8238 result.Raise("Can't contact node %s" % node)
8240 runningon_source = instance.name in ins_l[source_node].payload
8241 runningon_target = instance.name in ins_l[target_node].payload
8243 if runningon_source and runningon_target:
8244 raise errors.OpExecError("Instance seems to be running on two nodes,"
8245 " or the hypervisor is confused; you will have"
8246 " to ensure manually that it runs only on one"
8247 " and restart this operation")
8249 if not (runningon_source or runningon_target):
8250 raise errors.OpExecError("Instance does not seem to be running at all;"
8251 " in this case it's safer to repair by"
8252 " running 'gnt-instance stop' to ensure disk"
8253 " shutdown, and then restarting it")
8255 if runningon_target:
8256 # the migration has actually succeeded, we need to update the config
8257 self.feedback_fn("* instance running on secondary node (%s),"
8258 " updating config" % target_node)
8259 instance.primary_node = target_node
8260 self.cfg.Update(instance, self.feedback_fn)
8261 demoted_node = source_node
8263 self.feedback_fn("* instance confirmed to be running on its"
8264 " primary node (%s)" % source_node)
8265 demoted_node = target_node
8267 if instance.disk_template in constants.DTS_INT_MIRROR:
8268 self._EnsureSecondary(demoted_node)
8270 self._WaitUntilSync()
8271 except errors.OpExecError:
8272 # we ignore errors here, since if the device is standalone, it
8273 # won't be able to sync
8275 self._GoStandalone()
8276 self._GoReconnect(False)
8277 self._WaitUntilSync()
8279 self.feedback_fn("* done")
8281 def _RevertDiskStatus(self):
8282 """Try to revert the disk status after a failed migration.
8285 target_node = self.target_node
8286 if self.instance.disk_template in constants.DTS_EXT_MIRROR:
8290 self._EnsureSecondary(target_node)
8291 self._GoStandalone()
8292 self._GoReconnect(False)
8293 self._WaitUntilSync()
8294 except errors.OpExecError, err:
8295 self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
8296 " please try to recover the instance manually;"
8297 " error '%s'" % str(err))
8299 def _AbortMigration(self):
8300 """Call the hypervisor code to abort a started migration.
8303 instance = self.instance
8304 target_node = self.target_node
8305 source_node = self.source_node
8306 migration_info = self.migration_info
8308 abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
8312 abort_msg = abort_result.fail_msg
8314 logging.error("Aborting migration failed on target node %s: %s",
8315 target_node, abort_msg)
8316 # Don't raise an exception here, as we still have to try to revert the
8317 # disk status, even if this step failed.
8319 abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
8320 instance, False, self.live)
8321 abort_msg = abort_result.fail_msg
8323 logging.error("Aborting migration failed on source node %s: %s",
8324 source_node, abort_msg)
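# Note: both finalize calls above run the abort path; the False passed to the
# source-node call is presumably the success flag, telling the hypervisor
# layer to roll back rather than commit the partially started migration.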
8326 def _ExecMigration(self):
8327 """Migrate an instance.
8329 The migrate is done by:
8330 - change the disks into dual-master mode
8331 - wait until disks are fully synchronized again
8332 - migrate the instance
8333 - change disks on the new secondary node (the old primary) to secondary
8334 - wait until disks are fully synchronized
8335 - change disks into single-master mode
8338 instance = self.instance
8339 target_node = self.target_node
8340 source_node = self.source_node
8342 # Check for hypervisor version mismatch and warn the user.
8343 nodeinfo = self.rpc.call_node_info([source_node, target_node],
8344 None, [self.instance.hypervisor])
8345 for ninfo in nodeinfo.values():
8346 ninfo.Raise("Unable to retrieve node information from node '%s'" %
8348 (_, _, (src_info, )) = nodeinfo[source_node].payload
8349 (_, _, (dst_info, )) = nodeinfo[target_node].payload
8351 if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
8352 (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
8353 src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
8354 dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
8355 if src_version != dst_version:
8356 self.feedback_fn("* warning: hypervisor version mismatch between"
8357 " source (%s) and target (%s) node" %
8358 (src_version, dst_version))
8360 self.feedback_fn("* checking disk consistency between source and target")
8361 for (idx, dev) in enumerate(instance.disks):
8362 if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
8363 raise errors.OpExecError("Disk %s is degraded or not fully"
8364 " synchronized on target node,"
8365 " aborting migration" % idx)
8367 if self.current_mem > self.tgt_free_mem:
8368 if not self.allow_runtime_changes:
8369 raise errors.OpExecError("Memory ballooning not allowed and not enough"
8370 " free memory to fit instance %s on target"
8371 " node %s (have %dMB, need %dMB)" %
8372 (instance.name, target_node,
8373 self.tgt_free_mem, self.current_mem))
8374 self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
8375 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
8378 rpcres.Raise("Cannot modify instance runtime memory")
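# Note: when the target node does not have enough free memory but runtime
# changes are allowed, the instance is ballooned down to the target node's
# free memory right before the migration so that it can fit there.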
8380 # First get the migration information from the remote node
8381 result = self.rpc.call_migration_info(source_node, instance)
8382 msg = result.fail_msg
8384 log_err = ("Failed fetching source migration information from %s: %s" %
8386 logging.error(log_err)
8387 raise errors.OpExecError(log_err)
8389 self.migration_info = migration_info = result.payload
8391 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8392 # Then switch the disks to master/master mode
8393 self._EnsureSecondary(target_node)
8394 self._GoStandalone()
8395 self._GoReconnect(True)
8396 self._WaitUntilSync()
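# For internally mirrored (DRBD) disk templates the devices are reconnected in
# multimaster (dual-primary) mode first, so that both the source and the
# target node can access the disks while the instance is migrated live.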
8398 self.feedback_fn("* preparing %s to accept the instance" % target_node)
8399 result = self.rpc.call_accept_instance(target_node,
8402 self.nodes_ip[target_node])
8404 msg = result.fail_msg
8406 logging.error("Instance pre-migration failed, trying to revert"
8407 " disk status: %s", msg)
8408 self.feedback_fn("Pre-migration failed, aborting")
8409 self._AbortMigration()
8410 self._RevertDiskStatus()
8411 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
8412 (instance.name, msg))
8414 self.feedback_fn("* migrating instance to %s" % target_node)
8415 result = self.rpc.call_instance_migrate(source_node, instance,
8416 self.nodes_ip[target_node],
8418 msg = result.fail_msg
8420 logging.error("Instance migration failed, trying to revert"
8421 " disk status: %s", msg)
8422 self.feedback_fn("Migration failed, aborting")
8423 self._AbortMigration()
8424 self._RevertDiskStatus()
8425 raise errors.OpExecError("Could not migrate instance %s: %s" %
8426 (instance.name, msg))
8428 self.feedback_fn("* starting memory transfer")
8429 last_feedback = time.time()
8431 result = self.rpc.call_instance_get_migration_status(source_node,
8433 msg = result.fail_msg
8434 ms = result.payload # MigrationStatus instance
8435 if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
8436 logging.error("Instance migration failed, trying to revert"
8437 " disk status: %s", msg)
8438 self.feedback_fn("Migration failed, aborting")
8439 self._AbortMigration()
8440 self._RevertDiskStatus()
8441 raise errors.OpExecError("Could not migrate instance %s: %s" %
8442 (instance.name, msg))
8444 if result.payload.status != constants.HV_MIGRATION_ACTIVE:
8445 self.feedback_fn("* memory transfer complete")
8448 if (utils.TimeoutExpired(last_feedback,
8449 self._MIGRATION_FEEDBACK_INTERVAL) and
8450 ms.transferred_ram is not None):
8451 mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
8452 self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
8453 last_feedback = time.time()
8455 time.sleep(self._MIGRATION_POLL_INTERVAL)
8457 result = self.rpc.call_instance_finalize_migration_src(source_node,
8461 msg = result.fail_msg
8463 logging.error("Instance migration succeeded, but finalization failed"
8464 " on the source node: %s", msg)
8465 raise errors.OpExecError("Could not finalize instance migration: %s" %
8468 instance.primary_node = target_node
8470 # distribute new instance config to the other nodes
8471 self.cfg.Update(instance, self.feedback_fn)
8473 result = self.rpc.call_instance_finalize_migration_dst(target_node,
8477 msg = result.fail_msg
8479 logging.error("Instance migration succeeded, but finalization failed"
8480 " on the target node: %s", msg)
8481 raise errors.OpExecError("Could not finalize instance migration: %s" %
8484 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8485 self._EnsureSecondary(source_node)
8486 self._WaitUntilSync()
8487 self._GoStandalone()
8488 self._GoReconnect(False)
8489 self._WaitUntilSync()
8491 # If the instance's disk template is `rbd' and there was a successful
8492 # migration, unmap the device from the source node.
8493 if self.instance.disk_template == constants.DT_RBD:
8494 disks = _ExpandCheckDisks(instance, instance.disks)
8495 self.feedback_fn("* unmapping instance's disks from %s" % source_node)
8497 result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
8498 msg = result.fail_msg
8500 logging.error("Migration was successful, but couldn't unmap the"
8501 " block device %s on source node %s: %s",
8502 disk.iv_name, source_node, msg)
8503 logging.error("You need to unmap the device %s manually on %s",
8504 disk.iv_name, source_node)
8506 self.feedback_fn("* done")
8508 def _ExecFailover(self):
8509 """Failover an instance.
8511 The failover is done by shutting it down on its present node and
8512 starting it on the secondary.
8515 instance = self.instance
8516 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
8518 source_node = instance.primary_node
8519 target_node = self.target_node
8521 if instance.admin_state == constants.ADMINST_UP:
8522 self.feedback_fn("* checking disk consistency between source and target")
8523 for (idx, dev) in enumerate(instance.disks):
8524 # for drbd, these are drbd over lvm
8525 if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
8527 if primary_node.offline:
8528 self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
8530 (primary_node.name, idx, target_node))
8531 elif not self.ignore_consistency:
8532 raise errors.OpExecError("Disk %s is degraded on target node,"
8533 " aborting failover" % idx)
8535 self.feedback_fn("* not checking disk consistency as instance is not"
8538 self.feedback_fn("* shutting down instance on source node")
8539 logging.info("Shutting down instance %s on node %s",
8540 instance.name, source_node)
8542 result = self.rpc.call_instance_shutdown(source_node, instance,
8543 self.shutdown_timeout)
8544 msg = result.fail_msg
8545 if msg:
8546 if self.ignore_consistency or primary_node.offline:
8547 self.lu.LogWarning("Could not shutdown instance %s on node %s,"
8548 " proceeding anyway; please make sure node"
8549 " %s is down; error details: %s",
8550 instance.name, source_node, source_node, msg)
8551 else:
8552 raise errors.OpExecError("Could not shutdown instance %s on"
8553 " node %s: %s" %
8554 (instance.name, source_node, msg))
8556 self.feedback_fn("* deactivating the instance's disks on source node")
8557 if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
8558 raise errors.OpExecError("Can't shut down the instance's disks")
8560 instance.primary_node = target_node
8561 # distribute new instance config to the other nodes
8562 self.cfg.Update(instance, self.feedback_fn)
8564 # Only start the instance if it's marked as up
8565 if instance.admin_state == constants.ADMINST_UP:
8566 self.feedback_fn("* activating the instance's disks on target node %s" %
8568 logging.info("Starting instance %s on node %s",
8569 instance.name, target_node)
8571 disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
8572 ignore_secondaries=True)
8573 if not disks_ok:
8574 _ShutdownInstanceDisks(self.lu, instance)
8575 raise errors.OpExecError("Can't activate the instance's disks")
8577 self.feedback_fn("* starting the instance on the target node %s" %
8579 result = self.rpc.call_instance_start(target_node, (instance, None, None),
8581 msg = result.fail_msg
8582 if msg:
8583 _ShutdownInstanceDisks(self.lu, instance)
8584 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
8585 (instance.name, target_node, msg))
8587 def Exec(self, feedback_fn):
8588 """Perform the migration.
8591 self.feedback_fn = feedback_fn
8592 self.source_node = self.instance.primary_node
8594 # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
8595 if self.instance.disk_template in constants.DTS_INT_MIRROR:
8596 self.target_node = self.instance.secondary_nodes[0]
8597 # Otherwise self.target_node has been populated either
8598 # directly, or through an iallocator.
8600 self.all_nodes = [self.source_node, self.target_node]
8601 self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
8602 in self.cfg.GetMultiNodeInfo(self.all_nodes))
8605 feedback_fn("Failover instance %s" % self.instance.name)
8606 self._ExecFailover()
8608 feedback_fn("Migrating instance %s" % self.instance.name)
8611 return self._ExecCleanup()
8613 return self._ExecMigration()
8616 def _CreateBlockDev(lu, node, instance, device, force_create, info,
8618 """Wrapper around L{_CreateBlockDevInner}.
8620 This method annotates the root device first.
8623 (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
8624 return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
8628 def _CreateBlockDevInner(lu, node, instance, device, force_create,
8630 """Create a tree of block devices on a given node.
8632 If this device type has to be created on secondaries, create it and
8635 If not, just recurse to children keeping the same 'force' value.
8637 @attention: The device has to be annotated already.
8639 @param lu: the lu on whose behalf we execute
8640 @param node: the node on which to create the device
8641 @type instance: L{objects.Instance}
8642 @param instance: the instance which owns the device
8643 @type device: L{objects.Disk}
8644 @param device: the device to create
8645 @type force_create: boolean
8646 @param force_create: whether to force creation of this device; this
8647 will be changed to True whenever we find a device which has
8648 CreateOnSecondary() attribute
8649 @param info: the extra 'metadata' we should attach to the device
8650 (this will be represented as a LVM tag)
8651 @type force_open: boolean
8652 @param force_open: this parameter will be passed to the
8653 L{backend.BlockdevCreate} function where it specifies
8654 whether we run on primary or not, and it affects both
8655 the child assembly and the device's own Open() execution
8658 if device.CreateOnSecondary():
8659 force_create = True
8661 if device.children:
8662 for child in device.children:
8663 _CreateBlockDevInner(lu, node, instance, child, force_create,
8666 if not force_create:
8669 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
8672 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
8673 """Create a single block device on a given node.
8675 This will not recurse over children of the device, so they must be
8678 @param lu: the lu on whose behalf we execute
8679 @param node: the node on which to create the device
8680 @type instance: L{objects.Instance}
8681 @param instance: the instance which owns the device
8682 @type device: L{objects.Disk}
8683 @param device: the device to create
8684 @param info: the extra 'metadata' we should attach to the device
8685 (this will be represented as a LVM tag)
8686 @type force_open: boolean
8687 @param force_open: this parameter will be passed to the
8688 L{backend.BlockdevCreate} function where it specifies
8689 whether we run on primary or not, and it affects both
8690 the child assembly and the device's own Open() execution
8693 lu.cfg.SetDiskID(device, node)
8694 result = lu.rpc.call_blockdev_create(node, device, device.size,
8695 instance.name, force_open, info)
8696 result.Raise("Can't create block device %s on"
8697 " node %s for instance %s" % (device, node, instance.name))
8698 if device.physical_id is None:
8699 device.physical_id = result.payload
8702 def _GenerateUniqueNames(lu, exts):
8703 """Generate a suitable LV name.
8705 This will generate a logical volume name for the given instance.
8710 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
8711 results.append("%s%s" % (new_id, val))
8715 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
8716 iv_name, p_minor, s_minor):
8717 """Generate a drbd8 device complete with its children.
8720 assert len(vgnames) == len(names) == 2
8721 port = lu.cfg.AllocatePort()
8722 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
8724 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
8725 logical_id=(vgnames[0], names[0]),
8727 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
8728 logical_id=(vgnames[1], names[1]),
8730 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
8731 logical_id=(primary, secondary, port,
8734 children=[dev_data, dev_meta],
8735 iv_name=iv_name, params={})
8736 return drbd_dev
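# The returned disk is a single DRBD8 device whose two LV children are the
# data volume of the requested size and a metadata volume of DRBD_META_SIZE.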
8739 _DISK_TEMPLATE_NAME_PREFIX = {
8740 constants.DT_PLAIN: "",
8741 constants.DT_RBD: ".rbd",
8745 _DISK_TEMPLATE_DEVICE_TYPE = {
8746 constants.DT_PLAIN: constants.LD_LV,
8747 constants.DT_FILE: constants.LD_FILE,
8748 constants.DT_SHARED_FILE: constants.LD_FILE,
8749 constants.DT_BLOCK: constants.LD_BLOCKDEV,
8750 constants.DT_RBD: constants.LD_RBD,
8754 def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
8755 secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
8756 feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
8757 _req_shr_file_storage=opcodes.RequireSharedFileStorage):
8758 """Generate the entire disk layout for a given template type.
8761 #TODO: compute space requirements
8763 vgname = lu.cfg.GetVGName()
8764 disk_count = len(disk_info)
8765 disks = []
8767 if template_name == constants.DT_DISKLESS:
8768 pass
8769 elif template_name == constants.DT_DRBD8:
8770 if len(secondary_nodes) != 1:
8771 raise errors.ProgrammerError("Wrong template configuration")
8772 remote_node = secondary_nodes[0]
8773 minors = lu.cfg.AllocateDRBDMinor(
8774 [primary_node, remote_node] * len(disk_info), instance_name)
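# AllocateDRBDMinor is passed the [primary, secondary] pair once per disk, so
# it returns two consecutive minors per disk; this is why the loop below uses
# minors[idx * 2] for the primary and minors[idx * 2 + 1] for the secondary.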
8776 (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
8778 drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
8781 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
8782 for i in range(disk_count)]):
8783 names.append(lv_prefix + "_data")
8784 names.append(lv_prefix + "_meta")
8785 for idx, disk in enumerate(disk_info):
8786 disk_index = idx + base_index
8787 data_vg = disk.get(constants.IDISK_VG, vgname)
8788 meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
8789 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
8790 disk[constants.IDISK_SIZE],
8792 names[idx * 2:idx * 2 + 2],
8793 "disk/%d" % disk_index,
8794 minors[idx * 2], minors[idx * 2 + 1])
8795 disk_dev.mode = disk[constants.IDISK_MODE]
8796 disks.append(disk_dev)
8799 raise errors.ProgrammerError("Wrong template configuration")
8801 if template_name == constants.DT_FILE:
8802 _req_file_storage()
8803 elif template_name == constants.DT_SHARED_FILE:
8804 _req_shr_file_storage()
8806 name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
8807 if name_prefix is None:
8810 names = _GenerateUniqueNames(lu, ["%s.disk%s" %
8811 (name_prefix, base_index + i)
8812 for i in range(disk_count)])
8814 if template_name == constants.DT_PLAIN:
8815 def logical_id_fn(idx, _, disk):
8816 vg = disk.get(constants.IDISK_VG, vgname)
8817 return (vg, names[idx])
8818 elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
8820 lambda _, disk_index, disk: (file_driver,
8821 "%s/disk%d" % (file_storage_dir,
8823 elif template_name == constants.DT_BLOCK:
8825 lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
8826 disk[constants.IDISK_ADOPT])
8827 elif template_name == constants.DT_RBD:
8828 logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
8830 raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
8832 dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
8834 for idx, disk in enumerate(disk_info):
8835 disk_index = idx + base_index
8836 size = disk[constants.IDISK_SIZE]
8837 feedback_fn("* disk %s, size %s" %
8838 (disk_index, utils.FormatUnit(size, "h")))
8839 disks.append(objects.Disk(dev_type=dev_type, size=size,
8840 logical_id=logical_id_fn(idx, disk_index, disk),
8841 iv_name="disk/%d" % disk_index,
8842 mode=disk[constants.IDISK_MODE],
8843 params={}))
8845 return disks
8848 def _GetInstanceInfoText(instance):
8849 """Compute that text that should be added to the disk's metadata.
8852 return "originstname+%s" % instance.name
8855 def _CalcEta(time_taken, written, total_size):
8856 """Calculates the ETA based on size written and total size.
8858 @param time_taken: The time taken so far
8859 @param written: amount written so far
8860 @param total_size: The total size of data to be written
8861 @return: The remaining time in seconds
8864 avg_time = time_taken / float(written)
8865 return (total_size - written) * avg_time
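# Worked example: if one quarter of the data was written in 30 seconds, the
# remaining three quarters are estimated to take
# (total_size - written) * avg_time = 3 * 30 = 90 seconds.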
8868 def _WipeDisks(lu, instance):
8869 """Wipes instance disks.
8871 @type lu: L{LogicalUnit}
8872 @param lu: the logical unit on whose behalf we execute
8873 @type instance: L{objects.Instance}
8874 @param instance: the instance whose disks we should create
8875 @return: the success of the wipe
8878 node = instance.primary_node
8880 for device in instance.disks:
8881 lu.cfg.SetDiskID(device, node)
8883 logging.info("Pause sync of instance %s disks", instance.name)
8884 result = lu.rpc.call_blockdev_pause_resume_sync(node,
8885 (instance.disks, instance),
8887 result.Raise("Failed RPC to node %s for pausing the disk syncing" % node)
8889 for idx, success in enumerate(result.payload):
8890 if not success:
8891 logging.warn("pause-sync of instance %s for disks %d failed",
8895 for idx, device in enumerate(instance.disks):
8896 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
8897 # MAX_WIPE_CHUNK at max
8898 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
8899 constants.MIN_WIPE_CHUNK_PERCENT)
8900 # we _must_ make this an int, otherwise rounding errors will
8901 # occur
8902 wipe_chunk_size = int(wipe_chunk_size)
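# Example (assuming the usual values of MIN_WIPE_CHUNK_PERCENT = 10 and
# MAX_WIPE_CHUNK = 1024 MiB): a 5120 MiB disk is wiped in 512 MiB chunks,
# while a 20480 MiB disk is capped at 1024 MiB chunks.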
8904 lu.LogInfo("* Wiping disk %d", idx)
8905 logging.info("Wiping disk %d for instance %s, node %s using"
8906 " chunk size %s", idx, instance.name, node, wipe_chunk_size)
8911 start_time = time.time()
8913 while offset < size:
8914 wipe_size = min(wipe_chunk_size, size - offset)
8915 logging.debug("Wiping disk %d, offset %s, chunk %s",
8916 idx, offset, wipe_size)
8917 result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
8919 result.Raise("Could not wipe disk %d at offset %d for size %d" %
8920 (idx, offset, wipe_size))
8921 now = time.time()
8922 offset += wipe_size
8923 if now - last_output >= 60:
8924 eta = _CalcEta(now - start_time, offset, size)
8925 lu.LogInfo(" - done: %.1f%% ETA: %s" %
8926 (offset / float(size) * 100, utils.FormatSeconds(eta)))
8929 logging.info("Resume sync of instance %s disks", instance.name)
8931 result = lu.rpc.call_blockdev_pause_resume_sync(node,
8932 (instance.disks, instance),
8936 lu.LogWarning("RPC call to %s for resuming disk syncing failed,"
8937 " please have a look at the status and troubleshoot"
8938 " the issue: %s", node, result.fail_msg)
8940 for idx, success in enumerate(result.payload):
8941 if not success:
8942 lu.LogWarning("Resume sync of disk %d failed, please have a"
8943 " look at the status and troubleshoot the issue", idx)
8944 logging.warn("resume-sync of instance %s for disks %d failed",
8948 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
8949 """Create all disks for an instance.
8951 This abstracts away some work from AddInstance.
8953 @type lu: L{LogicalUnit}
8954 @param lu: the logical unit on whose behalf we execute
8955 @type instance: L{objects.Instance}
8956 @param instance: the instance whose disks we should create
8958 @param to_skip: list of indices to skip
8959 @type target_node: string
8960 @param target_node: if passed, overrides the target node for creation
8962 @return: the success of the creation
8965 info = _GetInstanceInfoText(instance)
8966 if target_node is None:
8967 pnode = instance.primary_node
8968 all_nodes = instance.all_nodes
8969 else:
8970 pnode = target_node
8971 all_nodes = [pnode]
8973 if instance.disk_template in constants.DTS_FILEBASED:
8974 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
8975 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
8977 result.Raise("Failed to create directory '%s' on"
8978 " node %s" % (file_storage_dir, pnode))
8980 # Note: this needs to be kept in sync with adding of disks in
8981 # LUInstanceSetParams
8982 for idx, device in enumerate(instance.disks):
8983 if to_skip and idx in to_skip:
8984 continue
8985 logging.info("Creating disk %s for instance '%s'", idx, instance.name)
8987 for node in all_nodes:
8988 f_create = node == pnode
8989 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
8992 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
8993 """Remove all disks for an instance.
8995 This abstracts away some work from `AddInstance()` and
8996 `RemoveInstance()`. Note that in case some of the devices couldn't
8997 be removed, the removal will continue with the other ones (compare
8998 with `_CreateDisks()`).
9000 @type lu: L{LogicalUnit}
9001 @param lu: the logical unit on whose behalf we execute
9002 @type instance: L{objects.Instance}
9003 @param instance: the instance whose disks we should remove
9004 @type target_node: string
9005 @param target_node: used to override the node on which to remove the disks
9007 @return: the success of the removal
9010 logging.info("Removing block devices for instance %s", instance.name)
9013 ports_to_release = set()
9014 anno_disks = _AnnotateDiskParams(instance, instance.disks, lu.cfg)
9015 for (idx, device) in enumerate(anno_disks):
9016 if target_node:
9017 edata = [(target_node, device)]
9018 else:
9019 edata = device.ComputeNodeTree(instance.primary_node)
9020 for node, disk in edata:
9021 lu.cfg.SetDiskID(disk, node)
9022 result = lu.rpc.call_blockdev_remove(node, disk)
9024 lu.LogWarning("Could not remove disk %s on node %s,"
9025 " continuing anyway: %s", idx, node, result.fail_msg)
9026 if not (result.offline and node != instance.primary_node):
9029 # if this is a DRBD disk, return its port to the pool
9030 if device.dev_type in constants.LDS_DRBD:
9031 ports_to_release.add(device.logical_id[2])
9033 if all_result or ignore_failures:
9034 for port in ports_to_release:
9035 lu.cfg.AddTcpUdpPort(port)
9037 if instance.disk_template == constants.DT_FILE:
9038 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
9042 tgt = instance.primary_node
9043 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
9045 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
9046 file_storage_dir, instance.primary_node, result.fail_msg)
9052 def _ComputeDiskSizePerVG(disk_template, disks):
9053 """Compute disk size requirements in the volume group
9056 def _compute(disks, payload):
9057 """Universal algorithm.
9060 vgs = {}
9061 for disk in disks:
9062 vgs[disk[constants.IDISK_VG]] = \
9063 vgs.get(disk[constants.IDISK_VG], 0) + disk[constants.IDISK_SIZE] + payload
9065 return vgs
9067 # Required free disk space as a function of disk and swap space
9069 constants.DT_DISKLESS: {},
9070 constants.DT_PLAIN: _compute(disks, 0),
9071 # 128 MB are added for drbd metadata for each disk
9072 constants.DT_DRBD8: _compute(disks, DRBD_META_SIZE),
9073 constants.DT_FILE: {},
9074 constants.DT_SHARED_FILE: {},
9077 if disk_template not in req_size_dict:
9078 raise errors.ProgrammerError("Disk template '%s' size requirement"
9079 " is unknown" % disk_template)
9081 return req_size_dict[disk_template]
9084 def _ComputeDiskSize(disk_template, disks):
9085 """Compute disk size requirements according to disk template
9088 # Required free disk space as a function of disk and swap space
9090 constants.DT_DISKLESS: None,
9091 constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
9092 # 128 MB are added for drbd metadata for each disk
9094 sum(d[constants.IDISK_SIZE] + DRBD_META_SIZE for d in disks),
9095 constants.DT_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
9096 constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
9097 constants.DT_BLOCK: 0,
9098 constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
9101 if disk_template not in req_size_dict:
9102 raise errors.ProgrammerError("Disk template '%s' size requirement"
9103 " is unknown" % disk_template)
9105 return req_size_dict[disk_template]
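# Example: a DRBD8 instance with two 10240 MiB disks requires
# 2 * (10240 + DRBD_META_SIZE) MiB, i.e. 20736 MiB with the 128 MiB metadata
# volume mentioned above.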
9108 def _FilterVmNodes(lu, nodenames):
9109 """Filters out non-vm_capable nodes from a list.
9111 @type lu: L{LogicalUnit}
9112 @param lu: the logical unit for which we check
9113 @type nodenames: list
9114 @param nodenames: the list of nodes on which we should check
9116 @return: the list of vm-capable nodes
9119 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
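# Note: despite its name, vm_nodes above holds the set of *non*-vm-capable
# nodes; the list comprehension keeps only names outside that set.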
9120 return [name for name in nodenames if name not in vm_nodes]
9123 def _CheckHVParams(lu, nodenames, hvname, hvparams):
9124 """Hypervisor parameter validation.
9126 This function abstracts the hypervisor parameter validation to be
9127 used in both instance create and instance modify.
9129 @type lu: L{LogicalUnit}
9130 @param lu: the logical unit for which we check
9131 @type nodenames: list
9132 @param nodenames: the list of nodes on which we should check
9133 @type hvname: string
9134 @param hvname: the name of the hypervisor we should use
9135 @type hvparams: dict
9136 @param hvparams: the parameters which we need to check
9137 @raise errors.OpPrereqError: if the parameters are not valid
9140 nodenames = _FilterVmNodes(lu, nodenames)
9142 cluster = lu.cfg.GetClusterInfo()
9143 hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
9145 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvfull)
9146 for node in nodenames:
9147 info = hvinfo[node]
9148 if info.offline:
9149 continue
9150 info.Raise("Hypervisor parameter validation failed on node %s" % node)
9153 def _CheckOSParams(lu, required, nodenames, osname, osparams):
9154 """OS parameters validation.
9156 @type lu: L{LogicalUnit}
9157 @param lu: the logical unit for which we check
9158 @type required: boolean
9159 @param required: whether the validation should fail if the OS is not
9161 @type nodenames: list
9162 @param nodenames: the list of nodes on which we should check
9163 @type osname: string
9164 @param osname: the name of the OS we should validate
9165 @type osparams: dict
9166 @param osparams: the parameters which we need to check
9167 @raise errors.OpPrereqError: if the parameters are not valid
9170 nodenames = _FilterVmNodes(lu, nodenames)
9171 result = lu.rpc.call_os_validate(nodenames, required, osname,
9172 [constants.OS_VALIDATE_PARAMETERS],
9174 for node, nres in result.items():
9175 # we don't check for offline cases since this should be run only
9176 # against the master node and/or an instance's nodes
9177 nres.Raise("OS Parameters validation failed on node %s" % node)
9178 if not nres.payload:
9179 lu.LogInfo("OS %s not found on node %s, validation skipped",
9183 class LUInstanceCreate(LogicalUnit):
9184 """Create an instance.
9187 HPATH = "instance-add"
9188 HTYPE = constants.HTYPE_INSTANCE
9191 def CheckArguments(self):
9195 # do not require name_check to ease forward/backward compatibility
9197 if self.op.no_install and self.op.start:
9198 self.LogInfo("No-installation mode selected, disabling startup")
9199 self.op.start = False
9200 # validate/normalize the instance name
9201 self.op.instance_name = \
9202 netutils.Hostname.GetNormalizedName(self.op.instance_name)
9204 if self.op.ip_check and not self.op.name_check:
9205 # TODO: make the ip check more flexible and not depend on the name check
9206 raise errors.OpPrereqError("Cannot do IP address check without a name"
9207 " check", errors.ECODE_INVAL)
9209 # check nics' parameter names
9210 for nic in self.op.nics:
9211 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
9213 # check disks: parameter names and consistent adopt/no-adopt strategy
9214 has_adopt = has_no_adopt = False
9215 for disk in self.op.disks:
9216 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
9217 if constants.IDISK_ADOPT in disk:
9218 has_adopt = True
9219 else:
9220 has_no_adopt = True
9221 if has_adopt and has_no_adopt:
9222 raise errors.OpPrereqError("Either all disks are adopted or none is",
9225 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
9226 raise errors.OpPrereqError("Disk adoption is not supported for the"
9227 " '%s' disk template" %
9228 self.op.disk_template,
9230 if self.op.iallocator is not None:
9231 raise errors.OpPrereqError("Disk adoption not allowed with an"
9232 " iallocator script", errors.ECODE_INVAL)
9233 if self.op.mode == constants.INSTANCE_IMPORT:
9234 raise errors.OpPrereqError("Disk adoption not allowed for"
9235 " instance import", errors.ECODE_INVAL)
9237 if self.op.disk_template in constants.DTS_MUST_ADOPT:
9238 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
9239 " but no 'adopt' parameter given" %
9240 self.op.disk_template,
9243 self.adopt_disks = has_adopt
9245 # instance name verification
9246 if self.op.name_check:
9247 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
9248 self.op.instance_name = self.hostname1.name
9249 # used in CheckPrereq for ip ping check
9250 self.check_ip = self.hostname1.ip
9252 self.check_ip = None
9254 # file storage checks
9255 if (self.op.file_driver and
9256 not self.op.file_driver in constants.FILE_DRIVER):
9257 raise errors.OpPrereqError("Invalid file driver name '%s'" %
9258 self.op.file_driver, errors.ECODE_INVAL)
9260 if self.op.disk_template == constants.DT_FILE:
9261 opcodes.RequireFileStorage()
9262 elif self.op.disk_template == constants.DT_SHARED_FILE:
9263 opcodes.RequireSharedFileStorage()
9265 ### Node/iallocator related checks
9266 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
9268 if self.op.pnode is not None:
9269 if self.op.disk_template in constants.DTS_INT_MIRROR:
9270 if self.op.snode is None:
9271 raise errors.OpPrereqError("The networked disk templates need"
9272 " a mirror node", errors.ECODE_INVAL)
9274 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
9276 self.op.snode = None
9278 self._cds = _GetClusterDomainSecret()
9280 if self.op.mode == constants.INSTANCE_IMPORT:
9281 # On import force_variant must be True, because if we forced it at
9282 # initial install, our only chance when importing it back is that it
9284 self.op.force_variant = True
9286 if self.op.no_install:
9287 self.LogInfo("No-installation mode has no effect during import")
9289 elif self.op.mode == constants.INSTANCE_CREATE:
9290 if self.op.os_type is None:
9291 raise errors.OpPrereqError("No guest OS specified",
9293 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
9294 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
9295 " installation" % self.op.os_type,
9297 if self.op.disk_template is None:
9298 raise errors.OpPrereqError("No disk template specified",
9301 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9302 # Check handshake to ensure both clusters have the same domain secret
9303 src_handshake = self.op.source_handshake
9304 if not src_handshake:
9305 raise errors.OpPrereqError("Missing source handshake",
9308 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
9311 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
9314 # Load and check source CA
9315 self.source_x509_ca_pem = self.op.source_x509_ca
9316 if not self.source_x509_ca_pem:
9317 raise errors.OpPrereqError("Missing source X509 CA",
9321 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
9323 except OpenSSL.crypto.Error, err:
9324 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
9325 (err, ), errors.ECODE_INVAL)
9327 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9328 if errcode is not None:
9329 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
9332 self.source_x509_ca = cert
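9333 # (the verified CA is kept on the LU and reused by the remote-import path in Exec below)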
9334 src_instance_name = self.op.source_instance_name
9335 if not src_instance_name:
9336 raise errors.OpPrereqError("Missing source instance name",
9339 self.source_instance_name = \
9340 netutils.GetHostname(name=src_instance_name).name
9343 raise errors.OpPrereqError("Invalid instance creation mode %r" %
9344 self.op.mode, errors.ECODE_INVAL)
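9345 # i.e. CheckArguments dispatches on the three creation modes: plain create, local import and remote import.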
9346 def ExpandNames(self):
9347 """ExpandNames for CreateInstance.
9349 Figure out the right locks for instance creation.
9351 """
9352 self.needed_locks = {}
9354 instance_name = self.op.instance_name
9355 # this is just a preventive check, but someone might still add this
9356 # instance in the meantime, and creation will fail at lock-add time
9357 if instance_name in self.cfg.GetInstanceList():
9358 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
9359 instance_name, errors.ECODE_EXISTS)
9361 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
9363 if self.op.iallocator:
9364 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
9365 # specifying a group on instance creation and then selecting nodes from
9366 # that group
9367 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9368 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
9369 else:
9370 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
9371 nodelist = [self.op.pnode]
9372 if self.op.snode is not None:
9373 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
9374 nodelist.append(self.op.snode)
9375 self.needed_locks[locking.LEVEL_NODE] = nodelist
9376 # Lock resources of instance's primary and secondary nodes (copy to
9377 # prevent accidential modification)
9378 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodelist)
9380 # in case of import lock the source node too
9381 if self.op.mode == constants.INSTANCE_IMPORT:
9382 src_node = self.op.src_node
9383 src_path = self.op.src_path
9385 if src_path is None:
9386 self.op.src_path = src_path = self.op.instance_name
9388 if src_node is None:
9389 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9390 self.op.src_node = None
9391 if os.path.isabs(src_path):
9392 raise errors.OpPrereqError("Importing an instance from a path"
9393 " requires a source node option",
9396 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
9397 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
9398 self.needed_locks[locking.LEVEL_NODE].append(src_node)
9399 if not os.path.isabs(src_path):
9400 self.op.src_path = src_path = \
9401 utils.PathJoin(constants.EXPORT_DIR, src_path)
9403 def _RunAllocator(self):
9404 """Run the allocator based on input opcode.
9407 nics = [n.ToDict() for n in self.nics]
9408 ial = IAllocator(self.cfg, self.rpc,
9409 mode=constants.IALLOCATOR_MODE_ALLOC,
9410 name=self.op.instance_name,
9411 disk_template=self.op.disk_template,
9412 tags=self.op.tags,
9413 os=self.op.os_type,
9414 vcpus=self.be_full[constants.BE_VCPUS],
9415 memory=self.be_full[constants.BE_MAXMEM],
9416 spindle_use=self.be_full[constants.BE_SPINDLE_USE],
9417 disks=self.disks,
9418 nics=nics,
9419 hypervisor=self.op.hypervisor,
9420 )
9422 ial.Run(self.op.iallocator)
9424 if not ial.success:
9425 raise errors.OpPrereqError("Can't compute nodes using"
9426 " iallocator '%s': %s" %
9427 (self.op.iallocator, ial.info),
9428 errors.ECODE_NORES)
9429 if len(ial.result) != ial.required_nodes:
9430 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
9431 " of nodes (%s), required %s" %
9432 (self.op.iallocator, len(ial.result),
9433 ial.required_nodes), errors.ECODE_FAULT)
9434 self.op.pnode = ial.result[0]
9435 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
9436 self.op.instance_name, self.op.iallocator,
9437 utils.CommaJoin(ial.result))
9438 if ial.required_nodes == 2:
9439 self.op.snode = ial.result[1]
9441 def BuildHooksEnv(self):
9442 """Build hooks env.
9444 This runs on master, primary and secondary nodes of the instance.
9446 """
9447 env = {
9448 "ADD_MODE": self.op.mode,
9449 }
9450 if self.op.mode == constants.INSTANCE_IMPORT:
9451 env["SRC_NODE"] = self.op.src_node
9452 env["SRC_PATH"] = self.op.src_path
9453 env["SRC_IMAGES"] = self.src_images
9455 env.update(_BuildInstanceHookEnv(
9456 name=self.op.instance_name,
9457 primary_node=self.op.pnode,
9458 secondary_nodes=self.secondaries,
9459 status=self.op.start,
9460 os_type=self.op.os_type,
9461 minmem=self.be_full[constants.BE_MINMEM],
9462 maxmem=self.be_full[constants.BE_MAXMEM],
9463 vcpus=self.be_full[constants.BE_VCPUS],
9464 nics=_NICListToTuple(self, self.nics),
9465 disk_template=self.op.disk_template,
9466 disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
9467 for d in self.disks],
9468 bep=self.be_full,
9469 hvp=self.hv_full,
9470 hypervisor_name=self.op.hypervisor,
9471 tags=self.op.tags,
9472 ))
9474 return env
9476 def BuildHooksNodes(self):
9477 """Build hooks nodes.
9480 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
9483 def _ReadExportInfo(self):
9484 """Reads the export information from disk.
9486 It will override the opcode source node and path with the actual
9487 information, if these two were not specified before.
9489 @return: the export information
9491 """
9492 assert self.op.mode == constants.INSTANCE_IMPORT
9494 src_node = self.op.src_node
9495 src_path = self.op.src_path
9497 if src_node is None:
9498 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
9499 exp_list = self.rpc.call_export_list(locked_nodes)
9500 found = False
9501 for node in exp_list:
9502 if exp_list[node].fail_msg:
9503 continue
9504 if src_path in exp_list[node].payload:
9505 found = True
9506 self.op.src_node = src_node = node
9507 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
9508 src_path)
9509 break
9510 if not found:
9511 raise errors.OpPrereqError("No export found for relative path %s" %
9512 src_path, errors.ECODE_INVAL)
9514 _CheckNodeOnline(self, src_node)
9515 result = self.rpc.call_export_info(src_node, src_path)
9516 result.Raise("No export or invalid export found in dir %s" % src_path)
9518 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
9519 if not export_info.has_section(constants.INISECT_EXP):
9520 raise errors.ProgrammerError("Corrupted export config",
9521 errors.ECODE_ENVIRON)
9523 ei_version = export_info.get(constants.INISECT_EXP, "version")
9524 if (int(ei_version) != constants.EXPORT_VERSION):
9525 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
9526 (ei_version, constants.EXPORT_VERSION),
9527 errors.ECODE_ENVIRON)
9529 return export_info
9530 def _ReadExportParams(self, einfo):
9531 """Use export parameters as defaults.
9533 In case the opcode doesn't specify (as in override) some instance
9534 parameters, then try to use them from the export information, if
9535 that declares them.
9537 """
9538 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
9540 if self.op.disk_template is None:
9541 if einfo.has_option(constants.INISECT_INS, "disk_template"):
9542 self.op.disk_template = einfo.get(constants.INISECT_INS,
9543 "disk_template")
9544 if self.op.disk_template not in constants.DISK_TEMPLATES:
9545 raise errors.OpPrereqError("Disk template specified in configuration"
9546 " file is not one of the allowed values:"
9547 " %s" % " ".join(constants.DISK_TEMPLATES))
9549 raise errors.OpPrereqError("No disk template specified and the export"
9550 " is missing the disk_template information",
9553 if not self.op.disks:
9554 disks = []
9555 # TODO: import the disk iv_name too
9556 for idx in range(constants.MAX_DISKS):
9557 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
9558 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
9559 disks.append({constants.IDISK_SIZE: disk_sz})
9560 self.op.disks = disks
9561 if not disks and self.op.disk_template != constants.DT_DISKLESS:
9562 raise errors.OpPrereqError("No disk info specified and the export"
9563 " is missing the disk information",
9566 if not self.op.nics:
9567 nics = []
9568 for idx in range(constants.MAX_NICS):
9569 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
9570 ndict = {}
9571 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
9572 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
9573 ndict[name] = v
9574 nics.append(ndict)
9575 else:
9576 break
9577 self.op.nics = nics
9579 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
9580 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
9582 if (self.op.hypervisor is None and
9583 einfo.has_option(constants.INISECT_INS, "hypervisor")):
9584 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
9586 if einfo.has_section(constants.INISECT_HYP):
9587 # use the export parameters but do not override the ones
9588 # specified by the user
9589 for name, value in einfo.items(constants.INISECT_HYP):
9590 if name not in self.op.hvparams:
9591 self.op.hvparams[name] = value
9593 if einfo.has_section(constants.INISECT_BEP):
9594 # use the parameters, without overriding
9595 for name, value in einfo.items(constants.INISECT_BEP):
9596 if name not in self.op.beparams:
9597 self.op.beparams[name] = value
9598 # Compatibility for the old "memory" be param
9599 if name == constants.BE_MEMORY:
9600 if constants.BE_MAXMEM not in self.op.beparams:
9601 self.op.beparams[constants.BE_MAXMEM] = value
9602 if constants.BE_MINMEM not in self.op.beparams:
9603 self.op.beparams[constants.BE_MINMEM] = value
9604 else:
9605 # try to read the parameters old style, from the main section
9606 for name in constants.BES_PARAMETERS:
9607 if (name not in self.op.beparams and
9608 einfo.has_option(constants.INISECT_INS, name)):
9609 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
9611 if einfo.has_section(constants.INISECT_OSP):
9612 # use the parameters, without overriding
9613 for name, value in einfo.items(constants.INISECT_OSP):
9614 if name not in self.op.osparams:
9615 self.op.osparams[name] = value
9617 def _RevertToDefaults(self, cluster):
9618 """Revert the instance parameters to the default values.
9622 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
9623 for name in self.op.hvparams.keys():
9624 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
9625 del self.op.hvparams[name]
9627 be_defs = cluster.SimpleFillBE({})
9628 for name in self.op.beparams.keys():
9629 if name in be_defs and be_defs[name] == self.op.beparams[name]:
9630 del self.op.beparams[name]
9632 nic_defs = cluster.SimpleFillNIC({})
9633 for nic in self.op.nics:
9634 for name in constants.NICS_PARAMETERS:
9635 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
9636 del nic[name]
9638 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
9639 for name in self.op.osparams.keys():
9640 if name in os_defs and os_defs[name] == self.op.osparams[name]:
9641 del self.op.osparams[name]
9643 def _CalculateFileStorageDir(self):
9644 """Calculate final instance file storage dir.
9647 # file storage dir calculation/check
9648 self.instance_file_storage_dir = None
9649 if self.op.disk_template in constants.DTS_FILEBASED:
9650 # build the full file storage dir path
9651 joinargs = []
9653 if self.op.disk_template == constants.DT_SHARED_FILE:
9654 get_fsd_fn = self.cfg.GetSharedFileStorageDir
9655 else:
9656 get_fsd_fn = self.cfg.GetFileStorageDir
9658 cfg_storagedir = get_fsd_fn()
9659 if not cfg_storagedir:
9660 raise errors.OpPrereqError("Cluster file storage dir not defined")
9661 joinargs.append(cfg_storagedir)
9663 if self.op.file_storage_dir is not None:
9664 joinargs.append(self.op.file_storage_dir)
9666 joinargs.append(self.op.instance_name)
9668 # pylint: disable=W0142
9669 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
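9670 # e.g. (hypothetical values) cluster dir "/srv/ganeti/file-storage", opcode dir "web", instance "inst1.example.com" -> "/srv/ganeti/file-storage/web/inst1.example.com"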
9671 def CheckPrereq(self): # pylint: disable=R0914
9672 """Check prerequisites.
9675 self._CalculateFileStorageDir()
9677 if self.op.mode == constants.INSTANCE_IMPORT:
9678 export_info = self._ReadExportInfo()
9679 self._ReadExportParams(export_info)
9680 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
9681 else:
9682 self._old_instance_name = None
9684 if (not self.cfg.GetVGName() and
9685 self.op.disk_template not in constants.DTS_NOT_LVM):
9686 raise errors.OpPrereqError("Cluster does not support lvm-based"
9687 " instances", errors.ECODE_STATE)
9689 if (self.op.hypervisor is None or
9690 self.op.hypervisor == constants.VALUE_AUTO):
9691 self.op.hypervisor = self.cfg.GetHypervisorType()
9693 cluster = self.cfg.GetClusterInfo()
9694 enabled_hvs = cluster.enabled_hypervisors
9695 if self.op.hypervisor not in enabled_hvs:
9696 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
9697 " cluster (%s)" % (self.op.hypervisor,
9698 ",".join(enabled_hvs)),
9701 # Check tag validity
9702 for tag in self.op.tags:
9703 objects.TaggableObject.ValidateTag(tag)
9705 # check hypervisor parameter syntax (locally)
9706 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
9707 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
9708 self.op.hvparams)
9709 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
9710 hv_type.CheckParameterSyntax(filled_hvp)
9711 self.hv_full = filled_hvp
9712 # check that we don't specify global parameters on an instance
9713 _CheckGlobalHvParams(self.op.hvparams)
9715 # fill and remember the beparams dict
9716 default_beparams = cluster.beparams[constants.PP_DEFAULT]
9717 for param, value in self.op.beparams.iteritems():
9718 if value == constants.VALUE_AUTO:
9719 self.op.beparams[param] = default_beparams[param]
9720 objects.UpgradeBeParams(self.op.beparams)
9721 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
9722 self.be_full = cluster.SimpleFillBE(self.op.beparams)
9724 # build os parameters
9725 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
9727 # now that hvp/bep are in final format, let's reset to defaults,
9728 # if told to do so
9729 if self.op.identify_defaults:
9730 self._RevertToDefaults(cluster)
9732 # NIC buildup
9733 self.nics = []
9734 for idx, nic in enumerate(self.op.nics):
9735 nic_mode_req = nic.get(constants.INIC_MODE, None)
9736 nic_mode = nic_mode_req
9737 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
9738 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
9740 # in routed mode, for the first nic, the default ip is 'auto'
9741 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
9742 default_ip_mode = constants.VALUE_AUTO
9744 default_ip_mode = constants.VALUE_NONE
9746 # ip validity checks
9747 ip = nic.get(constants.INIC_IP, default_ip_mode)
9748 if ip is None or ip.lower() == constants.VALUE_NONE:
9749 nic_ip = None
9750 elif ip.lower() == constants.VALUE_AUTO:
9751 if not self.op.name_check:
9752 raise errors.OpPrereqError("IP address set to auto but name checks"
9753 " have been skipped",
9755 nic_ip = self.hostname1.ip
9757 if not netutils.IPAddress.IsValid(ip):
9758 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
9762 # TODO: check the ip address for uniqueness
9763 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
9764 raise errors.OpPrereqError("Routed nic mode requires an ip address",
9767 # MAC address verification
9768 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
9769 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9770 mac = utils.NormalizeAndValidateMac(mac)
9772 try:
9773 self.cfg.ReserveMAC(mac, self.proc.GetECId())
9774 except errors.ReservationError:
9775 raise errors.OpPrereqError("MAC address %s already in use"
9776 " in cluster" % mac,
9777 errors.ECODE_NOTUNIQUE)
9779 # Build nic parameters
9780 link = nic.get(constants.INIC_LINK, None)
9781 if link == constants.VALUE_AUTO:
9782 link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
9783 nicparams = {}
9784 if nic_mode_req:
9785 nicparams[constants.NIC_MODE] = nic_mode
9786 if link:
9787 nicparams[constants.NIC_LINK] = link
9789 check_params = cluster.SimpleFillNIC(nicparams)
9790 objects.NIC.CheckParameterSyntax(check_params)
9791 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
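9792 # note: the NIC object keeps only the explicit overrides; SimpleFillNIC above merges cluster defaults purely for syntax checking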
9793 # disk checks/pre-build
9794 default_vg = self.cfg.GetVGName()
9795 self.disks = []
9796 for disk in self.op.disks:
9797 mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
9798 if mode not in constants.DISK_ACCESS_SET:
9799 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
9800 mode, errors.ECODE_INVAL)
9801 size = disk.get(constants.IDISK_SIZE, None)
9802 if size is None:
9803 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
9804 try:
9805 size = int(size)
9806 except (TypeError, ValueError):
9807 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
9808 errors.ECODE_INVAL)
9810 data_vg = disk.get(constants.IDISK_VG, default_vg)
9811 new_disk = {
9812 constants.IDISK_SIZE: size,
9813 constants.IDISK_MODE: mode,
9814 constants.IDISK_VG: data_vg,
9815 }
9816 if constants.IDISK_METAVG in disk:
9817 new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
9818 if constants.IDISK_ADOPT in disk:
9819 new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
9820 self.disks.append(new_disk)
9822 if self.op.mode == constants.INSTANCE_IMPORT:
9823 disk_images = []
9824 for idx in range(len(self.disks)):
9825 option = "disk%d_dump" % idx
9826 if export_info.has_option(constants.INISECT_INS, option):
9827 # FIXME: are the old os-es, disk sizes, etc. useful?
9828 export_name = export_info.get(constants.INISECT_INS, option)
9829 image = utils.PathJoin(self.op.src_path, export_name)
9830 disk_images.append(image)
9831 else:
9832 disk_images.append(False)
9834 self.src_images = disk_images
9836 if self.op.instance_name == self._old_instance_name:
9837 for idx, nic in enumerate(self.nics):
9838 if nic.mac == constants.VALUE_AUTO:
9839 nic_mac_ini = "nic%d_mac" % idx
9840 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
9842 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
9844 # ip ping checks (we use the same ip that was resolved in ExpandNames)
9845 if self.op.ip_check:
9846 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
9847 raise errors.OpPrereqError("IP %s of instance %s already in use" %
9848 (self.check_ip, self.op.instance_name),
9849 errors.ECODE_NOTUNIQUE)
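9850 # (the TCP probe against the noded port is only used to detect whether anything answers at that address)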
9851 #### mac address generation
9852 # By generating here the mac address both the allocator and the hooks get
9853 # the real final mac address rather than the 'auto' or 'generate' value.
9854 # There is a race condition between the generation and the instance object
9855 # creation, which means that we know the mac is valid now, but we're not
9856 # sure it will be when we actually add the instance. If things go bad
9857 # adding the instance will abort because of a duplicate mac, and the
9858 # creation job will fail.
9859 for nic in self.nics:
9860 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9861 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
9865 if self.op.iallocator is not None:
9866 self._RunAllocator()
9868 # Release all unneeded node locks
9869 _ReleaseLocks(self, locking.LEVEL_NODE,
9870 keep=filter(None, [self.op.pnode, self.op.snode,
9871 self.op.src_node]))
9872 _ReleaseLocks(self, locking.LEVEL_NODE_RES,
9873 keep=filter(None, [self.op.pnode, self.op.snode,
9874 self.op.src_node]))
9876 #### node related checks
9878 # check primary node
9879 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
9880 assert self.pnode is not None, \
9881 "Cannot retrieve locked node %s" % self.op.pnode
9883 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
9884 pnode.name, errors.ECODE_STATE)
9886 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
9887 pnode.name, errors.ECODE_STATE)
9888 if not pnode.vm_capable:
9889 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
9890 " '%s'" % pnode.name, errors.ECODE_STATE)
9892 self.secondaries = []
9894 # mirror node verification
9895 if self.op.disk_template in constants.DTS_INT_MIRROR:
9896 if self.op.snode == pnode.name:
9897 raise errors.OpPrereqError("The secondary node cannot be the"
9898 " primary node", errors.ECODE_INVAL)
9899 _CheckNodeOnline(self, self.op.snode)
9900 _CheckNodeNotDrained(self, self.op.snode)
9901 _CheckNodeVmCapable(self, self.op.snode)
9902 self.secondaries.append(self.op.snode)
9904 snode = self.cfg.GetNodeInfo(self.op.snode)
9905 if pnode.group != snode.group:
9906 self.LogWarning("The primary and secondary nodes are in two"
9907 " different node groups; the disk parameters"
9908 " from the first disk's node group will be"
9911 nodenames = [pnode.name] + self.secondaries
9913 # Verify instance specs
9914 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
9915 ispec = {
9916 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
9917 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
9918 constants.ISPEC_DISK_COUNT: len(self.disks),
9919 constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
9920 constants.ISPEC_NIC_COUNT: len(self.nics),
9921 constants.ISPEC_SPINDLE_USE: spindle_use,
9922 }
9924 group_info = self.cfg.GetNodeGroup(pnode.group)
9925 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
9926 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
9927 if not self.op.ignore_ipolicy and res:
9928 raise errors.OpPrereqError(("Instance allocation to group %s violates"
9929 " policy: %s") % (pnode.group,
9930 utils.CommaJoin(res)),
9931 errors.ECODE_INVAL)
9933 if not self.adopt_disks:
9934 if self.op.disk_template == constants.DT_RBD:
9935 # _CheckRADOSFreeSpace() is just a placeholder.
9936 # Any function that checks prerequisites can be placed here.
9937 # Check if there is enough space on the RADOS cluster.
9938 _CheckRADOSFreeSpace()
9940 # Check lv size requirements, if not adopting
9941 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
9942 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
9944 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
9945 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
9946 disk[constants.IDISK_ADOPT])
9947 for disk in self.disks])
9948 if len(all_lvs) != len(self.disks):
9949 raise errors.OpPrereqError("Duplicate volume names given for adoption",
9951 for lv_name in all_lvs:
9953 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
9954 # to ReserveLV uses the same syntax
9955 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
9956 except errors.ReservationError:
9957 raise errors.OpPrereqError("LV named %s used by another instance" %
9958 lv_name, errors.ECODE_NOTUNIQUE)
9960 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
9961 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
9963 node_lvs = self.rpc.call_lv_list([pnode.name],
9964 vg_names.payload.keys())[pnode.name]
9965 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
9966 node_lvs = node_lvs.payload
9968 delta = all_lvs.difference(node_lvs.keys())
9970 raise errors.OpPrereqError("Missing logical volume(s): %s" %
9971 utils.CommaJoin(delta),
9972 errors.ECODE_INVAL)
9973 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
9974 if online_lvs:
9975 raise errors.OpPrereqError("Online logical volumes found, cannot"
9976 " adopt: %s" % utils.CommaJoin(online_lvs),
9978 # update the size of disk based on what is found
9979 for dsk in self.disks:
9980 dsk[constants.IDISK_SIZE] = \
9981 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
9982 dsk[constants.IDISK_ADOPT])][0]))
9984 elif self.op.disk_template == constants.DT_BLOCK:
9985 # Normalize and de-duplicate device paths
9986 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
9987 for disk in self.disks])
9988 if len(all_disks) != len(self.disks):
9989 raise errors.OpPrereqError("Duplicate disk names given for adoption",
9991 baddisks = [d for d in all_disks
9992 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
9994 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
9995 " cannot be adopted" %
9996 (", ".join(baddisks),
9997 constants.ADOPTABLE_BLOCKDEV_ROOT),
9998 errors.ECODE_INVAL)
10000 node_disks = self.rpc.call_bdev_sizes([pnode.name],
10001 list(all_disks))[pnode.name]
10002 node_disks.Raise("Cannot get block device information from node %s" %
10004 node_disks = node_disks.payload
10005 delta = all_disks.difference(node_disks.keys())
10006 if delta:
10007 raise errors.OpPrereqError("Missing block device(s): %s" %
10008 utils.CommaJoin(delta),
10009 errors.ECODE_INVAL)
10010 for dsk in self.disks:
10011 dsk[constants.IDISK_SIZE] = \
10012 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
10014 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
10016 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
10017 # check OS parameters (remotely)
10018 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
10020 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
10022 # memory check on primary node
10023 #TODO(dynmem): use MINMEM for checking
10024 if self.op.start:
10025 _CheckNodeFreeMemory(self, self.pnode.name,
10026 "creating instance %s" % self.op.instance_name,
10027 self.be_full[constants.BE_MAXMEM],
10028 self.op.hypervisor)
10030 self.dry_run_result = list(nodenames)
10032 def Exec(self, feedback_fn):
10033 """Create and add the instance to the cluster.
10036 instance = self.op.instance_name
10037 pnode_name = self.pnode.name
10039 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
10040 self.owned_locks(locking.LEVEL_NODE)), \
10041 "Node locks differ from node resource locks"
10043 ht_kind = self.op.hypervisor
10044 if ht_kind in constants.HTS_REQ_PORT:
10045 network_port = self.cfg.AllocatePort()
10046 else:
10047 network_port = None
10049 # This is ugly but we got a chicken-egg problem here
10050 # We can only take the group disk parameters, as the instance
10051 # has no disks yet (we are generating them right here).
10052 node = self.cfg.GetNodeInfo(pnode_name)
10053 nodegroup = self.cfg.GetNodeGroup(node.group)
10054 disks = _GenerateDiskTemplate(self,
10055 self.op.disk_template,
10056 instance, pnode_name,
10057 self.secondaries,
10058 self.disks,
10059 self.instance_file_storage_dir,
10060 self.op.file_driver,
10061 0,
10062 feedback_fn,
10063 self.cfg.GetGroupDiskParams(nodegroup))
10065 iobj = objects.Instance(name=instance, os=self.op.os_type,
10066 primary_node=pnode_name,
10067 nics=self.nics, disks=disks,
10068 disk_template=self.op.disk_template,
10069 admin_state=constants.ADMINST_DOWN,
10070 network_port=network_port,
10071 beparams=self.op.beparams,
10072 hvparams=self.op.hvparams,
10073 hypervisor=self.op.hypervisor,
10074 osparams=self.op.osparams,
10075 )
10077 if self.op.tags:
10078 for tag in self.op.tags:
10079 iobj.AddTag(tag)
10081 if self.adopt_disks:
10082 if self.op.disk_template == constants.DT_PLAIN:
10083 # rename LVs to the newly-generated names; we need to construct
10084 # 'fake' LV disks with the old data, plus the new unique_id
10085 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
10086 rename_to = []
10087 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
10088 rename_to.append(t_dsk.logical_id)
10089 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
10090 self.cfg.SetDiskID(t_dsk, pnode_name)
10091 result = self.rpc.call_blockdev_rename(pnode_name,
10092 zip(tmp_disks, rename_to))
10093 result.Raise("Failed to rename adoped LVs")
10095 feedback_fn("* creating instance disks...")
10097 _CreateDisks(self, iobj)
10098 except errors.OpExecError:
10099 self.LogWarning("Device creation failed, reverting...")
10101 _RemoveDisks(self, iobj)
10103 self.cfg.ReleaseDRBDMinors(instance)
10106 feedback_fn("adding instance %s to cluster config" % instance)
10108 self.cfg.AddInstance(iobj, self.proc.GetECId())
10110 # Declare that we don't want to remove the instance lock anymore, as we've
10111 # added the instance to the config
10112 del self.remove_locks[locking.LEVEL_INSTANCE]
10114 if self.op.mode == constants.INSTANCE_IMPORT:
10115 # Release unused nodes
10116 _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
10117 else:
10118 # Release all nodes
10119 _ReleaseLocks(self, locking.LEVEL_NODE)
10121 disk_abort = False
10122 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
10123 feedback_fn("* wiping instance disks...")
10125 _WipeDisks(self, iobj)
10126 except errors.OpExecError, err:
10127 logging.exception("Wiping disks failed")
10128 self.LogWarning("Wiping instance disks failed (%s)", err)
10132 # Something is already wrong with the disks, don't do anything else
10134 elif self.op.wait_for_sync:
10135 disk_abort = not _WaitForSync(self, iobj)
10136 elif iobj.disk_template in constants.DTS_INT_MIRROR:
10137 # make sure the disks are not degraded (still sync-ing is ok)
10138 feedback_fn("* checking mirrors status")
10139 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
10140 else:
10141 disk_abort = False
10143 if disk_abort:
10144 _RemoveDisks(self, iobj)
10145 self.cfg.RemoveInstance(iobj.name)
10146 # Make sure the instance lock gets removed
10147 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
10148 raise errors.OpExecError("There are some degraded disks for"
10151 # Release all node resource locks
10152 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
10154 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
10155 # we need to set the disks ID to the primary node, since the
10156 # preceding code might or might have not done it, depending on
10157 # disk template and other options
10158 for disk in iobj.disks:
10159 self.cfg.SetDiskID(disk, pnode_name)
10160 if self.op.mode == constants.INSTANCE_CREATE:
10161 if not self.op.no_install:
10162 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
10163 not self.op.wait_for_sync)
10164 if pause_sync:
10165 feedback_fn("* pausing disk sync to install instance OS")
10166 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10167 (iobj.disks,
10168 iobj), True)
10169 for idx, success in enumerate(result.payload):
10171 logging.warn("pause-sync of instance %s for disk %d failed",
10174 feedback_fn("* running the instance OS create scripts...")
10175 # FIXME: pass debug option from opcode to backend
10176 os_add_result = \
10177 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
10178 self.op.debug_level)
10179 if pause_sync:
10180 feedback_fn("* resuming disk sync")
10181 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10182 (iobj.disks,
10183 iobj), False)
10184 for idx, success in enumerate(result.payload):
10186 logging.warn("resume-sync of instance %s for disk %d failed",
10189 os_add_result.Raise("Could not add os for instance %s"
10190 " on node %s" % (instance, pnode_name))
10193 if self.op.mode == constants.INSTANCE_IMPORT:
10194 feedback_fn("* running the instance OS import scripts...")
10198 for idx, image in enumerate(self.src_images):
10202 # FIXME: pass debug option from opcode to backend
10203 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
10204 constants.IEIO_FILE, (image, ),
10205 constants.IEIO_SCRIPT,
10206 (iobj.disks[idx], idx),
10207 None)
10208 transfers.append(dt)
10210 import_result = \
10211 masterd.instance.TransferInstanceData(self, feedback_fn,
10212 self.op.src_node, pnode_name,
10213 self.pnode.secondary_ip,
10214 iobj, transfers)
10215 if not compat.all(import_result):
10216 self.LogWarning("Some disks for instance %s on node %s were not"
10217 " imported successfully" % (instance, pnode_name))
10219 rename_from = self._old_instance_name
10221 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
10222 feedback_fn("* preparing remote import...")
10223 # The source cluster will stop the instance before attempting to make
10224 # a connection. In some cases stopping an instance can take a long
10225 # time, hence the shutdown timeout is added to the connection
10226 # timeout.
10227 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
10228 self.op.source_shutdown_timeout)
10229 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10231 assert iobj.primary_node == self.pnode.name
10232 disk_results = \
10233 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
10234 self.source_x509_ca,
10235 self._cds, timeouts)
10236 if not compat.all(disk_results):
10237 # TODO: Should the instance still be started, even if some disks
10238 # failed to import (valid for local imports, too)?
10239 self.LogWarning("Some disks for instance %s on node %s were not"
10240 " imported successfully" % (instance, pnode_name))
10242 rename_from = self.source_instance_name
10244 else:
10245 # also checked in the prereq part
10246 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
10249 # Run rename script on newly imported instance
10250 assert iobj.name == instance
10251 feedback_fn("Running rename script for %s" % instance)
10252 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
10253 rename_from,
10254 self.op.debug_level)
10255 if result.fail_msg:
10256 self.LogWarning("Failed to run rename script for %s on node"
10257 " %s: %s" % (instance, pnode_name, result.fail_msg))
10259 assert not self.owned_locks(locking.LEVEL_NODE_RES)
10261 if self.op.start:
10262 iobj.admin_state = constants.ADMINST_UP
10263 self.cfg.Update(iobj, feedback_fn)
10264 logging.info("Starting instance %s on node %s", instance, pnode_name)
10265 feedback_fn("* starting instance...")
10266 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
10267 False)
10268 result.Raise("Could not start instance")
10270 return list(iobj.all_nodes)
10273 def _CheckRADOSFreeSpace():
10274 """Compute disk size requirements inside the RADOS cluster.
10276 """
10277 # For the RADOS cluster we assume there is always enough space.
10278 pass
10281 class LUInstanceConsole(NoHooksLU):
10282 """Connect to an instance's console.
10284 This is somewhat special in that it returns the command line that
10285 you need to run on the master node in order to connect to the
10286 console.
10288 """
10289 REQ_BGL = False
10291 def ExpandNames(self):
10292 self.share_locks = _ShareAll()
10293 self._ExpandAndLockInstance()
10295 def CheckPrereq(self):
10296 """Check prerequisites.
10298 This checks that the instance is in the cluster.
10300 """
10301 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10302 assert self.instance is not None, \
10303 "Cannot retrieve locked instance %s" % self.op.instance_name
10304 _CheckNodeOnline(self, self.instance.primary_node)
10306 def Exec(self, feedback_fn):
10307 """Connect to the console of an instance
10310 instance = self.instance
10311 node = instance.primary_node
10313 node_insts = self.rpc.call_instance_list([node],
10314 [instance.hypervisor])[node]
10315 node_insts.Raise("Can't get node information from %s" % node)
10317 if instance.name not in node_insts.payload:
10318 if instance.admin_state == constants.ADMINST_UP:
10319 state = constants.INSTST_ERRORDOWN
10320 elif instance.admin_state == constants.ADMINST_DOWN:
10321 state = constants.INSTST_ADMINDOWN
10323 state = constants.INSTST_ADMINOFFLINE
10324 raise errors.OpExecError("Instance %s is not running (state %s)" %
10325 (instance.name, state))
10327 logging.debug("Connecting to console of %s on %s", instance.name, node)
10329 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
10332 def _GetInstanceConsole(cluster, instance):
10333 """Returns console information for an instance.
10335 @type cluster: L{objects.Cluster}
10336 @type instance: L{objects.Instance}
10339 """
10340 hyper = hypervisor.GetHypervisor(instance.hypervisor)
10341 # beparams and hvparams are passed separately, to avoid editing the
10342 # instance and then saving the defaults in the instance itself.
10343 hvparams = cluster.FillHV(instance)
10344 beparams = cluster.FillBE(instance)
10345 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
10347 assert console.instance == instance.name
10348 assert console.Validate()
10350 return console.ToDict()
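10351 # (the returned dict is what console clients consume; its exact fields depend on the hypervisor's GetInstanceConsole implementation)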
10353 class LUInstanceReplaceDisks(LogicalUnit):
10354 """Replace the disks of an instance.
10357 HPATH = "mirrors-replace"
10358 HTYPE = constants.HTYPE_INSTANCE
10359 REQ_BGL = False
10361 def CheckArguments(self):
10362 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
10363 self.op.iallocator)
10365 def ExpandNames(self):
10366 self._ExpandAndLockInstance()
10368 assert locking.LEVEL_NODE not in self.needed_locks
10369 assert locking.LEVEL_NODE_RES not in self.needed_locks
10370 assert locking.LEVEL_NODEGROUP not in self.needed_locks
10372 assert self.op.iallocator is None or self.op.remote_node is None, \
10373 "Conflicting options"
10375 if self.op.remote_node is not None:
10376 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10378 # Warning: do not remove the locking of the new secondary here
10379 # unless DRBD8.AddChildren is changed to work in parallel;
10380 # currently it doesn't since parallel invocations of
10381 # FindUnusedMinor will conflict
10382 self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
10383 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
10384 else:
10385 self.needed_locks[locking.LEVEL_NODE] = []
10386 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10388 if self.op.iallocator is not None:
10389 # iallocator will select a new node in the same group
10390 self.needed_locks[locking.LEVEL_NODEGROUP] = []
10392 self.needed_locks[locking.LEVEL_NODE_RES] = []
10394 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
10395 self.op.iallocator, self.op.remote_node,
10396 self.op.disks, False, self.op.early_release,
10397 self.op.ignore_ipolicy)
10399 self.tasklets = [self.replacer]
10401 def DeclareLocks(self, level):
10402 if level == locking.LEVEL_NODEGROUP:
10403 assert self.op.remote_node is None
10404 assert self.op.iallocator is not None
10405 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
10407 self.share_locks[locking.LEVEL_NODEGROUP] = 1
10408 # Lock all groups used by instance optimistically; this requires going
10409 # via the node before it's locked, requiring verification later on
10410 self.needed_locks[locking.LEVEL_NODEGROUP] = \
10411 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
10413 elif level == locking.LEVEL_NODE:
10414 if self.op.iallocator is not None:
10415 assert self.op.remote_node is None
10416 assert not self.needed_locks[locking.LEVEL_NODE]
10418 # Lock member nodes of all locked groups
10419 self.needed_locks[locking.LEVEL_NODE] = [node_name
10420 for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
10421 for node_name in self.cfg.GetNodeGroup(group_uuid).members]
10422 else:
10423 self._LockInstancesNodes()
10424 elif level == locking.LEVEL_NODE_RES:
10426 self.needed_locks[locking.LEVEL_NODE_RES] = \
10427 self.needed_locks[locking.LEVEL_NODE]
10429 def BuildHooksEnv(self):
10430 """Build hooks env.
10432 This runs on the master, the primary and all the secondaries.
10434 """
10435 instance = self.replacer.instance
10436 env = {
10437 "MODE": self.op.mode,
10438 "NEW_SECONDARY": self.op.remote_node,
10439 "OLD_SECONDARY": instance.secondary_nodes[0],
10440 }
10441 env.update(_BuildInstanceHookEnvByObject(self, instance))
10442 return env
10444 def BuildHooksNodes(self):
10445 """Build hooks nodes.
10448 instance = self.replacer.instance
10450 self.cfg.GetMasterNode(),
10451 instance.primary_node,
10453 if self.op.remote_node is not None:
10454 nl.append(self.op.remote_node)
10457 def CheckPrereq(self):
10458 """Check prerequisites.
10461 assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
10462 self.op.iallocator is None)
10464 # Verify if node group locks are still correct
10465 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
10467 _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
10469 return LogicalUnit.CheckPrereq(self)
10472 class TLReplaceDisks(Tasklet):
10473 """Replaces disks for an instance.
10475 Note: Locking is not within the scope of this class.
10477 """
10478 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
10479 disks, delay_iallocator, early_release, ignore_ipolicy):
10480 """Initializes this class.
10483 Tasklet.__init__(self, lu)
10486 self.instance_name = instance_name
10488 self.iallocator_name = iallocator_name
10489 self.remote_node = remote_node
10490 self.disks = disks
10491 self.delay_iallocator = delay_iallocator
10492 self.early_release = early_release
10493 self.ignore_ipolicy = ignore_ipolicy
10496 self.instance = None
10497 self.new_node = None
10498 self.target_node = None
10499 self.other_node = None
10500 self.remote_node_info = None
10501 self.node_secondary_ip = None
10503 @staticmethod
10504 def CheckArguments(mode, remote_node, iallocator):
10505 """Helper function for users of this class.
10508 # check for valid parameter combination
10509 if mode == constants.REPLACE_DISK_CHG:
10510 if remote_node is None and iallocator is None:
10511 raise errors.OpPrereqError("When changing the secondary either an"
10512 " iallocator script must be used or the"
10513 " new node given", errors.ECODE_INVAL)
10515 if remote_node is not None and iallocator is not None:
10516 raise errors.OpPrereqError("Give either the iallocator or the new"
10517 " secondary, not both", errors.ECODE_INVAL)
10519 elif remote_node is not None or iallocator is not None:
10520 # Not replacing the secondary
10521 raise errors.OpPrereqError("The iallocator and new node options can"
10522 " only be used when changing the"
10523 " secondary node", errors.ECODE_INVAL)
10525 @staticmethod
10526 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
10527 """Compute a new secondary node using an IAllocator.
10530 ial = IAllocator(lu.cfg, lu.rpc,
10531 mode=constants.IALLOCATOR_MODE_RELOC,
10532 name=instance_name,
10533 relocate_from=list(relocate_from))
10535 ial.Run(iallocator_name)
10537 if not ial.success:
10538 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
10539 " %s" % (iallocator_name, ial.info),
10540 errors.ECODE_NORES)
10542 if len(ial.result) != ial.required_nodes:
10543 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
10544 " of nodes (%s), required %s" %
10545 (iallocator_name,
10546 len(ial.result), ial.required_nodes),
10547 errors.ECODE_FAULT)
10549 remote_node_name = ial.result[0]
10551 lu.LogInfo("Selected new secondary for instance '%s': %s",
10552 instance_name, remote_node_name)
10554 return remote_node_name
10556 def _FindFaultyDisks(self, node_name):
10557 """Wrapper for L{_FindFaultyInstanceDisks}.
10560 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
10563 def _CheckDisksActivated(self, instance):
10564 """Checks if the instance disks are activated.
10566 @param instance: The instance to check disks
10567 @return: True if they are activated, False otherwise
10569 """
10570 nodes = instance.all_nodes
10572 for idx, dev in enumerate(instance.disks):
10573 for node in nodes:
10574 self.lu.LogInfo("Checking disk/%d on %s", idx, node)
10575 self.cfg.SetDiskID(dev, node)
10577 result = _BlockdevFind(self, node, dev, instance)
10579 if result.offline:
10580 continue
10581 elif result.fail_msg or not result.payload:
10582 return False
10584 return True
10586 def CheckPrereq(self):
10587 """Check prerequisites.
10589 This checks that the instance is in the cluster.
10591 """
10592 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
10593 assert instance is not None, \
10594 "Cannot retrieve locked instance %s" % self.instance_name
10596 if instance.disk_template != constants.DT_DRBD8:
10597 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
10598 " instances", errors.ECODE_INVAL)
10600 if len(instance.secondary_nodes) != 1:
10601 raise errors.OpPrereqError("The instance has a strange layout,"
10602 " expected one secondary but found %d" %
10603 len(instance.secondary_nodes),
10604 errors.ECODE_FAULT)
10606 if not self.delay_iallocator:
10607 self._CheckPrereq2()
10609 def _CheckPrereq2(self):
10610 """Check prerequisites, second part.
10612 This function should always be part of CheckPrereq. It was separated and is
10613 now called from Exec because during node evacuation iallocator was only
10614 called with an unmodified cluster model, not taking planned changes into
10615 account.
10617 """
10618 instance = self.instance
10619 secondary_node = instance.secondary_nodes[0]
10621 if self.iallocator_name is None:
10622 remote_node = self.remote_node
10623 else:
10624 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
10625 instance.name, instance.secondary_nodes)
10627 if remote_node is None:
10628 self.remote_node_info = None
10629 else:
10630 assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
10631 "Remote node '%s' is not locked" % remote_node
10633 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
10634 assert self.remote_node_info is not None, \
10635 "Cannot retrieve locked node %s" % remote_node
10637 if remote_node == self.instance.primary_node:
10638 raise errors.OpPrereqError("The specified node is the primary node of"
10639 " the instance", errors.ECODE_INVAL)
10641 if remote_node == secondary_node:
10642 raise errors.OpPrereqError("The specified node is already the"
10643 " secondary node of the instance",
10644 errors.ECODE_INVAL)
10646 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
10647 constants.REPLACE_DISK_CHG):
10648 raise errors.OpPrereqError("Cannot specify disks to be replaced",
10649 errors.ECODE_INVAL)
10651 if self.mode == constants.REPLACE_DISK_AUTO:
10652 if not self._CheckDisksActivated(instance):
10653 raise errors.OpPrereqError("Please run activate-disks on instance %s"
10654 " first" % self.instance_name,
10655 errors.ECODE_STATE)
10656 faulty_primary = self._FindFaultyDisks(instance.primary_node)
10657 faulty_secondary = self._FindFaultyDisks(secondary_node)
10659 if faulty_primary and faulty_secondary:
10660 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
10661 " one node and can not be repaired"
10662 " automatically" % self.instance_name,
10663 errors.ECODE_STATE)
10665 if faulty_primary:
10666 self.disks = faulty_primary
10667 self.target_node = instance.primary_node
10668 self.other_node = secondary_node
10669 check_nodes = [self.target_node, self.other_node]
10670 elif faulty_secondary:
10671 self.disks = faulty_secondary
10672 self.target_node = secondary_node
10673 self.other_node = instance.primary_node
10674 check_nodes = [self.target_node, self.other_node]
10675 else:
10676 self.disks = []
10677 check_nodes = []
10679 else:
10680 # Non-automatic modes
10681 if self.mode == constants.REPLACE_DISK_PRI:
10682 self.target_node = instance.primary_node
10683 self.other_node = secondary_node
10684 check_nodes = [self.target_node, self.other_node]
10686 elif self.mode == constants.REPLACE_DISK_SEC:
10687 self.target_node = secondary_node
10688 self.other_node = instance.primary_node
10689 check_nodes = [self.target_node, self.other_node]
10691 elif self.mode == constants.REPLACE_DISK_CHG:
10692 self.new_node = remote_node
10693 self.other_node = instance.primary_node
10694 self.target_node = secondary_node
10695 check_nodes = [self.new_node, self.other_node]
10697 _CheckNodeNotDrained(self.lu, remote_node)
10698 _CheckNodeVmCapable(self.lu, remote_node)
10700 old_node_info = self.cfg.GetNodeInfo(secondary_node)
10701 assert old_node_info is not None
10702 if old_node_info.offline and not self.early_release:
10703 # doesn't make sense to delay the release
10704 self.early_release = True
10705 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
10706 " early-release mode", secondary_node)
10709 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
10712 # If not specified all disks should be replaced
10714 self.disks = range(len(self.instance.disks))
10716 # TODO: This is ugly, but right now we can't distinguish between internal
10717 # submitted opcode and external one. We should fix that.
10718 if self.remote_node_info:
10719 # We change the node, lets verify it still meets instance policy
10720 new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
10721 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
10722 new_group_info)
10723 _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
10724 ignore=self.ignore_ipolicy)
10726 for node in check_nodes:
10727 _CheckNodeOnline(self.lu, node)
10729 touched_nodes = frozenset(node_name for node_name in [self.new_node,
10730 self.other_node,
10731 self.target_node]
10732 if node_name is not None)
10734 # Release unneeded node and node resource locks
10735 _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
10736 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
10738 # Release any owned node group
10739 if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
10740 _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
10742 # Check whether disks are valid
10743 for disk_idx in self.disks:
10744 instance.FindDisk(disk_idx)
10746 # Get secondary node IP addresses
10747 self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
10748 in self.cfg.GetMultiNodeInfo(touched_nodes))
10750 def Exec(self, feedback_fn):
10751 """Execute disk replacement.
10753 This dispatches the disk replacement to the appropriate handler.
10755 """
10756 if self.delay_iallocator:
10757 self._CheckPrereq2()
10760 # Verify owned locks before starting operation
10761 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
10762 assert set(owned_nodes) == set(self.node_secondary_ip), \
10763 ("Incorrect node locks, owning %s, expected %s" %
10764 (owned_nodes, self.node_secondary_ip.keys()))
10765 assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
10766 self.lu.owned_locks(locking.LEVEL_NODE_RES))
10768 owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
10769 assert list(owned_instances) == [self.instance_name], \
10770 "Instance '%s' not locked" % self.instance_name
10772 assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
10773 "Should not own any node group lock at this point"
10776 feedback_fn("No disks need replacement")
10779 feedback_fn("Replacing disk(s) %s for %s" %
10780 (utils.CommaJoin(self.disks), self.instance.name))
10782 activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
10784 # Activate the instance disks if we're replacing them on a down instance
10785 if activate_disks:
10786 _StartInstanceDisks(self.lu, self.instance, True)
10788 try:
10789 # Should we replace the secondary node?
10790 if self.new_node is not None:
10791 fn = self._ExecDrbd8Secondary
10792 else:
10793 fn = self._ExecDrbd8DiskOnly
10795 result = fn(feedback_fn)
10796 finally:
10797 # Deactivate the instance disks if we're replacing them on a
10798 # down instance
10799 if activate_disks:
10800 _SafeShutdownInstanceDisks(self.lu, self.instance)
10802 assert not self.lu.owned_locks(locking.LEVEL_NODE)
10804 if __debug__:
10805 # Verify owned locks
10806 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
10807 nodes = frozenset(self.node_secondary_ip)
10808 assert ((self.early_release and not owned_nodes) or
10809 (not self.early_release and not (set(owned_nodes) - nodes))), \
10810 ("Not owning the correct locks, early_release=%s, owned=%r,"
10811 " nodes=%r" % (self.early_release, owned_nodes, nodes))
10815 def _CheckVolumeGroup(self, nodes):
10816 self.lu.LogInfo("Checking volume groups")
10818 vgname = self.cfg.GetVGName()
10820 # Make sure volume group exists on all involved nodes
10821 results = self.rpc.call_vg_list(nodes)
10823 raise errors.OpExecError("Can't list volume groups on the nodes")
10826 res = results[node]
10827 res.Raise("Error checking node %s" % node)
10828 if vgname not in res.payload:
10829 raise errors.OpExecError("Volume group '%s' not found on node %s" %
10832 def _CheckDisksExistence(self, nodes):
10833 # Check disk existence
10834 for idx, dev in enumerate(self.instance.disks):
10835 if idx not in self.disks:
10836 continue
10838 for node in nodes:
10839 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
10840 self.cfg.SetDiskID(dev, node)
10842 result = _BlockdevFind(self, node, dev, self.instance)
10844 msg = result.fail_msg
10845 if msg or not result.payload:
10847 msg = "disk not found"
10848 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
10851 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
10852 for idx, dev in enumerate(self.instance.disks):
10853 if idx not in self.disks:
10854 continue
10856 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
10859 if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
10860 on_primary, ldisk=ldisk):
10861 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
10862 " replace disks for instance %s" %
10863 (node_name, self.instance.name))
10865 def _CreateNewStorage(self, node_name):
10866 """Create new storage on the primary or secondary node.
10868 This is only used for same-node replaces, not for changing the
10869 secondary node, hence we don't want to modify the existing disk.
10871 """
10872 iv_names = {}
10874 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
10875 for idx, dev in enumerate(disks):
10876 if idx not in self.disks:
10877 continue
10879 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
10881 self.cfg.SetDiskID(dev, node_name)
10883 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
10884 names = _GenerateUniqueNames(self.lu, lv_names)
10886 (data_disk, meta_disk) = dev.children
10887 vg_data = data_disk.logical_id[0]
10888 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
10889 logical_id=(vg_data, names[0]),
10890 params=data_disk.params)
10891 vg_meta = meta_disk.logical_id[0]
10892 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
10893 logical_id=(vg_meta, names[1]),
10894 params=meta_disk.params)
10896 new_lvs = [lv_data, lv_meta]
10897 old_lvs = [child.Copy() for child in dev.children]
10898 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
10900 # we pass force_create=True to force the LVM creation
10901 for new_lv in new_lvs:
10902 _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
10903 _GetInstanceInfoText(self.instance), False)
10905 return iv_names
10907 def _CheckDevices(self, node_name, iv_names):
10908 for name, (dev, _, _) in iv_names.iteritems():
10909 self.cfg.SetDiskID(dev, node_name)
10911 result = _BlockdevFind(self, node_name, dev, self.instance)
10913 msg = result.fail_msg
10914 if msg or not result.payload:
10916 msg = "disk not found"
10917 raise errors.OpExecError("Can't find DRBD device %s: %s" %
10920 if result.payload.is_degraded:
10921 raise errors.OpExecError("DRBD device %s is degraded!" % name)
10923 def _RemoveOldStorage(self, node_name, iv_names):
10924 for name, (_, old_lvs, _) in iv_names.iteritems():
10925 self.lu.LogInfo("Remove logical volumes for %s" % name)
10927 for lv in old_lvs:
10928 self.cfg.SetDiskID(lv, node_name)
10930 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
10932 self.lu.LogWarning("Can't remove old LV: %s" % msg,
10933 hint="remove unused LVs manually")
10935 def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
10936 """Replace a disk on the primary or secondary for DRBD 8.
10938 The algorithm for replace is quite complicated:
10940 1. for each disk to be replaced:
10942 1. create new LVs on the target node with unique names
10943 1. detach old LVs from the drbd device
10944 1. rename old LVs to name_replaced.<time_t>
10945 1. rename new LVs to old LVs
10946 1. attach the new LVs (with the old names now) to the drbd device
10948 1. wait for sync across all devices
10950 1. for each modified disk:
10952 1. remove old LVs (which have the name name_replaces.<time_t>)
10954 Failures are not very well handled.
10959 # Step: check device activation
10960 self.lu.LogStep(1, steps_total, "Check device existence")
10961 self._CheckDisksExistence([self.other_node, self.target_node])
10962 self._CheckVolumeGroup([self.target_node, self.other_node])
10964 # Step: check other node consistency
10965 self.lu.LogStep(2, steps_total, "Check peer consistency")
10966 self._CheckDisksConsistency(self.other_node,
10967 self.other_node == self.instance.primary_node,
10968 False)
10970 # Step: create new storage
10971 self.lu.LogStep(3, steps_total, "Allocate new storage")
10972 iv_names = self._CreateNewStorage(self.target_node)
10974 # Step: for each lv, detach+rename*2+attach
10975 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
10976 for dev, old_lvs, new_lvs in iv_names.itervalues():
10977 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
10979 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
10980 old_lvs)
10981 result.Raise("Can't detach drbd from local storage on node"
10982 " %s for device %s" % (self.target_node, dev.iv_name))
10984 #cfg.Update(instance)
10986 # ok, we created the new LVs, so now we know we have the needed
10987 # storage; as such, we proceed on the target node to rename
10988 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
10989 # using the assumption that logical_id == physical_id (which in
10990 # turn is the unique_id on that node)
10992 # FIXME(iustin): use a better name for the replaced LVs
10993 temp_suffix = int(time.time())
10994 ren_fn = lambda d, suff: (d.physical_id[0],
10995 d.physical_id[1] + "_replaced-%s" % suff)
10997 # Build the rename list based on what LVs exist on the node
10998 rename_old_to_new = []
10999 for to_ren in old_lvs:
11000 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
11001 if not result.fail_msg and result.payload:
11003 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
11005 self.lu.LogInfo("Renaming the old LVs on the target node")
11006 result = self.rpc.call_blockdev_rename(self.target_node,
11007 rename_old_to_new)
11008 result.Raise("Can't rename old LVs on node %s" % self.target_node)
11010 # Now we rename the new LVs to the old LVs
11011 self.lu.LogInfo("Renaming the new LVs on the target node")
11012 rename_new_to_old = [(new, old.physical_id)
11013 for old, new in zip(old_lvs, new_lvs)]
11014 result = self.rpc.call_blockdev_rename(self.target_node,
11016 result.Raise("Can't rename new LVs on node %s" % self.target_node)
11018 # Intermediate steps of in memory modifications
11019 for old, new in zip(old_lvs, new_lvs):
11020 new.logical_id = old.logical_id
11021 self.cfg.SetDiskID(new, self.target_node)
11023 # We need to modify old_lvs so that removal later removes the
11024 # right LVs, not the newly added ones; note that old_lvs is a copy of dev.children
11026 for disk in old_lvs:
11027 disk.logical_id = ren_fn(disk, temp_suffix)
11028 self.cfg.SetDiskID(disk, self.target_node)
11030 # Now that the new lvs have the old name, we can add them to the device
11031 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
11032 result = self.rpc.call_blockdev_addchildren(self.target_node,
11033 (dev, self.instance), new_lvs)
11034 msg = result.fail_msg
11036 for new_lv in new_lvs:
11037 msg2 = self.rpc.call_blockdev_remove(self.target_node,
11040 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
11041 hint=("cleanup manually the unused logical"
11043 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
11045 cstep = itertools.count(5)
11047 if self.early_release:
11048 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11049 self._RemoveOldStorage(self.target_node, iv_names)
11050 # TODO: Check if releasing locks early still makes sense
11051 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11053 # Release all resource locks except those used by the instance
11054 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11055 keep=self.node_secondary_ip.keys())
11057 # Release all node locks while waiting for sync
11058 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11060 # TODO: Can the instance lock be downgraded here? Take the optional disk
11061 # shutdown in the caller into consideration.
11064 # This can fail as the old devices are degraded and _WaitForSync
11065 # does a combined result over all disks, so we don't check its return value
11066 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11067 _WaitForSync(self.lu, self.instance)
11069 # Check all devices manually
11070 self._CheckDevices(self.instance.primary_node, iv_names)
11072 # Step: remove old storage
11073 if not self.early_release:
11074 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11075 self._RemoveOldStorage(self.target_node, iv_names)
11077 def _ExecDrbd8Secondary(self, feedback_fn):
11078 """Replace the secondary node for DRBD 8.
11080 The algorithm for replace is quite complicated:
11081 - for all disks of the instance:
11082 - create new LVs on the new node with same names
11083 - shutdown the drbd device on the old secondary
11084 - disconnect the drbd network on the primary
11085 - create the drbd device on the new secondary
11086 - network attach the drbd on the primary, using an artifice:
11087 the drbd code for Attach() will connect to the network if it
11088 finds a device which is connected to the good local disks but
11089 not network enabled
11090 - wait for sync across all devices
11091 - remove all disks from the old secondary
11093 Failures are not very well handled.
11098 pnode = self.instance.primary_node
11100 # Step: check device activation
11101 self.lu.LogStep(1, steps_total, "Check device existence")
11102 self._CheckDisksExistence([self.instance.primary_node])
11103 self._CheckVolumeGroup([self.instance.primary_node])
11105 # Step: check other node consistency
11106 self.lu.LogStep(2, steps_total, "Check peer consistency")
11107 self._CheckDisksConsistency(self.instance.primary_node, True, True)
11109 # Step: create new storage
11110 self.lu.LogStep(3, steps_total, "Allocate new storage")
11111 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
11112 for idx, dev in enumerate(disks):
11113 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
11114 (self.new_node, idx))
11115 # we pass force_create=True to force LVM creation
11116 for new_lv in dev.children:
11117 _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
11118 True, _GetInstanceInfoText(self.instance), False)
11120 # Step 4: drbd minors and drbd setup changes
11121 # after this, we must manually remove the drbd minors on both the
11122 # error and the success paths
11123 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
11124 minors = self.cfg.AllocateDRBDMinor([self.new_node
11125 for dev in self.instance.disks],
11126 self.instance.name)
11127 logging.debug("Allocated minors %r", minors)
11130 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
11131 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
11132 (self.new_node, idx))
11133 # create new devices on new_node; note that we create two IDs:
11134 # one without port, so the drbd will be activated without
11135 # networking information on the new node at this stage, and one
11136 # with network, for the latter activation in step 4
11137 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
11138 if self.instance.primary_node == o_node1:
11141 assert self.instance.primary_node == o_node2, "Three-node instance?"
11144 new_alone_id = (self.instance.primary_node, self.new_node, None,
11145 p_minor, new_minor, o_secret)
11146 new_net_id = (self.instance.primary_node, self.new_node, o_port,
11147 p_minor, new_minor, o_secret)
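# Illustrative example (hypothetical values): with primary node "node1", new
# secondary "node3", DRBD port 11000, primary minor 0 and newly allocated
# minor 2, new_alone_id would be ("node1", "node3", None, 0, 2, secret) and
# new_net_id ("node1", "node3", 11000, 0, 2, secret); the None port keeps
# the first activation on the new node network-less.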
11149 iv_names[idx] = (dev, dev.children, new_net_id)
11150 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
11152 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
11153 logical_id=new_alone_id,
11154 children=dev.children,
11157 (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
11160 _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
11162 _GetInstanceInfoText(self.instance), False)
11163 except errors.GenericError:
11164 self.cfg.ReleaseDRBDMinors(self.instance.name)
11167 # We have new devices, shutdown the drbd on the old secondary
11168 for idx, dev in enumerate(self.instance.disks):
11169 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
11170 self.cfg.SetDiskID(dev, self.target_node)
11171 msg = self.rpc.call_blockdev_shutdown(self.target_node,
11172 (dev, self.instance)).fail_msg
11174 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
11175 "node: %s" % (idx, msg),
11176 hint=("Please cleanup this device manually as"
11177 " soon as possible"))
11179 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
11180 result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
11181 self.instance.disks)[pnode]
11183 msg = result.fail_msg
11185 # detaches didn't succeed (unlikely)
11186 self.cfg.ReleaseDRBDMinors(self.instance.name)
11187 raise errors.OpExecError("Can't detach the disks from the network on"
11188 " old node: %s" % (msg,))
11190 # if we managed to detach at least one, we update all the disks of
11191 # the instance to point to the new secondary
11192 self.lu.LogInfo("Updating instance configuration")
11193 for dev, _, new_logical_id in iv_names.itervalues():
11194 dev.logical_id = new_logical_id
11195 self.cfg.SetDiskID(dev, self.instance.primary_node)
11197 self.cfg.Update(self.instance, feedback_fn)
11199 # Release all node locks (the configuration has been updated)
11200 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11202 # and now perform the drbd attach
11203 self.lu.LogInfo("Attaching primary drbds to new secondary"
11204 " (standalone => connected)")
11205 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
11207 self.node_secondary_ip,
11208 (self.instance.disks, self.instance),
11209 self.instance.name,
11211 for to_node, to_result in result.items():
11212 msg = to_result.fail_msg
11214 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
11216 hint=("please do a gnt-instance info to see the"
11217 " status of disks"))
11219 cstep = itertools.count(5)
11221 if self.early_release:
11222 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11223 self._RemoveOldStorage(self.target_node, iv_names)
11224 # TODO: Check if releasing locks early still makes sense
11225 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11227 # Release all resource locks except those used by the instance
11228 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11229 keep=self.node_secondary_ip.keys())
11231 # TODO: Can the instance lock be downgraded here? Take the optional disk
11232 # shutdown in the caller into consideration.
11235 # This can fail as the old devices are degraded and _WaitForSync
11236 # does a combined result over all disks, so we don't check its return value
11237 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11238 _WaitForSync(self.lu, self.instance)
11240 # Check all devices manually
11241 self._CheckDevices(self.instance.primary_node, iv_names)
11243 # Step: remove old storage
11244 if not self.early_release:
11245 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11246 self._RemoveOldStorage(self.target_node, iv_names)
11249 class LURepairNodeStorage(NoHooksLU):
11250 """Repairs the volume group on a node.
11255 def CheckArguments(self):
11256 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11258 storage_type = self.op.storage_type
11260 if (constants.SO_FIX_CONSISTENCY not in
11261 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
11262 raise errors.OpPrereqError("Storage units of type '%s' can not be"
11263 " repaired" % storage_type,
11264 errors.ECODE_INVAL)
11266 def ExpandNames(self):
11267 self.needed_locks = {
11268 locking.LEVEL_NODE: [self.op.node_name],
11271 def _CheckFaultyDisks(self, instance, node_name):
11272 """Ensure faulty disks abort the opcode or at least warn."""
11274 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
11276 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
11277 " node '%s'" % (instance.name, node_name),
11278 errors.ECODE_STATE)
11279 except errors.OpPrereqError, err:
11280 if self.op.ignore_consistency:
11281 self.proc.LogWarning(str(err.args[0]))
11285 def CheckPrereq(self):
11286 """Check prerequisites.
11289 # Check whether any instance on this node has faulty disks
11290 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
11291 if inst.admin_state != constants.ADMINST_UP:
11293 check_nodes = set(inst.all_nodes)
11294 check_nodes.discard(self.op.node_name)
11295 for inst_node_name in check_nodes:
11296 self._CheckFaultyDisks(inst, inst_node_name)
11298 def Exec(self, feedback_fn):
11299 feedback_fn("Repairing storage unit '%s' on %s ..." %
11300 (self.op.name, self.op.node_name))
11302 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
11303 result = self.rpc.call_storage_execute(self.op.node_name,
11304 self.op.storage_type, st_args,
11306 constants.SO_FIX_CONSISTENCY)
11307 result.Raise("Failed to repair storage unit '%s' on %s" %
11308 (self.op.name, self.op.node_name))
11311 class LUNodeEvacuate(NoHooksLU):
11312 """Evacuates instances off a list of nodes.
11317 _MODE2IALLOCATOR = {
11318 constants.NODE_EVAC_PRI: constants.IALLOCATOR_NEVAC_PRI,
11319 constants.NODE_EVAC_SEC: constants.IALLOCATOR_NEVAC_SEC,
11320 constants.NODE_EVAC_ALL: constants.IALLOCATOR_NEVAC_ALL,
11322 assert frozenset(_MODE2IALLOCATOR.keys()) == constants.NODE_EVAC_MODES
11323 assert (frozenset(_MODE2IALLOCATOR.values()) ==
11324 constants.IALLOCATOR_NEVAC_MODES)
11326 def CheckArguments(self):
11327 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
11329 def ExpandNames(self):
11330 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11332 if self.op.remote_node is not None:
11333 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
11334 assert self.op.remote_node
11336 if self.op.remote_node == self.op.node_name:
11337 raise errors.OpPrereqError("Can not use evacuated node as a new"
11338 " secondary node", errors.ECODE_INVAL)
11340 if self.op.mode != constants.NODE_EVAC_SEC:
11341 raise errors.OpPrereqError("Without the use of an iallocator only"
11342 " secondary instances can be evacuated",
11343 errors.ECODE_INVAL)
11346 self.share_locks = _ShareAll()
11347 self.needed_locks = {
11348 locking.LEVEL_INSTANCE: [],
11349 locking.LEVEL_NODEGROUP: [],
11350 locking.LEVEL_NODE: [],
11353 # Determine nodes (via group) optimistically, needs verification once locks
11354 # have been acquired
11355 self.lock_nodes = self._DetermineNodes()
11357 def _DetermineNodes(self):
11358 """Gets the list of nodes to operate on.
11361 if self.op.remote_node is None:
11362 # Iallocator will choose any node(s) in the same group
11363 group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
11365 group_nodes = frozenset([self.op.remote_node])
11367 # Determine nodes to be locked
11368 return set([self.op.node_name]) | group_nodes
11370 def _DetermineInstances(self):
11371 """Builds list of instances to operate on.
11374 assert self.op.mode in constants.NODE_EVAC_MODES
11376 if self.op.mode == constants.NODE_EVAC_PRI:
11377 # Primary instances only
11378 inst_fn = _GetNodePrimaryInstances
11379 assert self.op.remote_node is None, \
11380 "Evacuating primary instances requires iallocator"
11381 elif self.op.mode == constants.NODE_EVAC_SEC:
11382 # Secondary instances only
11383 inst_fn = _GetNodeSecondaryInstances
11386 assert self.op.mode == constants.NODE_EVAC_ALL
11387 inst_fn = _GetNodeInstances
11388 # TODO: In 2.6, change the iallocator interface to take an evacuation mode
11390 raise errors.OpPrereqError("Due to an issue with the iallocator"
11391 " interface it is not possible to evacuate"
11392 " all instances at once; specify explicitly"
11393 " whether to evacuate primary or secondary"
11395 errors.ECODE_INVAL)
11397 return inst_fn(self.cfg, self.op.node_name)
11399 def DeclareLocks(self, level):
11400 if level == locking.LEVEL_INSTANCE:
11401 # Lock instances optimistically, needs verification once node and group
11402 # locks have been acquired
11403 self.needed_locks[locking.LEVEL_INSTANCE] = \
11404 set(i.name for i in self._DetermineInstances())
11406 elif level == locking.LEVEL_NODEGROUP:
11407 # Lock node groups for all potential target nodes optimistically, needs
11408 # verification once nodes have been acquired
11409 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11410 self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
11412 elif level == locking.LEVEL_NODE:
11413 self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
11415 def CheckPrereq(self):
11417 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11418 owned_nodes = self.owned_locks(locking.LEVEL_NODE)
11419 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
11421 need_nodes = self._DetermineNodes()
11423 if not owned_nodes.issuperset(need_nodes):
11424 raise errors.OpPrereqError("Nodes in same group as '%s' changed since"
11425 " locks were acquired, current nodes are"
11426 " are '%s', used to be '%s'; retry the"
11428 (self.op.node_name,
11429 utils.CommaJoin(need_nodes),
11430 utils.CommaJoin(owned_nodes)),
11431 errors.ECODE_STATE)
11433 wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
11434 if owned_groups != wanted_groups:
11435 raise errors.OpExecError("Node groups changed since locks were acquired,"
11436 " current groups are '%s', used to be '%s';"
11437 " retry the operation" %
11438 (utils.CommaJoin(wanted_groups),
11439 utils.CommaJoin(owned_groups)))
11441 # Determine affected instances
11442 self.instances = self._DetermineInstances()
11443 self.instance_names = [i.name for i in self.instances]
11445 if set(self.instance_names) != owned_instances:
11446 raise errors.OpExecError("Instances on node '%s' changed since locks"
11447 " were acquired, current instances are '%s',"
11448 " used to be '%s'; retry the operation" %
11449 (self.op.node_name,
11450 utils.CommaJoin(self.instance_names),
11451 utils.CommaJoin(owned_instances)))
11453 if self.instance_names:
11454 self.LogInfo("Evacuating instances from node '%s': %s",
11456 utils.CommaJoin(utils.NiceSort(self.instance_names)))
11458 self.LogInfo("No instances to evacuate from node '%s'",
11461 if self.op.remote_node is not None:
11462 for i in self.instances:
11463 if i.primary_node == self.op.remote_node:
11464 raise errors.OpPrereqError("Node %s is the primary node of"
11465 " instance %s, cannot use it as"
11467 (self.op.remote_node, i.name),
11468 errors.ECODE_INVAL)
11470 def Exec(self, feedback_fn):
11471 assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
11473 if not self.instance_names:
11474 # No instances to evacuate
11477 elif self.op.iallocator is not None:
11478 # TODO: Implement relocation to other group
11479 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
11480 evac_mode=self._MODE2IALLOCATOR[self.op.mode],
11481 instances=list(self.instance_names))
11483 ial.Run(self.op.iallocator)
11485 if not ial.success:
11486 raise errors.OpPrereqError("Can't compute node evacuation using"
11487 " iallocator '%s': %s" %
11488 (self.op.iallocator, ial.info),
11489 errors.ECODE_NORES)
11491 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
11493 elif self.op.remote_node is not None:
11494 assert self.op.mode == constants.NODE_EVAC_SEC
11496 [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
11497 remote_node=self.op.remote_node,
11499 mode=constants.REPLACE_DISK_CHG,
11500 early_release=self.op.early_release)]
11501 for instance_name in self.instance_names
11505 raise errors.ProgrammerError("No iallocator or remote node")
11507 return ResultWithJobs(jobs)
11510 def _SetOpEarlyRelease(early_release, op):
11511 """Sets C{early_release} flag on opcodes if available.
11515 op.early_release = early_release
11516 except AttributeError:
11517 assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
11522 def _NodeEvacDest(use_nodes, group, nodes):
11523 """Returns group or nodes depending on caller's choice.
11527 return utils.CommaJoin(nodes)
11532 def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
11533 """Unpacks the result of change-group and node-evacuate iallocator requests.
11535 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
11536 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
11538 @type lu: L{LogicalUnit}
11539 @param lu: Logical unit instance
11540 @type alloc_result: tuple/list
11541 @param alloc_result: Result from iallocator
11542 @type early_release: bool
11543 @param early_release: Whether to release locks early if possible
11544 @type use_nodes: bool
11545 @param use_nodes: Whether to display node names instead of groups
11548 (moved, failed, jobs) = alloc_result
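# Illustrative example (hypothetical data): a node-evacuate result could
# unpack as
#   moved  = [("inst1", "group1", ["node2"])]
#   failed = []
#   jobs   = [[{"OP_ID": "OP_INSTANCE_REPLACE_DISKS", ...}]]
# i.e. one relocated instance, no failures and one job made of serialized
# opcodes that are later loaded with opcodes.OpCode.LoadOpCode.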
11551 failreason = utils.CommaJoin("%s (%s)" % (name, reason)
11552 for (name, reason) in failed)
11553 lu.LogWarning("Unable to evacuate instances %s", failreason)
11554 raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
11557 lu.LogInfo("Instances to be moved: %s",
11558 utils.CommaJoin("%s (to %s)" %
11559 (name, _NodeEvacDest(use_nodes, group, nodes))
11560 for (name, group, nodes) in moved))
11562 return [map(compat.partial(_SetOpEarlyRelease, early_release),
11563 map(opcodes.OpCode.LoadOpCode, ops))
11567 class LUInstanceGrowDisk(LogicalUnit):
11568 """Grow a disk of an instance.
11571 HPATH = "disk-grow"
11572 HTYPE = constants.HTYPE_INSTANCE
11575 def ExpandNames(self):
11576 self._ExpandAndLockInstance()
11577 self.needed_locks[locking.LEVEL_NODE] = []
11578 self.needed_locks[locking.LEVEL_NODE_RES] = []
11579 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11580 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
11582 def DeclareLocks(self, level):
11583 if level == locking.LEVEL_NODE:
11584 self._LockInstancesNodes()
11585 elif level == locking.LEVEL_NODE_RES:
11587 self.needed_locks[locking.LEVEL_NODE_RES] = \
11588 self.needed_locks[locking.LEVEL_NODE][:]
11590 def BuildHooksEnv(self):
11591 """Build hooks env.
11593 This runs on the master, the primary and all the secondaries.
11597 "DISK": self.op.disk,
11598 "AMOUNT": self.op.amount,
11599 "ABSOLUTE": self.op.absolute,
11601 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11604 def BuildHooksNodes(self):
11605 """Build hooks nodes.
11608 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
11611 def CheckPrereq(self):
11612 """Check prerequisites.
11614 This checks that the instance is in the cluster.
11617 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11618 assert instance is not None, \
11619 "Cannot retrieve locked instance %s" % self.op.instance_name
11620 nodenames = list(instance.all_nodes)
11621 for node in nodenames:
11622 _CheckNodeOnline(self, node)
11624 self.instance = instance
11626 if instance.disk_template not in constants.DTS_GROWABLE:
11627 raise errors.OpPrereqError("Instance's disk layout does not support"
11628 " growing", errors.ECODE_INVAL)
11630 self.disk = instance.FindDisk(self.op.disk)
11632 if self.op.absolute:
11633 self.target = self.op.amount
11634 self.delta = self.target - self.disk.size
11636 raise errors.OpPrereqError("Requested size (%s) is smaller than "
11637 "current disk size (%s)" %
11638 (utils.FormatUnit(self.target, "h"),
11639 utils.FormatUnit(self.disk.size, "h")),
11640 errors.ECODE_STATE)
11642 self.delta = self.op.amount
11643 self.target = self.disk.size + self.delta
11645 raise errors.OpPrereqError("Requested increment (%s) is negative" %
11646 utils.FormatUnit(self.delta, "h"),
11647 errors.ECODE_INVAL)
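# Illustrative example: for a 10240 MB disk, amount=2048 with absolute=False
# gives delta=2048 and target=12288, while amount=12288 with absolute=True
# yields the same target with delta computed as the difference.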
11649 if instance.disk_template not in (constants.DT_FILE,
11650 constants.DT_SHARED_FILE,
11652 # TODO: check the free disk space for file, when that feature will be implemented
11654 _CheckNodesFreeDiskPerVG(self, nodenames,
11655 self.disk.ComputeGrowth(self.delta))
11657 def Exec(self, feedback_fn):
11658 """Execute disk grow.
11661 instance = self.instance
11664 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11665 assert (self.owned_locks(locking.LEVEL_NODE) ==
11666 self.owned_locks(locking.LEVEL_NODE_RES))
11668 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
11670 raise errors.OpExecError("Cannot activate block device to grow")
11672 feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
11673 (self.op.disk, instance.name,
11674 utils.FormatUnit(self.delta, "h"),
11675 utils.FormatUnit(self.target, "h")))
11677 # First run all grow ops in dry-run mode
11678 for node in instance.all_nodes:
11679 self.cfg.SetDiskID(disk, node)
11680 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11682 result.Raise("Grow request failed to node %s" % node)
11684 # We know that (as far as we can test) operations across different
11685 # nodes will succeed; time to run it for real
11686 for node in instance.all_nodes:
11687 self.cfg.SetDiskID(disk, node)
11688 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11690 result.Raise("Grow request failed to node %s" % node)
11692 # TODO: Rewrite code to work properly
11693 # DRBD goes into sync mode for a short amount of time after executing the
11694 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
11695 # calling "resize" in sync mode fails. Sleeping for a short amount of
11696 # time is a work-around.
11699 disk.RecordGrow(self.delta)
11700 self.cfg.Update(instance, feedback_fn)
11702 # Changes have been recorded, release node lock
11703 _ReleaseLocks(self, locking.LEVEL_NODE)
11705 # Downgrade lock while waiting for sync
11706 self.glm.downgrade(locking.LEVEL_INSTANCE)
11708 if self.op.wait_for_sync:
11709 disk_abort = not _WaitForSync(self, instance, disks=[disk])
11711 self.proc.LogWarning("Disk sync-ing has not returned a good"
11712 " status; please check the instance")
11713 if instance.admin_state != constants.ADMINST_UP:
11714 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
11715 elif instance.admin_state != constants.ADMINST_UP:
11716 self.proc.LogWarning("Not shutting down the disk even if the instance is"
11717 " not supposed to be running because no wait for"
11718 " sync mode was requested")
11720 assert self.owned_locks(locking.LEVEL_NODE_RES)
11721 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11724 class LUInstanceQueryData(NoHooksLU):
11725 """Query runtime instance data.
11730 def ExpandNames(self):
11731 self.needed_locks = {}
11733 # Use locking if requested or when non-static information is wanted
11734 if not (self.op.static or self.op.use_locking):
11735 self.LogWarning("Non-static data requested, locks need to be acquired")
11736 self.op.use_locking = True
11738 if self.op.instances or not self.op.use_locking:
11739 # Expand instance names right here
11740 self.wanted_names = _GetWantedInstances(self, self.op.instances)
11742 # Will use acquired locks
11743 self.wanted_names = None
11745 if self.op.use_locking:
11746 self.share_locks = _ShareAll()
11748 if self.wanted_names is None:
11749 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
11751 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
11753 self.needed_locks[locking.LEVEL_NODEGROUP] = []
11754 self.needed_locks[locking.LEVEL_NODE] = []
11755 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11757 def DeclareLocks(self, level):
11758 if self.op.use_locking:
11759 if level == locking.LEVEL_NODEGROUP:
11760 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11762 # Lock all groups used by instances optimistically; this requires going
11763 # via the node before it's locked, requiring verification later on
11764 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11765 frozenset(group_uuid
11766 for instance_name in owned_instances
11768 self.cfg.GetInstanceNodeGroups(instance_name))
11770 elif level == locking.LEVEL_NODE:
11771 self._LockInstancesNodes()
11773 def CheckPrereq(self):
11774 """Check prerequisites.
11776 This only checks the optional instance list against the existing names.
11779 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
11780 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
11781 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
11783 if self.wanted_names is None:
11784 assert self.op.use_locking, "Locking was not used"
11785 self.wanted_names = owned_instances
11787 instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
11789 if self.op.use_locking:
11790 _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
11793 assert not (owned_instances or owned_groups or owned_nodes)
11795 self.wanted_instances = instances.values()
11797 def _ComputeBlockdevStatus(self, node, instance, dev):
11798 """Returns the status of a block device
11801 if self.op.static or not node:
11804 self.cfg.SetDiskID(dev, node)
11806 result = self.rpc.call_blockdev_find(node, dev)
11810 result.Raise("Can't compute disk status for %s" % instance.name)
11812 status = result.payload
11816 return (status.dev_path, status.major, status.minor,
11817 status.sync_percent, status.estimated_time,
11818 status.is_degraded, status.ldisk_status)
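# Illustrative example (hypothetical values): for a healthy, fully synced
# DRBD device the tuple returned above by _ComputeBlockdevStatus could look
# like ("/dev/drbd0", 147, 0, None, None, False, constants.LDS_OKAY), i.e.
# (dev_path, major, minor, sync_percent, estimated_time, is_degraded,
# ldisk_status).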
11820 def _ComputeDiskStatus(self, instance, snode, dev):
11821 """Compute block device status.
11824 (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
11826 return self._ComputeDiskStatusInner(instance, snode, anno_dev)
11828 def _ComputeDiskStatusInner(self, instance, snode, dev):
11829 """Compute block device status.
11831 @attention: The device has to be annotated already.
11834 if dev.dev_type in constants.LDS_DRBD:
11835 # we change the snode then (otherwise we use the one passed in)
11836 if dev.logical_id[0] == instance.primary_node:
11837 snode = dev.logical_id[1]
11839 snode = dev.logical_id[0]
11841 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
11843 dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
11846 dev_children = map(compat.partial(self._ComputeDiskStatusInner,
11853 "iv_name": dev.iv_name,
11854 "dev_type": dev.dev_type,
11855 "logical_id": dev.logical_id,
11856 "physical_id": dev.physical_id,
11857 "pstatus": dev_pstatus,
11858 "sstatus": dev_sstatus,
11859 "children": dev_children,
11864 def Exec(self, feedback_fn):
11865 """Gather and return data"""
11868 cluster = self.cfg.GetClusterInfo()
11870 node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
11871 nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
11873 groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
11874 for node in nodes.values()))
11876 group2name_fn = lambda uuid: groups[uuid].name
11878 for instance in self.wanted_instances:
11879 pnode = nodes[instance.primary_node]
11881 if self.op.static or pnode.offline:
11882 remote_state = None
11884 self.LogWarning("Primary node %s is marked offline, returning static"
11885 " information only for instance %s" %
11886 (pnode.name, instance.name))
11888 remote_info = self.rpc.call_instance_info(instance.primary_node,
11890 instance.hypervisor)
11891 remote_info.Raise("Error checking node %s" % instance.primary_node)
11892 remote_info = remote_info.payload
11893 if remote_info and "state" in remote_info:
11894 remote_state = "up"
11896 if instance.admin_state == constants.ADMINST_UP:
11897 remote_state = "down"
11899 remote_state = instance.admin_state
11901 disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
11904 snodes_group_uuids = [nodes[snode_name].group
11905 for snode_name in instance.secondary_nodes]
11907 result[instance.name] = {
11908 "name": instance.name,
11909 "config_state": instance.admin_state,
11910 "run_state": remote_state,
11911 "pnode": instance.primary_node,
11912 "pnode_group_uuid": pnode.group,
11913 "pnode_group_name": group2name_fn(pnode.group),
11914 "snodes": instance.secondary_nodes,
11915 "snodes_group_uuids": snodes_group_uuids,
11916 "snodes_group_names": map(group2name_fn, snodes_group_uuids),
11918 # this happens to be the same format used for hooks
11919 "nics": _NICListToTuple(self, instance.nics),
11920 "disk_template": instance.disk_template,
11922 "hypervisor": instance.hypervisor,
11923 "network_port": instance.network_port,
11924 "hv_instance": instance.hvparams,
11925 "hv_actual": cluster.FillHV(instance, skip_globals=True),
11926 "be_instance": instance.beparams,
11927 "be_actual": cluster.FillBE(instance),
11928 "os_instance": instance.osparams,
11929 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
11930 "serial_no": instance.serial_no,
11931 "mtime": instance.mtime,
11932 "ctime": instance.ctime,
11933 "uuid": instance.uuid,
11939 def PrepareContainerMods(mods, private_fn):
11940 """Prepares a list of container modifications by adding a private data field.
11942 @type mods: list of tuples; (operation, index, parameters)
11943 @param mods: List of modifications
11944 @type private_fn: callable or None
11945 @param private_fn: Callable for constructing a private data field for a
11950 if private_fn is None:
11955 return [(op, idx, params, fn()) for (op, idx, params) in mods]
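# Illustrative example (hypothetical values): with
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
# and private_fn=None this returns
#   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)]
# while passing a class such as _InstNicModPrivate puts a fresh private
# object in the fourth position of each tuple.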
11958 #: Type description for changes as returned by L{ApplyContainerMods}'s
11960 _TApplyContModsCbChanges = \
11961 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
11962 ht.TNonEmptyString,
11967 def ApplyContainerMods(kind, container, chgdesc, mods,
11968 create_fn, modify_fn, remove_fn):
11969 """Applies descriptions in C{mods} to C{container}.
11972 @param kind: One-word item description
11973 @type container: list
11974 @param container: Container to modify
11975 @type chgdesc: None or list
11976 @param chgdesc: List of applied changes
11978 @param mods: Modifications as returned by L{PrepareContainerMods}
11979 @type create_fn: callable
11980 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
11981 receives absolute item index, parameters and private data object as added
11982 by L{PrepareContainerMods}, returns tuple containing new item and changes
11984 @type modify_fn: callable
11985 @param modify_fn: Callback for modifying an existing item
11986 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
11987 and private data object as added by L{PrepareContainerMods}, returns
11989 @type remove_fn: callable
11990 @param remove_fn: Callback on removing item; receives absolute item index,
11991 item and private data object as added by L{PrepareContainerMods}
11994 for (op, idx, params, private) in mods:
11997 absidx = len(container) - 1
11999 raise IndexError("Not accepting negative indices other than -1")
12000 elif idx > len(container):
12001 raise IndexError("Got %s index %s, but there are only %s" %
12002 (kind, idx, len(container)))
12008 if op == constants.DDM_ADD:
12009 # Calculate where item will be added
12011 addidx = len(container)
12015 if create_fn is None:
12018 (item, changes) = create_fn(addidx, params, private)
12021 container.append(item)
12024 assert idx <= len(container)
12025 # list.insert does so before the specified index
12026 container.insert(idx, item)
12028 # Retrieve existing item
12030 item = container[absidx]
12032 raise IndexError("Invalid %s index %s" % (kind, idx))
12034 if op == constants.DDM_REMOVE:
12037 if remove_fn is not None:
12038 remove_fn(absidx, item, private)
12040 changes = [("%s/%s" % (kind, absidx), "remove")]
12042 assert container[absidx] == item
12043 del container[absidx]
12044 elif op == constants.DDM_MODIFY:
12045 if modify_fn is not None:
12046 changes = modify_fn(absidx, item, params, private)
12048 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12050 assert _TApplyContModsCbChanges(changes)
12052 if not (chgdesc is None or changes is None):
12053 chgdesc.extend(changes)
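# Illustrative usage sketch (hypothetical callback names): modifying the
# second NIC of an instance copy could look like
#   chgdesc = []
#   mods = PrepareContainerMods([(constants.DDM_MODIFY, 1, {"link": "br1"})],
#                               _InstNicModPrivate)
#   ApplyContainerMods("NIC", nics, chgdesc, mods, None, modify_cb, None)
# after which chgdesc holds the (name, value) change pairs returned by the
# modify_cb callback.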
12056 def _UpdateIvNames(base_index, disks):
12057 """Updates the C{iv_name} attribute of disks.
12059 @type disks: list of L{objects.Disk}
12062 for (idx, disk) in enumerate(disks):
12063 disk.iv_name = "disk/%s" % (base_index + idx, )
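# Illustrative example: _UpdateIvNames(2, [disk_a, disk_b]) sets
# disk_a.iv_name to "disk/2" and disk_b.iv_name to "disk/3".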
12066 class _InstNicModPrivate:
12067 """Data structure for network interface modifications.
12069 Used by L{LUInstanceSetParams}.
12072 def __init__(self):
12077 class LUInstanceSetParams(LogicalUnit):
12078 """Modifies an instances's parameters.
12081 HPATH = "instance-modify"
12082 HTYPE = constants.HTYPE_INSTANCE
12086 def _UpgradeDiskNicMods(kind, mods, verify_fn):
12087 assert ht.TList(mods)
12088 assert not mods or len(mods[0]) in (2, 3)
12090 if mods and len(mods[0]) == 2:
12094 for op, params in mods:
12095 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
12096 result.append((op, -1, params))
12100 raise errors.OpPrereqError("Only one %s add or remove operation is"
12101 " supported at a time" % kind,
12102 errors.ECODE_INVAL)
12104 result.append((constants.DDM_MODIFY, op, params))
12106 assert verify_fn(result)
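# Illustrative example for _UpgradeDiskNicMods: the legacy two-element form
#   [(constants.DDM_ADD, {"size": 1024})]
# is upgraded to [(constants.DDM_ADD, -1, {"size": 1024})], while an indexed
# entry such as (0, {"mode": "ro"}) becomes
# (constants.DDM_MODIFY, 0, {"mode": "ro"}).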
12113 def _CheckMods(kind, mods, key_types, item_fn):
12114 """Ensures requested disk/NIC modifications are valid.
12117 for (op, _, params) in mods:
12118 assert ht.TDict(params)
12120 utils.ForceDictType(params, key_types)
12122 if op == constants.DDM_REMOVE:
12124 raise errors.OpPrereqError("No settings should be passed when"
12125 " removing a %s" % kind,
12126 errors.ECODE_INVAL)
12127 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
12128 item_fn(op, params)
12130 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12133 def _VerifyDiskModification(op, params):
12134 """Verifies a disk modification.
12137 if op == constants.DDM_ADD:
12138 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
12139 if mode not in constants.DISK_ACCESS_SET:
12140 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
12141 errors.ECODE_INVAL)
12143 size = params.get(constants.IDISK_SIZE, None)
12145 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
12146 constants.IDISK_SIZE, errors.ECODE_INVAL)
12150 except (TypeError, ValueError), err:
12151 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
12152 errors.ECODE_INVAL)
12154 params[constants.IDISK_SIZE] = size
12156 elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
12157 raise errors.OpPrereqError("Disk size change not possible, use"
12158 " grow-disk", errors.ECODE_INVAL)
12161 def _VerifyNicModification(op, params):
12162 """Verifies a network interface modification.
12165 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
12166 ip = params.get(constants.INIC_IP, None)
12169 elif ip.lower() == constants.VALUE_NONE:
12170 params[constants.INIC_IP] = None
12171 elif not netutils.IPAddress.IsValid(ip):
12172 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
12173 errors.ECODE_INVAL)
12175 bridge = params.get("bridge", None)
12176 link = params.get(constants.INIC_LINK, None)
12177 if bridge and link:
12178 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
12179 " at the same time", errors.ECODE_INVAL)
12180 elif bridge and bridge.lower() == constants.VALUE_NONE:
12181 params["bridge"] = None
12182 elif link and link.lower() == constants.VALUE_NONE:
12183 params[constants.INIC_LINK] = None
12185 if op == constants.DDM_ADD:
12186 macaddr = params.get(constants.INIC_MAC, None)
12187 if macaddr is None:
12188 params[constants.INIC_MAC] = constants.VALUE_AUTO
12190 if constants.INIC_MAC in params:
12191 macaddr = params[constants.INIC_MAC]
12192 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12193 macaddr = utils.NormalizeAndValidateMac(macaddr)
12195 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
12196 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
12197 " modifying an existing NIC",
12198 errors.ECODE_INVAL)
12200 def CheckArguments(self):
12201 if not (self.op.nics or self.op.disks or self.op.disk_template or
12202 self.op.hvparams or self.op.beparams or self.op.os_name or
12203 self.op.offline is not None or self.op.runtime_mem):
12204 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
12206 if self.op.hvparams:
12207 _CheckGlobalHvParams(self.op.hvparams)
12210 self._UpgradeDiskNicMods("disk", self.op.disks,
12211 opcodes.OpInstanceSetParams.TestDiskModifications)
12213 self._UpgradeDiskNicMods("NIC", self.op.nics,
12214 opcodes.OpInstanceSetParams.TestNicModifications)
12216 # Check disk modifications
12217 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
12218 self._VerifyDiskModification)
12220 if self.op.disks and self.op.disk_template is not None:
12221 raise errors.OpPrereqError("Disk template conversion and other disk"
12222 " changes not supported at the same time",
12223 errors.ECODE_INVAL)
12225 if (self.op.disk_template and
12226 self.op.disk_template in constants.DTS_INT_MIRROR and
12227 self.op.remote_node is None):
12228 raise errors.OpPrereqError("Changing the disk template to a mirrored"
12229 " one requires specifying a secondary node",
12230 errors.ECODE_INVAL)
12232 # Check NIC modifications
12233 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
12234 self._VerifyNicModification)
12236 def ExpandNames(self):
12237 self._ExpandAndLockInstance()
12238 # Can't even acquire node locks in shared mode as upcoming changes in
12239 # Ganeti 2.6 will start to modify the node object on disk conversion
12240 self.needed_locks[locking.LEVEL_NODE] = []
12241 self.needed_locks[locking.LEVEL_NODE_RES] = []
12242 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
12244 def DeclareLocks(self, level):
12245 # TODO: Acquire group lock in shared mode (disk parameters)
12246 if level == locking.LEVEL_NODE:
12247 self._LockInstancesNodes()
12248 if self.op.disk_template and self.op.remote_node:
12249 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
12250 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
12251 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
12253 self.needed_locks[locking.LEVEL_NODE_RES] = \
12254 self.needed_locks[locking.LEVEL_NODE][:]
12256 def BuildHooksEnv(self):
12257 """Build hooks env.
12259 This runs on the master, primary and secondaries.
12263 if constants.BE_MINMEM in self.be_new:
12264 args["minmem"] = self.be_new[constants.BE_MINMEM]
12265 if constants.BE_MAXMEM in self.be_new:
12266 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
12267 if constants.BE_VCPUS in self.be_new:
12268 args["vcpus"] = self.be_new[constants.BE_VCPUS]
12269 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
12270 # information at all.
12272 if self._new_nics is not None:
12275 for nic in self._new_nics:
12276 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
12277 mode = nicparams[constants.NIC_MODE]
12278 link = nicparams[constants.NIC_LINK]
12279 nics.append((nic.ip, nic.mac, mode, link))
12281 args["nics"] = nics
12283 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
12284 if self.op.disk_template:
12285 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
12286 if self.op.runtime_mem:
12287 env["RUNTIME_MEMORY"] = self.op.runtime_mem
12291 def BuildHooksNodes(self):
12292 """Build hooks nodes.
12295 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
12298 def _PrepareNicModification(self, params, private, old_ip, old_params,
12300 update_params_dict = dict([(key, params[key])
12301 for key in constants.NICS_PARAMETERS
12304 if "bridge" in params:
12305 update_params_dict[constants.NIC_LINK] = params["bridge"]
12307 new_params = _GetUpdatedParams(old_params, update_params_dict)
12308 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
12310 new_filled_params = cluster.SimpleFillNIC(new_params)
12311 objects.NIC.CheckParameterSyntax(new_filled_params)
12313 new_mode = new_filled_params[constants.NIC_MODE]
12314 if new_mode == constants.NIC_MODE_BRIDGED:
12315 bridge = new_filled_params[constants.NIC_LINK]
12316 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
12318 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
12320 self.warn.append(msg)
12322 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
12324 elif new_mode == constants.NIC_MODE_ROUTED:
12325 ip = params.get(constants.INIC_IP, old_ip)
12327 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
12328 " on a routed NIC", errors.ECODE_INVAL)
12330 if constants.INIC_MAC in params:
12331 mac = params[constants.INIC_MAC]
12333 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
12334 errors.ECODE_INVAL)
12335 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12336 # otherwise generate the MAC address
12337 params[constants.INIC_MAC] = \
12338 self.cfg.GenerateMAC(self.proc.GetECId())
12340 # or validate/reserve the current one
12342 self.cfg.ReserveMAC(mac, self.proc.GetECId())
12343 except errors.ReservationError:
12344 raise errors.OpPrereqError("MAC address '%s' already in use"
12345 " in cluster" % mac,
12346 errors.ECODE_NOTUNIQUE)
12348 private.params = new_params
12349 private.filled = new_filled_params
12351 def CheckPrereq(self):
12352 """Check prerequisites.
12354 This only checks the instance list against the existing names.
12357 # checking the new params on the primary/secondary nodes
12359 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
12360 cluster = self.cluster = self.cfg.GetClusterInfo()
12361 assert self.instance is not None, \
12362 "Cannot retrieve locked instance %s" % self.op.instance_name
12363 pnode = instance.primary_node
12364 nodelist = list(instance.all_nodes)
12365 pnode_info = self.cfg.GetNodeInfo(pnode)
12366 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
12368 # Prepare disk/NIC modifications
12369 self.diskmod = PrepareContainerMods(self.op.disks, None)
12370 self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
12373 if self.op.os_name and not self.op.force:
12374 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
12375 self.op.force_variant)
12376 instance_os = self.op.os_name
12378 instance_os = instance.os
12380 assert not (self.op.disk_template and self.op.disks), \
12381 "Can't modify disk template and apply disk changes at the same time"
12383 if self.op.disk_template:
12384 if instance.disk_template == self.op.disk_template:
12385 raise errors.OpPrereqError("Instance already has disk template %s" %
12386 instance.disk_template, errors.ECODE_INVAL)
12388 if (instance.disk_template,
12389 self.op.disk_template) not in self._DISK_CONVERSIONS:
12390 raise errors.OpPrereqError("Unsupported disk template conversion from"
12391 " %s to %s" % (instance.disk_template,
12392 self.op.disk_template),
12393 errors.ECODE_INVAL)
12394 _CheckInstanceState(self, instance, INSTANCE_DOWN,
12395 msg="cannot change disk template")
12396 if self.op.disk_template in constants.DTS_INT_MIRROR:
12397 if self.op.remote_node == pnode:
12398 raise errors.OpPrereqError("Given new secondary node %s is the same"
12399 " as the primary node of the instance" %
12400 self.op.remote_node, errors.ECODE_STATE)
12401 _CheckNodeOnline(self, self.op.remote_node)
12402 _CheckNodeNotDrained(self, self.op.remote_node)
12403 # FIXME: here we assume that the old instance type is DT_PLAIN
12404 assert instance.disk_template == constants.DT_PLAIN
12405 disks = [{constants.IDISK_SIZE: d.size,
12406 constants.IDISK_VG: d.logical_id[0]}
12407 for d in instance.disks]
12408 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
12409 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
12411 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
12412 snode_group = self.cfg.GetNodeGroup(snode_info.group)
12413 ipolicy = _CalculateGroupIPolicy(cluster, snode_group)
12414 _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
12415 ignore=self.op.ignore_ipolicy)
12416 if pnode_info.group != snode_info.group:
12417 self.LogWarning("The primary and secondary nodes are in two"
12418 " different node groups; the disk parameters"
12419 " from the first disk's node group will be"
12422 # hvparams processing
12423 if self.op.hvparams:
12424 hv_type = instance.hypervisor
12425 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
12426 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
12427 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
12430 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
12431 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
12432 self.hv_proposed = self.hv_new = hv_new # the new actual values
12433 self.hv_inst = i_hvdict # the new dict (without defaults)
12435 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
12437 self.hv_new = self.hv_inst = {}
12439 # beparams processing
12440 if self.op.beparams:
12441 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
12443 objects.UpgradeBeParams(i_bedict)
12444 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
12445 be_new = cluster.SimpleFillBE(i_bedict)
12446 self.be_proposed = self.be_new = be_new # the new actual values
12447 self.be_inst = i_bedict # the new dict (without defaults)
12449 self.be_new = self.be_inst = {}
12450 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
12451 be_old = cluster.FillBE(instance)
12453 # CPU param validation -- checking every time a parameter is
12454 # changed to cover all cases where either CPU mask or vcpus have
12456 if (constants.BE_VCPUS in self.be_proposed and
12457 constants.HV_CPU_MASK in self.hv_proposed):
12459 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
12460 # Verify mask is consistent with number of vCPUs. Can skip this
12461 # test if only 1 entry in the CPU mask, which means same mask
12462 # is applied to all vCPUs.
12463 if (len(cpu_list) > 1 and
12464 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
12465 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
12467 (self.be_proposed[constants.BE_VCPUS],
12468 self.hv_proposed[constants.HV_CPU_MASK]),
12469 errors.ECODE_INVAL)
12471 # Only perform this test if a new CPU mask is given
12472 if constants.HV_CPU_MASK in self.hv_new:
12473 # Calculate the largest CPU number requested
12474 max_requested_cpu = max(map(max, cpu_list))
12475 # Check that all of the instance's nodes have enough physical CPUs to
12476 # satisfy the requested CPU mask
12477 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
12478 max_requested_cpu + 1, instance.hypervisor)
12480 # osparams processing
12481 if self.op.osparams:
12482 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
12483 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
12484 self.os_inst = i_osdict # the new dict (without defaults)
12490 #TODO(dynmem): do the appropriate check involving MINMEM
12491 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
12492 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
12493 mem_check_list = [pnode]
12494 if be_new[constants.BE_AUTO_BALANCE]:
12495 # either we changed auto_balance to yes or it was from before
12496 mem_check_list.extend(instance.secondary_nodes)
12497 instance_info = self.rpc.call_instance_info(pnode, instance.name,
12498 instance.hypervisor)
12499 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
12500 [instance.hypervisor])
12501 pninfo = nodeinfo[pnode]
12502 msg = pninfo.fail_msg
12504 # Assume the primary node is unreachable and go ahead
12505 self.warn.append("Can't get info from primary node %s: %s" %
12508 (_, _, (pnhvinfo, )) = pninfo.payload
12509 if not isinstance(pnhvinfo.get("memory_free", None), int):
12510 self.warn.append("Node data from primary node %s doesn't contain"
12511 " free memory information" % pnode)
12512 elif instance_info.fail_msg:
12513 self.warn.append("Can't get instance runtime information: %s" %
12514 instance_info.fail_msg)
12516 if instance_info.payload:
12517 current_mem = int(instance_info.payload["memory"])
12519 # Assume instance not running
12520 # (there is a slight race condition here, but it's not very
12521 # probable, and we have no other way to check)
12522 # TODO: Describe race condition
12524 #TODO(dynmem): do the appropriate check involving MINMEM
12525 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
12526 pnhvinfo["memory_free"])
12528 raise errors.OpPrereqError("This change will prevent the instance"
12529 " from starting, due to %d MB of memory"
12530 " missing on its primary node" %
12532 errors.ECODE_NORES)
12534 if be_new[constants.BE_AUTO_BALANCE]:
12535 for node, nres in nodeinfo.items():
12536 if node not in instance.secondary_nodes:
12538 nres.Raise("Can't get info from secondary node %s" % node,
12539 prereq=True, ecode=errors.ECODE_STATE)
12540 (_, _, (nhvinfo, )) = nres.payload
12541 if not isinstance(nhvinfo.get("memory_free", None), int):
12542 raise errors.OpPrereqError("Secondary node %s didn't return free"
12543 " memory information" % node,
12544 errors.ECODE_STATE)
12545 #TODO(dynmem): do the appropriate check involving MINMEM
12546 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
12547 raise errors.OpPrereqError("This change will prevent the instance"
12548 " from failover to its secondary node"
12549 " %s, due to not enough memory" % node,
12550 errors.ECODE_STATE)
12552 if self.op.runtime_mem:
12553 remote_info = self.rpc.call_instance_info(instance.primary_node,
12555 instance.hypervisor)
12556 remote_info.Raise("Error checking node %s" % instance.primary_node)
12557 if not remote_info.payload: # not running already
12558 raise errors.OpPrereqError("Instance %s is not running" % instance.name,
12559 errors.ECODE_STATE)
12561 current_memory = remote_info.payload["memory"]
12562 if (not self.op.force and
12563 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
12564 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
12565 raise errors.OpPrereqError("Instance %s must have memory between %d"
12566 " and %d MB of memory unless --force is"
12567 " given" % (instance.name,
12568 self.be_proposed[constants.BE_MINMEM],
12569 self.be_proposed[constants.BE_MAXMEM]),
12570 errors.ECODE_INVAL)
12572 if self.op.runtime_mem > current_memory:
12573 _CheckNodeFreeMemory(self, instance.primary_node,
12574 "ballooning memory for instance %s" %
12576 self.op.runtime_mem - current_memory,
12577 instance.hypervisor)
12579 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
12580 raise errors.OpPrereqError("Disk operations not supported for"
12581 " diskless instances",
12582 errors.ECODE_INVAL)
12584 def _PrepareNicCreate(_, params, private):
12585 self._PrepareNicModification(params, private, None, {}, cluster, pnode)
12586 return (None, None)
12588 def _PrepareNicMod(_, nic, params, private):
12589 self._PrepareNicModification(params, private, nic.ip,
12590 nic.nicparams, cluster, pnode)
12593 # Verify NIC changes (operating on copy)
12594 nics = instance.nics[:]
12595 ApplyContainerMods("NIC", nics, None, self.nicmod,
12596 _PrepareNicCreate, _PrepareNicMod, None)
12597 if len(nics) > constants.MAX_NICS:
12598 raise errors.OpPrereqError("Instance has too many network interfaces"
12599 " (%d), cannot add more" % constants.MAX_NICS,
12600 errors.ECODE_STATE)
12602 # Verify disk changes (operating on a copy)
12603 disks = instance.disks[:]
12604 ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
12605 if len(disks) > constants.MAX_DISKS:
12606 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
12607 " more" % constants.MAX_DISKS,
12608 errors.ECODE_STATE)
12610 if self.op.offline is not None:
12611 if self.op.offline:
12612 msg = "can't change to offline"
12614 msg = "can't change to online"
12615 _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
12617 # Pre-compute NIC changes (necessary to use result in hooks)
12618 self._nic_chgdesc = []
12620 # Operate on copies as this is still in prereq
12621 nics = [nic.Copy() for nic in instance.nics]
12622 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
12623 self._CreateNewNic, self._ApplyNicMods, None)
12624 self._new_nics = nics
12626 self._new_nics = None
12628 def _ConvertPlainToDrbd(self, feedback_fn):
12629 """Converts an instance from plain to drbd.
12632 feedback_fn("Converting template to drbd")
12633 instance = self.instance
12634 pnode = instance.primary_node
12635 snode = self.op.remote_node
12637 assert instance.disk_template == constants.DT_PLAIN
12639 # create a fake disk info for _GenerateDiskTemplate
12640 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
12641 constants.IDISK_VG: d.logical_id[0]}
12642 for d in instance.disks]
12643 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
12644 instance.name, pnode, [snode],
12645 disk_info, None, None, 0, feedback_fn,
12647 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
12649 info = _GetInstanceInfoText(instance)
12650 feedback_fn("Creating additional volumes...")
12651 # first, create the missing data and meta devices
12652 for disk in anno_disks:
12653 # unfortunately this is... not too nice
12654 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
12656 for child in disk.children:
12657 _CreateSingleBlockDev(self, snode, instance, child, info, True)
12658 # at this stage, all new LVs have been created, we can rename the old ones
12660 feedback_fn("Renaming original volumes...")
12661 rename_list = [(o, n.children[0].logical_id)
12662 for (o, n) in zip(instance.disks, new_disks)]
12663 result = self.rpc.call_blockdev_rename(pnode, rename_list)
12664 result.Raise("Failed to rename original LVs")
12666 feedback_fn("Initializing DRBD devices...")
12667 # all child devices are in place, we can now create the DRBD devices
12668 for disk in anno_disks:
12669 for node in [pnode, snode]:
12670 f_create = node == pnode
12671 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
12673 # at this point, the instance has been modified
12674 instance.disk_template = constants.DT_DRBD8
12675 instance.disks = new_disks
12676 self.cfg.Update(instance, feedback_fn)
12678 # Release node locks while waiting for sync
12679 _ReleaseLocks(self, locking.LEVEL_NODE)
12681 # disks are created, waiting for sync
12682 disk_abort = not _WaitForSync(self, instance,
12683 oneshot=not self.op.wait_for_sync)
12685 raise errors.OpExecError("There are some degraded disks for"
12686 " this instance, please cleanup manually")
12688 # Node resource locks will be released by caller
12690 def _ConvertDrbdToPlain(self, feedback_fn):
12691 """Converts an instance from drbd to plain.
12694 instance = self.instance
12696 assert len(instance.secondary_nodes) == 1
12697 assert instance.disk_template == constants.DT_DRBD8
12699 pnode = instance.primary_node
12700 snode = instance.secondary_nodes[0]
12701 feedback_fn("Converting template to plain")
12703 old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
12704 new_disks = [d.children[0] for d in instance.disks]
12706 # copy over size and mode
12707 for parent, child in zip(old_disks, new_disks):
12708 child.size = parent.size
12709 child.mode = parent.mode
12711 # this is a DRBD disk, return its port to the pool
12712 # NOTE: this must be done right before the call to cfg.Update!
12713 for disk in old_disks:
12714 tcp_port = disk.logical_id[2]
12715 self.cfg.AddTcpUdpPort(tcp_port)
12717 # update instance structure
12718 instance.disks = new_disks
12719 instance.disk_template = constants.DT_PLAIN
12720 self.cfg.Update(instance, feedback_fn)
12722 # Release locks in case removing disks takes a while
12723 _ReleaseLocks(self, locking.LEVEL_NODE)
12725 feedback_fn("Removing volumes on the secondary node...")
12726 for disk in old_disks:
12727 self.cfg.SetDiskID(disk, snode)
12728 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
12729 if msg:
12730 self.LogWarning("Could not remove block device %s on node %s,"
12731 " continuing anyway: %s", disk.iv_name, snode, msg)
12733 feedback_fn("Removing unneeded volumes on the primary node...")
12734 for idx, disk in enumerate(old_disks):
12735 meta = disk.children[1]
12736 self.cfg.SetDiskID(meta, pnode)
12737 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
12738 if msg:
12739 self.LogWarning("Could not remove metadata for disk %d on node %s,"
12740 " continuing anyway: %s", idx, pnode, msg)
12742 def _CreateNewDisk(self, idx, params, _):
12743 """Creates a new disk.
12746 instance = self.instance
12749 if instance.disk_template in constants.DTS_FILEBASED:
12750 (file_driver, file_path) = instance.disks[0].logical_id
12751 file_path = os.path.dirname(file_path)
12752 else:
12753 file_driver = file_path = None
12755 disk = \
12756 _GenerateDiskTemplate(self, instance.disk_template, instance.name,
12757 instance.primary_node, instance.secondary_nodes,
12758 [params], file_path, file_driver, idx,
12759 self.Log, self.diskparams)[0]
12761 info = _GetInstanceInfoText(instance)
12763 logging.info("Creating volume %s for instance %s",
12764 disk.iv_name, instance.name)
12765 # Note: this needs to be kept in sync with _CreateDisks
12767 for node in instance.all_nodes:
12768 f_create = (node == instance.primary_node)
12769 try:
12770 _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
12771 except errors.OpExecError, err:
12772 self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
12773 disk.iv_name, disk, node, err)
12775 return (disk, [
12776 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
12777 ])
12779 @staticmethod
12780 def _ModifyDisk(idx, disk, params, _):
12781 """Modifies a disk.
12784 disk.mode = params[constants.IDISK_MODE]
12786 return [
12787 ("disk.mode/%d" % idx, disk.mode),
12788 ]
12790 def _RemoveDisk(self, idx, root, _):
12791 """Removes a disk.
12794 (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
12795 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
12796 self.cfg.SetDiskID(disk, node)
12797 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
12798 if msg:
12799 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
12800 " continuing anyway", idx, node, msg)
12802 # if this is a DRBD disk, return its port to the pool
12803 if root.dev_type in constants.LDS_DRBD:
12804 self.cfg.AddTcpUdpPort(root.logical_id[2])
12806 @staticmethod
12807 def _CreateNewNic(idx, params, private):
12808 """Creates data structure for a new network interface.
12811 mac = params[constants.INIC_MAC]
12812 ip = params.get(constants.INIC_IP, None)
12813 nicparams = private.params
12815 return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
12816 ("nic.%d" % idx,
12817 "add:mac=%s,ip=%s,mode=%s,link=%s" %
12818 (mac, ip, private.filled[constants.NIC_MODE],
12819 private.filled[constants.NIC_LINK])),
12820 ])
12822 @staticmethod
12823 def _ApplyNicMods(idx, nic, params, private):
12824 """Modifies a network interface.
12827 changes = []
12829 for key in [constants.INIC_MAC, constants.INIC_IP]:
12830 if key in params:
12831 changes.append(("nic.%s/%d" % (key, idx), params[key]))
12832 setattr(nic, key, params[key])
12834 if private.params:
12835 nic.nicparams = private.params
12837 for (key, val) in params.items():
12838 changes.append(("nic.%s/%d" % (key, idx), val))
12840 return changes
12842 def Exec(self, feedback_fn):
12843 """Modifies an instance.
12845 All parameters take effect only at the next restart of the instance.
12848 # Process here the warnings from CheckPrereq, as we don't have a
12849 # feedback_fn there.
12850 # TODO: Replace with self.LogWarning
12851 for warn in self.warn:
12852 feedback_fn("WARNING: %s" % warn)
12854 assert ((self.op.disk_template is None) ^
12855 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
12856 "Not owning any node resource locks"
12858 result = []
12859 instance = self.instance
12861 # runtime memory
12862 if self.op.runtime_mem:
12863 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
12864 instance,
12865 self.op.runtime_mem)
12866 rpcres.Raise("Cannot modify instance runtime memory")
12867 result.append(("runtime_memory", self.op.runtime_mem))
12869 # Apply disk changes
12870 ApplyContainerMods("disk", instance.disks, result, self.diskmod,
12871 self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
12872 _UpdateIvNames(0, instance.disks)
12874 if self.op.disk_template:
12876 check_nodes = set(instance.all_nodes)
12877 if self.op.remote_node:
12878 check_nodes.add(self.op.remote_node)
12879 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
12880 owned = self.owned_locks(level)
12881 assert not (check_nodes - owned), \
12882 ("Not owning the correct locks, owning %r, expected at least %r" %
12883 (owned, check_nodes))
12885 r_shut = _ShutdownInstanceDisks(self, instance)
12886 if not r_shut:
12887 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
12888 " proceed with disk template conversion")
12889 mode = (instance.disk_template, self.op.disk_template)
12890 try:
12891 self._DISK_CONVERSIONS[mode](self, feedback_fn)
12892 except:
12893 self.cfg.ReleaseDRBDMinors(instance.name)
12894 raise
12895 result.append(("disk_template", self.op.disk_template))
12897 assert instance.disk_template == self.op.disk_template, \
12898 ("Expected disk template '%s', found '%s'" %
12899 (self.op.disk_template, instance.disk_template))
12901 # Release node and resource locks if there are any (they might already have
12902 # been released during disk conversion)
12903 _ReleaseLocks(self, locking.LEVEL_NODE)
12904 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
12906 # Apply NIC changes
12907 if self._new_nics is not None:
12908 instance.nics = self._new_nics
12909 result.extend(self._nic_chgdesc)
12912 if self.op.hvparams:
12913 instance.hvparams = self.hv_inst
12914 for key, val in self.op.hvparams.iteritems():
12915 result.append(("hv/%s" % key, val))
12918 if self.op.beparams:
12919 instance.beparams = self.be_inst
12920 for key, val in self.op.beparams.iteritems():
12921 result.append(("be/%s" % key, val))
12924 if self.op.os_name:
12925 instance.os = self.op.os_name
12928 if self.op.osparams:
12929 instance.osparams = self.os_inst
12930 for key, val in self.op.osparams.iteritems():
12931 result.append(("os/%s" % key, val))
12933 if self.op.offline is None:
12934 # Ignore
12935 pass
12936 elif self.op.offline:
12937 # Mark instance as offline
12938 self.cfg.MarkInstanceOffline(instance.name)
12939 result.append(("admin_state", constants.ADMINST_OFFLINE))
12940 else:
12941 # Mark instance as online, but stopped
12942 self.cfg.MarkInstanceDown(instance.name)
12943 result.append(("admin_state", constants.ADMINST_DOWN))
12945 self.cfg.Update(instance, feedback_fn)
12947 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
12948 self.owned_locks(locking.LEVEL_NODE)), \
12949 "All node locks should have been released by now"
12953 _DISK_CONVERSIONS = {
12954 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
12955 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
12956 }
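# Illustrative note (not part of the original source): Exec() dispatches disk
# template conversions through this table, keyed by (old, new) template:
#
#   mode = (instance.disk_template, self.op.disk_template)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)
#
# e.g. (constants.DT_PLAIN, constants.DT_DRBD8) selects _ConvertPlainToDrbd.
# The call is wrapped in a try block so reserved DRBD minors can be released
# if the conversion fails.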
12959 class LUInstanceChangeGroup(LogicalUnit):
12960 HPATH = "instance-change-group"
12961 HTYPE = constants.HTYPE_INSTANCE
12964 def ExpandNames(self):
12965 self.share_locks = _ShareAll()
12966 self.needed_locks = {
12967 locking.LEVEL_NODEGROUP: [],
12968 locking.LEVEL_NODE: [],
12969 }
12971 self._ExpandAndLockInstance()
12973 if self.op.target_groups:
12974 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
12975 self.op.target_groups)
12976 else:
12977 self.req_target_uuids = None
12979 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
12981 def DeclareLocks(self, level):
12982 if level == locking.LEVEL_NODEGROUP:
12983 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
12985 if self.req_target_uuids:
12986 lock_groups = set(self.req_target_uuids)
12988 # Lock all groups used by instance optimistically; this requires going
12989 # via the node before it's locked, requiring verification later on
12990 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
12991 lock_groups.update(instance_groups)
12992 else:
12993 # No target groups, need to lock all of them
12994 lock_groups = locking.ALL_SET
12996 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
12998 elif level == locking.LEVEL_NODE:
12999 if self.req_target_uuids:
13000 # Lock all nodes used by instances
13001 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
13002 self._LockInstancesNodes()
13004 # Lock all nodes in all potential target groups
13005 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
13006 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
13007 member_nodes = [node_name
13008 for group in lock_groups
13009 for node_name in self.cfg.GetNodeGroup(group).members]
13010 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
13011 else:
13012 # Lock all nodes as all groups are potential targets
13013 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13015 def CheckPrereq(self):
13016 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13017 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
13018 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
13020 assert (self.req_target_uuids is None or
13021 owned_groups.issuperset(self.req_target_uuids))
13022 assert owned_instances == set([self.op.instance_name])
13024 # Get instance information
13025 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
13027 # Check if node groups for locked instance are still correct
13028 assert owned_nodes.issuperset(self.instance.all_nodes), \
13029 ("Instance %s's nodes changed while we kept the lock" %
13030 self.op.instance_name)
13032 inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
13033 owned_groups)
13035 if self.req_target_uuids:
13036 # User requested specific target groups
13037 self.target_uuids = frozenset(self.req_target_uuids)
13039 # All groups except those used by the instance are potential targets
13040 self.target_uuids = owned_groups - inst_groups
13042 conflicting_groups = self.target_uuids & inst_groups
13043 if conflicting_groups:
13044 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
13045 " used by the instance '%s'" %
13046 (utils.CommaJoin(conflicting_groups),
13047 self.op.instance_name),
13048 errors.ECODE_INVAL)
13050 if not self.target_uuids:
13051 raise errors.OpPrereqError("There are no possible target groups",
13052 errors.ECODE_INVAL)
13054 def BuildHooksEnv(self):
13055 """Build hooks env.
13058 assert self.target_uuids
13060 env = {
13061 "TARGET_GROUPS": " ".join(self.target_uuids),
13062 }
13064 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13066 return env
13068 def BuildHooksNodes(self):
13069 """Build hooks nodes.
13072 mn = self.cfg.GetMasterNode()
13073 return ([mn], [mn])
13075 def Exec(self, feedback_fn):
13076 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
13078 assert instances == [self.op.instance_name], "Instance not locked"
13080 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
13081 instances=instances, target_groups=list(self.target_uuids))
13083 ial.Run(self.op.iallocator)
13085 if not ial.success:
13086 raise errors.OpPrereqError("Can't compute solution for changing group of"
13087 " instance '%s' using iallocator '%s': %s" %
13088 (self.op.instance_name, self.op.iallocator,
13089 ial.info),
13090 errors.ECODE_NORES)
13092 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
13094 self.LogInfo("Iallocator returned %s job(s) for changing group of"
13095 " instance '%s'", len(jobs), self.op.instance_name)
13097 return ResultWithJobs(jobs)
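# Illustrative sketch (not part of the original source): the iallocator result
# for IALLOCATOR_MODE_CHG_GROUP is turned into jobs by _LoadNodeEvacResult, a
# list of opcode lists that ResultWithJobs hands to the job queue, shaped
# roughly like:
#
#   jobs = [[<opcode moving instance X>], [<opcode moving instance Y>]]
#   return ResultWithJobs(jobs)
#
# The concrete opcodes depend entirely on what the iallocator plugin returns.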
13100 class LUBackupQuery(NoHooksLU):
13101 """Query the exports list
13106 def CheckArguments(self):
13107 self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
13108 ["node", "export"], self.op.use_locking)
13110 def ExpandNames(self):
13111 self.expq.ExpandNames(self)
13113 def DeclareLocks(self, level):
13114 self.expq.DeclareLocks(self, level)
13116 def Exec(self, feedback_fn):
13117 result = {}
13119 for (node, expname) in self.expq.OldStyleQuery(self):
13120 if expname is None:
13121 result[node] = False
13122 else:
13123 result.setdefault(node, []).append(expname)
13125 return result
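# Illustrative note (not part of the original source): the old-style result
# built above maps each node name either to False (the node could not be
# queried) or to the list of export names found on it, e.g.:
#
#   {"node1.example.com": ["inst1.example.com"],
#    "node2.example.com": False}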
13128 class _ExportQuery(_QueryBase):
13129 FIELDS = query.EXPORT_FIELDS
13131 #: The node name is not a unique key for this query
13132 SORT_FIELD = "node"
13134 def ExpandNames(self, lu):
13135 lu.needed_locks = {}
13137 # The following variables interact with _QueryBase._GetNames
13138 if self.names:
13139 self.wanted = _GetWantedNodes(lu, self.names)
13140 else:
13141 self.wanted = locking.ALL_SET
13143 self.do_locking = self.use_locking
13145 if self.do_locking:
13146 lu.share_locks = _ShareAll()
13147 lu.needed_locks = {
13148 locking.LEVEL_NODE: self.wanted,
13149 }
13151 def DeclareLocks(self, lu, level):
13152 pass
13154 def _GetQueryData(self, lu):
13155 """Computes the list of nodes and their attributes.
13158 # Locking is not used
13160 assert not (compat.any(lu.glm.is_owned(level)
13161 for level in locking.LEVELS
13162 if level != locking.LEVEL_CLUSTER) or
13163 self.do_locking or self.use_locking)
13165 nodes = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)
13167 result = []
13169 for (node, nres) in lu.rpc.call_export_list(nodes).items():
13170 if nres.fail_msg:
13171 result.append((node, None))
13172 else:
13173 result.extend((node, expname) for expname in nres.payload)
13175 return result
13178 class LUBackupPrepare(NoHooksLU):
13179 """Prepares an instance for an export and returns useful information.
13184 def ExpandNames(self):
13185 self._ExpandAndLockInstance()
13187 def CheckPrereq(self):
13188 """Check prerequisites.
13191 instance_name = self.op.instance_name
13193 self.instance = self.cfg.GetInstanceInfo(instance_name)
13194 assert self.instance is not None, \
13195 "Cannot retrieve locked instance %s" % self.op.instance_name
13196 _CheckNodeOnline(self, self.instance.primary_node)
13198 self._cds = _GetClusterDomainSecret()
13200 def Exec(self, feedback_fn):
13201 """Prepares an instance for an export.
13204 instance = self.instance
13206 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13207 salt = utils.GenerateSecret(8)
13209 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
13210 result = self.rpc.call_x509_cert_create(instance.primary_node,
13211 constants.RIE_CERT_VALIDITY)
13212 result.Raise("Can't create X509 key and certificate on %s" % result.node)
13214 (name, cert_pem) = result.payload
13216 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
13217 cert_pem)
13219 return {
13220 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
13221 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
13222 salt),
13223 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
13224 }
13226 return None
13229 class LUBackupExport(LogicalUnit):
13230 """Export an instance to an image in the cluster.
13233 HPATH = "instance-export"
13234 HTYPE = constants.HTYPE_INSTANCE
13237 def CheckArguments(self):
13238 """Check the arguments.
13241 self.x509_key_name = self.op.x509_key_name
13242 self.dest_x509_ca_pem = self.op.destination_x509_ca
13244 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13245 if not self.x509_key_name:
13246 raise errors.OpPrereqError("Missing X509 key name for encryption",
13247 errors.ECODE_INVAL)
13249 if not self.dest_x509_ca_pem:
13250 raise errors.OpPrereqError("Missing destination X509 CA",
13251 errors.ECODE_INVAL)
13253 def ExpandNames(self):
13254 self._ExpandAndLockInstance()
13256 # Lock all nodes for local exports
13257 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13258 # FIXME: lock only instance primary and destination node
13260 # Sad but true, for now we have to lock all nodes, as we don't know where
13261 # the previous export might be, and in this LU we search for it and
13262 # remove it from its current node. In the future we could fix this by:
13263 # - making a tasklet to search (share-lock all), then create the
13264 # new one, then one to remove, after
13265 # - removing the removal operation altogether
13266 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13268 def DeclareLocks(self, level):
13269 """Last minute lock declaration."""
13270 # All nodes are locked anyway, so nothing to do here.
13272 def BuildHooksEnv(self):
13273 """Build hooks env.
13275 This will run on the master, primary node and target node.
13279 "EXPORT_MODE": self.op.mode,
13280 "EXPORT_NODE": self.op.target_node,
13281 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
13282 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
13283 # TODO: Generic function for boolean env variables
13284 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
13287 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13291 def BuildHooksNodes(self):
13292 """Build hooks nodes.
13295 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
13297 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13298 nl.append(self.op.target_node)
13300 return (nl, nl)
13302 def CheckPrereq(self):
13303 """Check prerequisites.
13305 This checks that the instance and node names are valid.
13308 instance_name = self.op.instance_name
13310 self.instance = self.cfg.GetInstanceInfo(instance_name)
13311 assert self.instance is not None, \
13312 "Cannot retrieve locked instance %s" % self.op.instance_name
13313 _CheckNodeOnline(self, self.instance.primary_node)
13315 if (self.op.remove_instance and
13316 self.instance.admin_state == constants.ADMINST_UP and
13317 not self.op.shutdown):
13318 raise errors.OpPrereqError("Can not remove instance without shutting it"
13321 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13322 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
13323 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
13324 assert self.dst_node is not None
13326 _CheckNodeOnline(self, self.dst_node.name)
13327 _CheckNodeNotDrained(self, self.dst_node.name)
13330 self.dest_disk_info = None
13331 self.dest_x509_ca = None
13333 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13334 self.dst_node = None
13336 if len(self.op.target_node) != len(self.instance.disks):
13337 raise errors.OpPrereqError(("Received destination information for %s"
13338 " disks, but instance %s has %s disks") %
13339 (len(self.op.target_node), instance_name,
13340 len(self.instance.disks)),
13341 errors.ECODE_INVAL)
13343 cds = _GetClusterDomainSecret()
13345 # Check X509 key name
13346 try:
13347 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
13348 except (TypeError, ValueError), err:
13349 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
13351 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
13352 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
13353 errors.ECODE_INVAL)
13355 # Load and verify CA
13356 try:
13357 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
13358 except OpenSSL.crypto.Error, err:
13359 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
13360 (err, ), errors.ECODE_INVAL)
13362 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
13363 if errcode is not None:
13364 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
13365 (msg, ), errors.ECODE_INVAL)
13367 self.dest_x509_ca = cert
13369 # Verify target information
13370 disk_info = []
13371 for idx, disk_data in enumerate(self.op.target_node):
13372 try:
13373 (host, port, magic) = \
13374 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
13375 except errors.GenericError, err:
13376 raise errors.OpPrereqError("Target info for disk %s: %s" %
13377 (idx, err), errors.ECODE_INVAL)
13379 disk_info.append((host, port, magic))
13381 assert len(disk_info) == len(self.op.target_node)
13382 self.dest_disk_info = disk_info
13384 else:
13385 raise errors.ProgrammerError("Unhandled export mode %r" %
13386 self.op.mode)
13388 # instance disk type verification
13389 # TODO: Implement export support for file-based disks
13390 for disk in self.instance.disks:
13391 if disk.dev_type == constants.LD_FILE:
13392 raise errors.OpPrereqError("Export not supported for instances with"
13393 " file-based disks", errors.ECODE_INVAL)
13395 def _CleanupExports(self, feedback_fn):
13396 """Removes exports of current instance from all other nodes.
13398 If an instance in a cluster with nodes A..D was exported to node C, its
13399 exports will be removed from the nodes A, B and D.
13402 assert self.op.mode != constants.EXPORT_MODE_REMOTE
13404 nodelist = self.cfg.GetNodeList()
13405 nodelist.remove(self.dst_node.name)
13407 # on one-node clusters nodelist will be empty after the removal
13408 # if we proceed the backup would be removed because OpBackupQuery
13409 # substitutes an empty list with the full cluster node list.
13410 iname = self.instance.name
13411 if nodelist:
13412 feedback_fn("Removing old exports for instance %s" % iname)
13413 exportlist = self.rpc.call_export_list(nodelist)
13414 for node in exportlist:
13415 if exportlist[node].fail_msg:
13416 continue
13417 if iname in exportlist[node].payload:
13418 msg = self.rpc.call_export_remove(node, iname).fail_msg
13419 if msg:
13420 self.LogWarning("Could not remove older export for instance %s"
13421 " on node %s: %s", iname, node, msg)
13423 def Exec(self, feedback_fn):
13424 """Export an instance to an image in the cluster.
13427 assert self.op.mode in constants.EXPORT_MODES
13429 instance = self.instance
13430 src_node = instance.primary_node
13432 if self.op.shutdown:
13433 # shutdown the instance, but not the disks
13434 feedback_fn("Shutting down instance %s" % instance.name)
13435 result = self.rpc.call_instance_shutdown(src_node, instance,
13436 self.op.shutdown_timeout)
13437 # TODO: Maybe ignore failures if ignore_remove_failures is set
13438 result.Raise("Could not shutdown instance %s on"
13439 " node %s" % (instance.name, src_node))
13441 # set the disks ID correctly since call_instance_start needs the
13442 # correct drbd minor to create the symlinks
13443 for disk in instance.disks:
13444 self.cfg.SetDiskID(disk, src_node)
13446 activate_disks = (instance.admin_state != constants.ADMINST_UP)
13448 if activate_disks:
13449 # Activate the instance disks if we're exporting a stopped instance
13450 feedback_fn("Activating disks for %s" % instance.name)
13451 _StartInstanceDisks(self, instance, None)
13453 try:
13454 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
13455 instance)
13457 helper.CreateSnapshots()
13458 try:
13459 if (self.op.shutdown and
13460 instance.admin_state == constants.ADMINST_UP and
13461 not self.op.remove_instance):
13462 assert not activate_disks
13463 feedback_fn("Starting instance %s" % instance.name)
13464 result = self.rpc.call_instance_start(src_node,
13465 (instance, None, None), False)
13466 msg = result.fail_msg
13467 if msg:
13468 feedback_fn("Failed to start instance: %s" % msg)
13469 _ShutdownInstanceDisks(self, instance)
13470 raise errors.OpExecError("Could not start instance: %s" % msg)
13472 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13473 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
13474 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13475 connect_timeout = constants.RIE_CONNECT_TIMEOUT
13476 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
13478 (key_name, _, _) = self.x509_key_name
13480 dest_ca_pem = \
13481 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
13482 self.dest_x509_ca)
13484 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
13485 key_name, dest_ca_pem,
13486 timeouts)
13487 finally:
13488 helper.Cleanup()
13490 # Check for backwards compatibility
13491 assert len(dresults) == len(instance.disks)
13492 assert compat.all(isinstance(i, bool) for i in dresults), \
13493 "Not all results are boolean: %r" % dresults
13497 feedback_fn("Deactivating disks for %s" % instance.name)
13498 _ShutdownInstanceDisks(self, instance)
13500 if not (compat.all(dresults) and fin_resu):
13501 failures = []
13502 if not fin_resu:
13503 failures.append("export finalization")
13504 if not compat.all(dresults):
13505 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
13506 if not dsk)
13507 failures.append("disk export: disk(s) %s" % fdsk)
13509 raise errors.OpExecError("Export failed, errors in %s" %
13510 utils.CommaJoin(failures))
13512 # At this point, the export was successful, we can cleanup/finish
13514 # Remove instance if requested
13515 if self.op.remove_instance:
13516 feedback_fn("Removing instance %s" % instance.name)
13517 _RemoveInstance(self, feedback_fn, instance,
13518 self.op.ignore_remove_failures)
13520 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13521 self._CleanupExports(feedback_fn)
13523 return fin_resu, dresults
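# Illustrative note (not part of the original source): callers of this LU get
# back the tuple (fin_resu, dresults), where fin_resu is the overall export
# finalization status and dresults contains one boolean per instance disk,
# e.g. (True, [True, True]) for a fully successful two-disk export.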
13526 class LUBackupRemove(NoHooksLU):
13527 """Remove exports related to the named instance.
13532 def ExpandNames(self):
13533 self.needed_locks = {}
13534 # We need all nodes to be locked in order for RemoveExport to work, but we
13535 # don't need to lock the instance itself, as nothing will happen to it (and
13536 # we can remove exports also for a removed instance)
13537 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13539 def Exec(self, feedback_fn):
13540 """Remove any export.
13543 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
13544 # If the instance was not found we'll try with the name that was passed in.
13545 # This will only work if it was an FQDN, though.
13546 fqdn_warn = False
13547 if not instance_name:
13548 fqdn_warn = True
13549 instance_name = self.op.instance_name
13551 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
13552 exportlist = self.rpc.call_export_list(locked_nodes)
13553 found = False
13554 for node in exportlist:
13555 msg = exportlist[node].fail_msg
13556 if msg:
13557 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
13558 continue
13559 if instance_name in exportlist[node].payload:
13560 found = True
13561 result = self.rpc.call_export_remove(node, instance_name)
13562 msg = result.fail_msg
13563 if msg:
13564 logging.error("Could not remove export for instance %s"
13565 " on node %s: %s", instance_name, node, msg)
13567 if fqdn_warn and not found:
13568 feedback_fn("Export not found. If trying to remove an export belonging"
13569 " to a deleted instance please use its Fully Qualified"
13573 class LUGroupAdd(LogicalUnit):
13574 """Logical unit for creating node groups.
13577 HPATH = "group-add"
13578 HTYPE = constants.HTYPE_GROUP
13581 def ExpandNames(self):
13582 # We need the new group's UUID here so that we can create and acquire the
13583 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
13584 # that it should not check whether the UUID exists in the configuration.
13585 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
13586 self.needed_locks = {}
13587 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
13589 def CheckPrereq(self):
13590 """Check prerequisites.
13592 This checks that the given group name is not an existing node group
13597 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13598 except errors.OpPrereqError:
13601 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
13602 " node group (UUID: %s)" %
13603 (self.op.group_name, existing_uuid),
13604 errors.ECODE_EXISTS)
13606 if self.op.ndparams:
13607 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
13609 if self.op.hv_state:
13610 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
13612 self.new_hv_state = None
13614 if self.op.disk_state:
13615 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
13617 self.new_disk_state = None
13619 if self.op.diskparams:
13620 for templ in constants.DISK_TEMPLATES:
13621 if templ in self.op.diskparams:
13622 utils.ForceDictType(self.op.diskparams[templ],
13623 constants.DISK_DT_TYPES)
13624 self.new_diskparams = self.op.diskparams
13625 try:
13626 utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
13627 except errors.OpPrereqError, err:
13628 raise errors.OpPrereqError("While verifying diskparams options: %s" % err,
13629 errors.ECODE_INVAL)
13630 else:
13631 self.new_diskparams = {}
13633 if self.op.ipolicy:
13634 cluster = self.cfg.GetClusterInfo()
13635 full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
13636 try:
13637 objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
13638 except errors.ConfigurationError, err:
13639 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
13640 errors.ECODE_INVAL)
13642 def BuildHooksEnv(self):
13643 """Build hooks env.
13647 "GROUP_NAME": self.op.group_name,
13650 def BuildHooksNodes(self):
13651 """Build hooks nodes.
13654 mn = self.cfg.GetMasterNode()
13655 return ([mn], [mn])
13657 def Exec(self, feedback_fn):
13658 """Add the node group to the cluster.
13661 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
13662 uuid=self.group_uuid,
13663 alloc_policy=self.op.alloc_policy,
13664 ndparams=self.op.ndparams,
13665 diskparams=self.new_diskparams,
13666 ipolicy=self.op.ipolicy,
13667 hv_state_static=self.new_hv_state,
13668 disk_state_static=self.new_disk_state)
13670 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
13671 del self.remove_locks[locking.LEVEL_NODEGROUP]
13674 class LUGroupAssignNodes(NoHooksLU):
13675 """Logical unit for assigning nodes to groups.
13680 def ExpandNames(self):
13681 # These raise errors.OpPrereqError on their own:
13682 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13683 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
13685 # We want to lock all the affected nodes and groups. We have readily
13686 # available the list of nodes, and the *destination* group. To gather the
13687 # list of "source" groups, we need to fetch node information later on.
13688 self.needed_locks = {
13689 locking.LEVEL_NODEGROUP: set([self.group_uuid]),
13690 locking.LEVEL_NODE: self.op.nodes,
13691 }
13693 def DeclareLocks(self, level):
13694 if level == locking.LEVEL_NODEGROUP:
13695 assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
13697 # Try to get all affected nodes' groups without having the group or node
13698 # lock yet. Needs verification later in the code flow.
13699 groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
13701 self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
13703 def CheckPrereq(self):
13704 """Check prerequisites.
13707 assert self.needed_locks[locking.LEVEL_NODEGROUP]
13708 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
13709 frozenset(self.op.nodes))
13711 expected_locks = (set([self.group_uuid]) |
13712 self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
13713 actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
13714 if actual_locks != expected_locks:
13715 raise errors.OpExecError("Nodes changed groups since locks were acquired,"
13716 " current groups are '%s', used to be '%s'" %
13717 (utils.CommaJoin(expected_locks),
13718 utils.CommaJoin(actual_locks)))
13720 self.node_data = self.cfg.GetAllNodesInfo()
13721 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13722 instance_data = self.cfg.GetAllInstancesInfo()
13724 if self.group is None:
13725 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13726 (self.op.group_name, self.group_uuid))
13728 (new_splits, previous_splits) = \
13729 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
13730 for node in self.op.nodes],
13731 self.node_data, instance_data)
13733 if new_splits:
13734 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
13736 if not self.op.force:
13737 raise errors.OpExecError("The following instances get split by this"
13738 " change and --force was not given: %s" %
13739 fmt_new_splits)
13740 else:
13741 self.LogWarning("This operation will split the following instances: %s",
13742 fmt_new_splits)
13744 if previous_splits:
13745 self.LogWarning("In addition, these already-split instances continue"
13746 " to be split across groups: %s",
13747 utils.CommaJoin(utils.NiceSort(previous_splits)))
13749 def Exec(self, feedback_fn):
13750 """Assign nodes to a new group.
13753 mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
13755 self.cfg.AssignGroupNodes(mods)
13757 @staticmethod
13758 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
13759 """Check for split instances after a node assignment.
13761 This method considers a series of node assignments as an atomic operation,
13762 and returns information about split instances after applying the set of
13765 In particular, it returns information about newly split instances, and
13766 instances that were already split, and remain so after the change.
13768 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
13771 @type changes: list of (node_name, new_group_uuid) pairs.
13772 @param changes: list of node assignments to consider.
13773 @param node_data: a dict with data for all nodes
13774 @param instance_data: a dict with all instances to consider
13775 @rtype: a two-tuple
13776 @return: a list of instances that were previously okay and result split as a
13777 consequence of this change, and a list of instances that were previously
13778 split and this change does not fix.
13781 changed_nodes = dict((node, group) for node, group in changes
13782 if node_data[node].group != group)
13784 all_split_instances = set()
13785 previously_split_instances = set()
13787 def InstanceNodes(instance):
13788 return [instance.primary_node] + list(instance.secondary_nodes)
13790 for inst in instance_data.values():
13791 if inst.disk_template not in constants.DTS_INT_MIRROR:
13792 continue
13794 instance_nodes = InstanceNodes(inst)
13796 if len(set(node_data[node].group for node in instance_nodes)) > 1:
13797 previously_split_instances.add(inst.name)
13799 if len(set(changed_nodes.get(node, node_data[node].group)
13800 for node in instance_nodes)) > 1:
13801 all_split_instances.add(inst.name)
13803 return (list(all_split_instances - previously_split_instances),
13804 list(previously_split_instances & all_split_instances))
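# Illustrative sketch (not part of the original source): with a DRBD instance
# "inst1" whose primary node "n1" and secondary node "n2" are both in group
# "g1", reassigning only "n2" to group "g2" would be reported as a new split
# (all names are made up for the example):
#
#   changes = [("n2", "g2-uuid")]
#   (new, previous) = self.CheckAssignmentForSplitInstances(
#       changes, self.cfg.GetAllNodesInfo(), self.cfg.GetAllInstancesInfo())
#   # new == ["inst1"], previous == []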
13807 class _GroupQuery(_QueryBase):
13808 FIELDS = query.GROUP_FIELDS
13810 def ExpandNames(self, lu):
13811 lu.needed_locks = {}
13813 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
13814 self._cluster = lu.cfg.GetClusterInfo()
13815 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
13817 if not self.names:
13818 self.wanted = [name_to_uuid[name]
13819 for name in utils.NiceSort(name_to_uuid.keys())]
13820 else:
13821 # Accept names to be either names or UUIDs.
13822 missing = []
13823 self.wanted = []
13824 all_uuid = frozenset(self._all_groups.keys())
13826 for name in self.names:
13827 if name in all_uuid:
13828 self.wanted.append(name)
13829 elif name in name_to_uuid:
13830 self.wanted.append(name_to_uuid[name])
13831 else:
13832 missing.append(name)
13834 if missing:
13835 raise errors.OpPrereqError("Some groups do not exist: %s" %
13836 utils.CommaJoin(missing),
13837 errors.ECODE_NOENT)
13839 def DeclareLocks(self, lu, level):
13840 pass
13842 def _GetQueryData(self, lu):
13843 """Computes the list of node groups and their attributes.
13846 do_nodes = query.GQ_NODE in self.requested_data
13847 do_instances = query.GQ_INST in self.requested_data
13849 group_to_nodes = None
13850 group_to_instances = None
13852 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
13853 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
13854 # latter GetAllInstancesInfo() is not enough, for we have to go through
13855 # instance->node. Hence, we will need to process nodes even if we only need
13856 # instance information.
13857 if do_nodes or do_instances:
13858 all_nodes = lu.cfg.GetAllNodesInfo()
13859 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
13860 node_to_group = {}
13862 for node in all_nodes.values():
13863 if node.group in group_to_nodes:
13864 group_to_nodes[node.group].append(node.name)
13865 node_to_group[node.name] = node.group
13867 if do_instances:
13868 all_instances = lu.cfg.GetAllInstancesInfo()
13869 group_to_instances = dict((uuid, []) for uuid in self.wanted)
13871 for instance in all_instances.values():
13872 node = instance.primary_node
13873 if node in node_to_group:
13874 group_to_instances[node_to_group[node]].append(instance.name)
13876 if not do_nodes:
13877 # Do not pass on node information if it was not requested.
13878 group_to_nodes = None
13880 return query.GroupQueryData(self._cluster,
13881 [self._all_groups[uuid]
13882 for uuid in self.wanted],
13883 group_to_nodes, group_to_instances,
13884 query.GQ_DISKPARAMS in self.requested_data)
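# Illustrative note (not part of the original source): when GQ_NODE/GQ_INST
# data is requested, the auxiliary mappings built above have the shape
#
#   group_to_nodes     = {"group-uuid-1": ["node1", "node2"], ...}
#   group_to_instances = {"group-uuid-1": ["inst1"], ...}
#
# with each instance attributed to the group of its primary node only.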
13887 class LUGroupQuery(NoHooksLU):
13888 """Logical unit for querying node groups.
13893 def CheckArguments(self):
13894 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
13895 self.op.output_fields, False)
13897 def ExpandNames(self):
13898 self.gq.ExpandNames(self)
13900 def DeclareLocks(self, level):
13901 self.gq.DeclareLocks(self, level)
13903 def Exec(self, feedback_fn):
13904 return self.gq.OldStyleQuery(self)
13907 class LUGroupSetParams(LogicalUnit):
13908 """Modifies the parameters of a node group.
13911 HPATH = "group-modify"
13912 HTYPE = constants.HTYPE_GROUP
13915 def CheckArguments(self):
13916 all_changes = [
13917 self.op.ndparams,
13918 self.op.diskparams,
13919 self.op.alloc_policy,
13920 self.op.hv_state,
13921 self.op.disk_state,
13922 self.op.ipolicy,
13923 ]
13925 if all_changes.count(None) == len(all_changes):
13926 raise errors.OpPrereqError("Please pass at least one modification",
13927 errors.ECODE_INVAL)
13929 def ExpandNames(self):
13930 # This raises errors.OpPrereqError on its own:
13931 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13933 self.needed_locks = {
13934 locking.LEVEL_INSTANCE: [],
13935 locking.LEVEL_NODEGROUP: [self.group_uuid],
13936 }
13938 self.share_locks[locking.LEVEL_INSTANCE] = 1
13940 def DeclareLocks(self, level):
13941 if level == locking.LEVEL_INSTANCE:
13942 assert not self.needed_locks[locking.LEVEL_INSTANCE]
13944 # Lock instances optimistically, needs verification once group lock has
13945 # been acquired
13946 self.needed_locks[locking.LEVEL_INSTANCE] = \
13947 self.cfg.GetNodeGroupInstances(self.group_uuid)
13949 @staticmethod
13950 def _UpdateAndVerifyDiskParams(old, new):
13951 """Updates and verifies disk parameters.
13954 new_params = _GetUpdatedParams(old, new)
13955 utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
13956 return new_params
13958 def CheckPrereq(self):
13959 """Check prerequisites.
13962 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13964 # Check if locked instances are still correct
13965 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
13967 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13968 cluster = self.cfg.GetClusterInfo()
13970 if self.group is None:
13971 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13972 (self.op.group_name, self.group_uuid))
13974 if self.op.ndparams:
13975 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
13976 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
13977 self.new_ndparams = new_ndparams
13979 if self.op.diskparams:
13980 diskparams = self.group.diskparams
13981 uavdp = self._UpdateAndVerifyDiskParams
13982 # For each disktemplate subdict update and verify the values
13983 new_diskparams = dict((dt,
13984 uavdp(diskparams.get(dt, {}),
13985 self.op.diskparams[dt]))
13986 for dt in constants.DISK_TEMPLATES
13987 if dt in self.op.diskparams)
13988 # As we've all subdicts of diskparams ready, lets merge the actual
13989 # dict with all updated subdicts
13990 self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
13991 try:
13992 utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
13993 except errors.OpPrereqError, err:
13994 raise errors.OpPrereqError("While verifying diskparams options: %s" % err,
13995 errors.ECODE_INVAL)
13997 if self.op.hv_state:
13998 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
13999 self.group.hv_state_static)
14001 if self.op.disk_state:
14002 self.new_disk_state = \
14003 _MergeAndVerifyDiskState(self.op.disk_state,
14004 self.group.disk_state_static)
14006 if self.op.ipolicy:
14007 self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
14008 self.op.ipolicy,
14009 group_policy=True)
14011 new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
14012 inst_filter = lambda inst: inst.name in owned_instances
14013 instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
14014 violations = \
14015 _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
14016 self.group),
14017 new_ipolicy, instances)
14019 if violations:
14020 self.LogWarning("After the ipolicy change the following instances"
14021 " violate them: %s",
14022 utils.CommaJoin(violations))
14024 def BuildHooksEnv(self):
14025 """Build hooks env.
14029 "GROUP_NAME": self.op.group_name,
14030 "NEW_ALLOC_POLICY": self.op.alloc_policy,
14033 def BuildHooksNodes(self):
14034 """Build hooks nodes.
14037 mn = self.cfg.GetMasterNode()
14038 return ([mn], [mn])
14040 def Exec(self, feedback_fn):
14041 """Modifies the node group.
14046 if self.op.ndparams:
14047 self.group.ndparams = self.new_ndparams
14048 result.append(("ndparams", str(self.group.ndparams)))
14050 if self.op.diskparams:
14051 self.group.diskparams = self.new_diskparams
14052 result.append(("diskparams", str(self.group.diskparams)))
14054 if self.op.alloc_policy:
14055 self.group.alloc_policy = self.op.alloc_policy
14057 if self.op.hv_state:
14058 self.group.hv_state_static = self.new_hv_state
14060 if self.op.disk_state:
14061 self.group.disk_state_static = self.new_disk_state
14063 if self.op.ipolicy:
14064 self.group.ipolicy = self.new_ipolicy
14066 self.cfg.Update(self.group, feedback_fn)
14068 return result
14070 class LUGroupRemove(LogicalUnit):
14071 HPATH = "group-remove"
14072 HTYPE = constants.HTYPE_GROUP
14075 def ExpandNames(self):
14076 # This raises errors.OpPrereqError on its own:
14077 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14078 self.needed_locks = {
14079 locking.LEVEL_NODEGROUP: [self.group_uuid],
14080 }
14082 def CheckPrereq(self):
14083 """Check prerequisites.
14085 This checks that the given group name exists as a node group, that is
14086 empty (i.e., contains no nodes), and that is not the last group of the
14087 cluster.
14090 # Verify that the group is empty.
14091 group_nodes = [node.name
14092 for node in self.cfg.GetAllNodesInfo().values()
14093 if node.group == self.group_uuid]
14096 raise errors.OpPrereqError("Group '%s' not empty, has the following"
14098 (self.op.group_name,
14099 utils.CommaJoin(utils.NiceSort(group_nodes))),
14100 errors.ECODE_STATE)
14102 # Verify the cluster would not be left group-less.
14103 if len(self.cfg.GetNodeGroupList()) == 1:
14104 raise errors.OpPrereqError("Group '%s' is the only group,"
14105 " cannot be removed" %
14106 self.op.group_name,
14107 errors.ECODE_STATE)
14109 def BuildHooksEnv(self):
14110 """Build hooks env.
14114 "GROUP_NAME": self.op.group_name,
14117 def BuildHooksNodes(self):
14118 """Build hooks nodes.
14121 mn = self.cfg.GetMasterNode()
14122 return ([mn], [mn])
14124 def Exec(self, feedback_fn):
14125 """Remove the node group.
14129 self.cfg.RemoveNodeGroup(self.group_uuid)
14130 except errors.ConfigurationError:
14131 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
14132 (self.op.group_name, self.group_uuid))
14134 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
14137 class LUGroupRename(LogicalUnit):
14138 HPATH = "group-rename"
14139 HTYPE = constants.HTYPE_GROUP
14142 def ExpandNames(self):
14143 # This raises errors.OpPrereqError on its own:
14144 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14146 self.needed_locks = {
14147 locking.LEVEL_NODEGROUP: [self.group_uuid],
14148 }
14150 def CheckPrereq(self):
14151 """Check prerequisites.
14153 Ensures requested new name is not yet used.
14156 try:
14157 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
14158 except errors.OpPrereqError:
14159 pass
14160 else:
14161 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
14162 " node group (UUID: %s)" %
14163 (self.op.new_name, new_name_uuid),
14164 errors.ECODE_EXISTS)
14166 def BuildHooksEnv(self):
14167 """Build hooks env.
14171 "OLD_NAME": self.op.group_name,
14172 "NEW_NAME": self.op.new_name,
14175 def BuildHooksNodes(self):
14176 """Build hooks nodes.
14179 mn = self.cfg.GetMasterNode()
14181 all_nodes = self.cfg.GetAllNodesInfo()
14182 all_nodes.pop(mn, None)
14184 run_nodes = [mn]
14185 run_nodes.extend(node.name for node in all_nodes.values()
14186 if node.group == self.group_uuid)
14188 return (run_nodes, run_nodes)
14190 def Exec(self, feedback_fn):
14191 """Rename the node group.
14194 group = self.cfg.GetNodeGroup(self.group_uuid)
14196 if group is None:
14197 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
14198 (self.op.group_name, self.group_uuid))
14200 group.name = self.op.new_name
14201 self.cfg.Update(group, feedback_fn)
14203 return self.op.new_name
14206 class LUGroupEvacuate(LogicalUnit):
14207 HPATH = "group-evacuate"
14208 HTYPE = constants.HTYPE_GROUP
14211 def ExpandNames(self):
14212 # This raises errors.OpPrereqError on its own:
14213 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14215 if self.op.target_groups:
14216 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
14217 self.op.target_groups)
14218 else:
14219 self.req_target_uuids = []
14221 if self.group_uuid in self.req_target_uuids:
14222 raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
14223 " as a target group (targets are %s)" %
14225 utils.CommaJoin(self.req_target_uuids)),
14226 errors.ECODE_INVAL)
14228 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
14230 self.share_locks = _ShareAll()
14231 self.needed_locks = {
14232 locking.LEVEL_INSTANCE: [],
14233 locking.LEVEL_NODEGROUP: [],
14234 locking.LEVEL_NODE: [],
14235 }
14237 def DeclareLocks(self, level):
14238 if level == locking.LEVEL_INSTANCE:
14239 assert not self.needed_locks[locking.LEVEL_INSTANCE]
14241 # Lock instances optimistically, needs verification once node and group
14242 # locks have been acquired
14243 self.needed_locks[locking.LEVEL_INSTANCE] = \
14244 self.cfg.GetNodeGroupInstances(self.group_uuid)
14246 elif level == locking.LEVEL_NODEGROUP:
14247 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
14249 if self.req_target_uuids:
14250 lock_groups = set([self.group_uuid] + self.req_target_uuids)
14252 # Lock all groups used by instances optimistically; this requires going
14253 # via the node before it's locked, requiring verification later on
14254 lock_groups.update(group_uuid
14255 for instance_name in
14256 self.owned_locks(locking.LEVEL_INSTANCE)
14257 for group_uuid in
14258 self.cfg.GetInstanceNodeGroups(instance_name))
14259 else:
14260 # No target groups, need to lock all of them
14261 lock_groups = locking.ALL_SET
14263 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
14265 elif level == locking.LEVEL_NODE:
14266 # This will only lock the nodes in the group to be evacuated which
14267 # contain actual instances
14268 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
14269 self._LockInstancesNodes()
14271 # Lock all nodes in group to be evacuated and target groups
14272 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14273 assert self.group_uuid in owned_groups
14274 member_nodes = [node_name
14275 for group in owned_groups
14276 for node_name in self.cfg.GetNodeGroup(group).members]
14277 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
14279 def CheckPrereq(self):
14280 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
14281 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14282 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
14284 assert owned_groups.issuperset(self.req_target_uuids)
14285 assert self.group_uuid in owned_groups
14287 # Check if locked instances are still correct
14288 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
14290 # Get instance information
14291 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
14293 # Check if node groups for locked instances are still correct
14294 _CheckInstancesNodeGroups(self.cfg, self.instances,
14295 owned_groups, owned_nodes, self.group_uuid)
14297 if self.req_target_uuids:
14298 # User requested specific target groups
14299 self.target_uuids = self.req_target_uuids
14301 # All groups except the one to be evacuated are potential targets
14302 self.target_uuids = [group_uuid for group_uuid in owned_groups
14303 if group_uuid != self.group_uuid]
14305 if not self.target_uuids:
14306 raise errors.OpPrereqError("There are no possible target groups",
14307 errors.ECODE_INVAL)
14309 def BuildHooksEnv(self):
14310 """Build hooks env.
14314 "GROUP_NAME": self.op.group_name,
14315 "TARGET_GROUPS": " ".join(self.target_uuids),
14318 def BuildHooksNodes(self):
14319 """Build hooks nodes.
14322 mn = self.cfg.GetMasterNode()
14324 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
14326 run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
14328 return (run_nodes, run_nodes)
14330 def Exec(self, feedback_fn):
14331 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
14333 assert self.group_uuid not in self.target_uuids
14335 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
14336 instances=instances, target_groups=self.target_uuids)
14338 ial.Run(self.op.iallocator)
14340 if not ial.success:
14341 raise errors.OpPrereqError("Can't compute group evacuation using"
14342 " iallocator '%s': %s" %
14343 (self.op.iallocator, ial.info),
14344 errors.ECODE_NORES)
14346 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
14348 self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
14349 len(jobs), self.op.group_name)
14351 return ResultWithJobs(jobs)
14354 class TagsLU(NoHooksLU): # pylint: disable=W0223
14355 """Generic tags LU.
14357 This is an abstract class which is the parent of all the other tags LUs.
14360 def ExpandNames(self):
14361 self.group_uuid = None
14362 self.needed_locks = {}
14364 if self.op.kind == constants.TAG_NODE:
14365 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
14366 lock_level = locking.LEVEL_NODE
14367 lock_name = self.op.name
14368 elif self.op.kind == constants.TAG_INSTANCE:
14369 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
14370 lock_level = locking.LEVEL_INSTANCE
14371 lock_name = self.op.name
14372 elif self.op.kind == constants.TAG_NODEGROUP:
14373 self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
14374 lock_level = locking.LEVEL_NODEGROUP
14375 lock_name = self.group_uuid
14376 else:
14377 lock_level = None
14378 lock_name = None
14380 if lock_level and getattr(self.op, "use_locking", True):
14381 self.needed_locks[lock_level] = lock_name
14383 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
14384 # not possible to acquire the BGL based on opcode parameters)
14386 def CheckPrereq(self):
14387 """Check prerequisites.
14390 if self.op.kind == constants.TAG_CLUSTER:
14391 self.target = self.cfg.GetClusterInfo()
14392 elif self.op.kind == constants.TAG_NODE:
14393 self.target = self.cfg.GetNodeInfo(self.op.name)
14394 elif self.op.kind == constants.TAG_INSTANCE:
14395 self.target = self.cfg.GetInstanceInfo(self.op.name)
14396 elif self.op.kind == constants.TAG_NODEGROUP:
14397 self.target = self.cfg.GetNodeGroup(self.group_uuid)
14399 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
14400 str(self.op.kind), errors.ECODE_INVAL)
14403 class LUTagsGet(TagsLU):
14404 """Returns the tags of a given object.
14409 def ExpandNames(self):
14410 TagsLU.ExpandNames(self)
14412 # Share locks as this is only a read operation
14413 self.share_locks = _ShareAll()
14415 def Exec(self, feedback_fn):
14416 """Returns the tag list.
14419 return list(self.target.GetTags())
14422 class LUTagsSearch(NoHooksLU):
14423 """Searches the tags for a given pattern.
14428 def ExpandNames(self):
14429 self.needed_locks = {}
14431 def CheckPrereq(self):
14432 """Check prerequisites.
14434 This checks the pattern passed for validity by compiling it.
14438 self.re = re.compile(self.op.pattern)
14439 except re.error, err:
14440 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
14441 (self.op.pattern, err), errors.ECODE_INVAL)
14443 def Exec(self, feedback_fn):
14444 """Returns the tag list.
14448 tgts = [("/cluster", cfg.GetClusterInfo())]
14449 ilist = cfg.GetAllInstancesInfo().values()
14450 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
14451 nlist = cfg.GetAllNodesInfo().values()
14452 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
14453 tgts.extend(("/nodegroup/%s" % n.name, n)
14454 for n in cfg.GetAllNodeGroupsInfo().values())
14455 results = []
14456 for path, target in tgts:
14457 for tag in target.GetTags():
14458 if self.re.search(tag):
14459 results.append((path, tag))
14461 return results
14463 class LUTagsSet(TagsLU):
14464 """Sets a tag on a given object.
14469 def CheckPrereq(self):
14470 """Check prerequisites.
14472 This checks the type and length of the tag name and value.
14475 TagsLU.CheckPrereq(self)
14476 for tag in self.op.tags:
14477 objects.TaggableObject.ValidateTag(tag)
14479 def Exec(self, feedback_fn):
14480 """Sets the tag.
14483 try:
14484 for tag in self.op.tags:
14485 self.target.AddTag(tag)
14486 except errors.TagError, err:
14487 raise errors.OpExecError("Error while setting tag: %s" % str(err))
14488 self.cfg.Update(self.target, feedback_fn)
14491 class LUTagsDel(TagsLU):
14492 """Delete a list of tags from a given object.
14497 def CheckPrereq(self):
14498 """Check prerequisites.
14500 This checks that we have the given tag.
14503 TagsLU.CheckPrereq(self)
14504 for tag in self.op.tags:
14505 objects.TaggableObject.ValidateTag(tag)
14506 del_tags = frozenset(self.op.tags)
14507 cur_tags = self.target.GetTags()
14509 diff_tags = del_tags - cur_tags
14510 if diff_tags:
14511 diff_names = ("'%s'" % i for i in sorted(diff_tags))
14512 raise errors.OpPrereqError("Tag(s) %s not found" %
14513 (utils.CommaJoin(diff_names), ),
14514 errors.ECODE_NOENT)
14516 def Exec(self, feedback_fn):
14517 """Remove the tag from the object.
14520 for tag in self.op.tags:
14521 self.target.RemoveTag(tag)
14522 self.cfg.Update(self.target, feedback_fn)
14525 class LUTestDelay(NoHooksLU):
14526 """Sleep for a specified amount of time.
14528 This LU sleeps on the master and/or nodes for a specified amount of
14529 time.
14534 def ExpandNames(self):
14535 """Expand names and set required locks.
14537 This expands the node list, if any.
14540 self.needed_locks = {}
14541 if self.op.on_nodes:
14542 # _GetWantedNodes can be used here, but is not always appropriate to use
14543 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
14544 # more information.
14545 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
14546 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
14548 def _TestDelay(self):
14549 """Do the actual sleep.
14552 if self.op.on_master:
14553 if not utils.TestDelay(self.op.duration):
14554 raise errors.OpExecError("Error during master delay test")
14555 if self.op.on_nodes:
14556 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
14557 for node, node_result in result.items():
14558 node_result.Raise("Failure during rpc call to node %s" % node)
14560 def Exec(self, feedback_fn):
14561 """Execute the test delay opcode, with the wanted repetitions.
14564 if self.op.repeat == 0:
14565 self._TestDelay()
14566 else:
14567 top_value = self.op.repeat - 1
14568 for i in range(self.op.repeat):
14569 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
14570 self._TestDelay()
14573 class LUTestJqueue(NoHooksLU):
14574 """Utility LU to test some aspects of the job queue.
14579 # Must be lower than default timeout for WaitForJobChange to see whether it
14580 # notices changed jobs
14581 _CLIENT_CONNECT_TIMEOUT = 20.0
14582 _CLIENT_CONFIRM_TIMEOUT = 60.0
14585 def _NotifyUsingSocket(cls, cb, errcls):
14586 """Opens a Unix socket and waits for another program to connect.
14589 @param cb: Callback to send socket name to client
14590 @type errcls: class
14591 @param errcls: Exception class to use for errors
14594 # Using a temporary directory as there's no easy way to create temporary
14595 # sockets without writing a custom loop around tempfile.mktemp and
14597 tmpdir = tempfile.mkdtemp()
14599 tmpsock = utils.PathJoin(tmpdir, "sock")
14601 logging.debug("Creating temporary socket at %s", tmpsock)
14602 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
14607 # Send details to client
14610 # Wait for client to connect before continuing
14611 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
14612 try:
14613 (conn, _) = sock.accept()
14614 except socket.error, err:
14615 raise errcls("Client didn't connect in time (%s)" % err)
14619 # Remove as soon as client is connected
14620 shutil.rmtree(tmpdir)
14622 # Wait for client to close
14625 # pylint: disable=E1101
14626 # Instance of '_socketobject' has no ... member
14627 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
14629 except socket.error, err:
14630 raise errcls("Client failed to confirm notification (%s)" % err)
14634 def _SendNotification(self, test, arg, sockname):
14635 """Sends a notification to the client.
14638 @param test: Test name
14639 @param arg: Test argument (depends on test)
14640 @type sockname: string
14641 @param sockname: Socket path
14644 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
14646 def _Notify(self, prereq, test, arg):
14647 """Notifies the client of a test.
14650 @param prereq: Whether this is a prereq-phase test
14652 @param test: Test name
14653 @param arg: Test argument (depends on test)
14656 if prereq:
14657 errcls = errors.OpPrereqError
14658 else:
14659 errcls = errors.OpExecError
14661 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
14662 test, arg),
14663 errcls)
14665 def CheckArguments(self):
14666 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
14667 self.expandnames_calls = 0
14669 def ExpandNames(self):
14670 checkargs_calls = getattr(self, "checkargs_calls", 0)
14671 if checkargs_calls < 1:
14672 raise errors.ProgrammerError("CheckArguments was not called")
14674 self.expandnames_calls += 1
14676 if self.op.notify_waitlock:
14677 self._Notify(True, constants.JQT_EXPANDNAMES, None)
14679 self.LogInfo("Expanding names")
14681 # Get lock on master node (just to get a lock, not for a particular reason)
14682 self.needed_locks = {
14683 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
14686 def Exec(self, feedback_fn):
14687 if self.expandnames_calls < 1:
14688 raise errors.ProgrammerError("ExpandNames was not called")
14690 if self.op.notify_exec:
14691 self._Notify(False, constants.JQT_EXEC, None)
14693 self.LogInfo("Executing")
14695 if self.op.log_messages:
14696 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
14697 for idx, msg in enumerate(self.op.log_messages):
14698 self.LogInfo("Sending log message %s", idx + 1)
14699 feedback_fn(constants.JQT_MSGPREFIX + msg)
14700 # Report how many test messages have been sent
14701 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
14703 if self.op.fail:
14704 raise errors.OpExecError("Opcode failure was requested")
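# Illustrative sketch (not part of the original module): the client half of
# the notification handshake implemented by _NotifyUsingSocket above.  The
# test client learns the socket path from the ELOG_JQUEUE_TEST log entry,
# connects within _CLIENT_CONNECT_TIMEOUT, and closes the connection to
# confirm the notification.  Helper name is hypothetical.
def _ExampleConfirmJqueueNotification(sockname):
  """Connects to the notification socket and immediately closes it."""
  client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
  try:
    client.connect(sockname)
  finally:
    client.close()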
14709 class IAllocator(object):
14710 """IAllocator framework.
14712 An IAllocator instance has four sets of attributes:
14713 - cfg that is needed to query the cluster
14714 - input data (all members of the _KEYS class attribute are required)
14715 - four buffer attributes (in_data, out_data, in_text, out_text) that
14716 represent the input to the external script, in both data structure and
14717 text format, and the output from it, again in both formats
14718 - the result variables from the script (success, info, result) for
14719 easy usage
14722 # pylint: disable=R0902
14723 # lots of instance attributes
14725 def __init__(self, cfg, rpc_runner, mode, **kwargs):
14727 self.rpc = rpc_runner
14728 # init buffer variables
14729 self.in_text = self.out_text = self.in_data = self.out_data = None
14730 # init all input fields so that pylint is happy
14732 self.memory = self.disks = self.disk_template = self.spindle_use = None
14733 self.os = self.tags = self.nics = self.vcpus = None
14734 self.hypervisor = None
14735 self.relocate_from = None
14737 self.instances = None
14738 self.evac_mode = None
14739 self.target_groups = []
14741 self.required_nodes = None
14742 # init result fields
14743 self.success = self.info = self.result = None
14745 try:
14746 (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
14747 except KeyError:
14748 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
14749 " IAllocator" % self.mode)
14751 keyset = [n for (n, _) in keydata]
14753 for key in kwargs:
14754 if key not in keyset:
14755 raise errors.ProgrammerError("Invalid input parameter '%s' to"
14756 " IAllocator" % key)
14757 setattr(self, key, kwargs[key])
14759 for key in keyset:
14760 if key not in kwargs:
14761 raise errors.ProgrammerError("Missing input parameter '%s' to"
14762 " IAllocator" % key)
14763 self._BuildInputData(compat.partial(fn, self), keydata)
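# Illustrative sketch (not part of the original module): the keyword
# arguments a relocation-mode caller would pass to the constructor above,
# matching the keydata list for IALLOCATOR_MODE_RELOC in _MODE_DATA below.
# Names are placeholders.
_EXAMPLE_RELOC_KWARGS = {
  "name": "inst1.example.com",
  "relocate_from": ["node2.example.com"],
  }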
14765 def _ComputeClusterData(self):
14766 """Compute the generic allocator input data.
14768 This is the data that is independent of the actual operation.
14772 cluster_info = cfg.GetClusterInfo()
14774 data = {
14775 "version": constants.IALLOCATOR_VERSION,
14776 "cluster_name": cfg.GetClusterName(),
14777 "cluster_tags": list(cluster_info.GetTags()),
14778 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
14779 "ipolicy": cluster_info.ipolicy,
14780 }
14781 ninfo = cfg.GetAllNodesInfo()
14782 iinfo = cfg.GetAllInstancesInfo().values()
14783 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
14786 node_list = [n.name for n in ninfo.values() if n.vm_capable]
14788 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
14789 hypervisor_name = self.hypervisor
14790 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
14791 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
14793 hypervisor_name = cluster_info.primary_hypervisor
14795 node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
14796 [hypervisor_name])
14797 node_iinfo = \
14798 self.rpc.call_all_instances_info(node_list,
14799 cluster_info.enabled_hypervisors)
14801 data["nodegroups"] = self._ComputeNodeGroupData(cfg)
14803 config_ndata = self._ComputeBasicNodeData(cfg, ninfo)
14804 data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
14805 i_list, config_ndata)
14806 assert len(data["nodes"]) == len(ninfo), \
14807 "Incomplete node data computed"
14809 data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
14811 self.in_data = data
14814 def _ComputeNodeGroupData(cfg):
14815 """Compute node groups data.
14818 cluster = cfg.GetClusterInfo()
14819 ng = dict((guuid, {
14820 "name": gdata.name,
14821 "alloc_policy": gdata.alloc_policy,
14822 "ipolicy": _CalculateGroupIPolicy(cluster, gdata),
14824 for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
14829 def _ComputeBasicNodeData(cfg, node_cfg):
14830 """Compute global node data.
14833 @returns: a dict of name: (node dict, node config)
14836 # fill in static (config-based) values
14837 node_results = dict((ninfo.name, {
14838 "tags": list(ninfo.GetTags()),
14839 "primary_ip": ninfo.primary_ip,
14840 "secondary_ip": ninfo.secondary_ip,
14841 "offline": ninfo.offline,
14842 "drained": ninfo.drained,
14843 "master_candidate": ninfo.master_candidate,
14844 "group": ninfo.group,
14845 "master_capable": ninfo.master_capable,
14846 "vm_capable": ninfo.vm_capable,
14847 "ndparams": cfg.GetNdParams(ninfo),
14849 for ninfo in node_cfg.values())
14851 return node_results
14854 def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
14855 node_results):
14856 """Compute global node data.
14858 @param node_results: the basic node structures as filled from the config
14861 # TODO(dynmem): compute the right data on MAX and MIN memory
14862 # make a copy of the current dict
14863 node_results = dict(node_results)
14864 for nname, nresult in node_data.items():
14865 assert nname in node_results, "Missing basic data for node %s" % nname
14866 ninfo = node_cfg[nname]
14868 if not (ninfo.offline or ninfo.drained):
14869 nresult.Raise("Can't get data for node %s" % nname)
14870 node_iinfo[nname].Raise("Can't get node instance info from node %s" %
14872 remote_info = _MakeLegacyNodeInfo(nresult.payload)
14874 for attr in ["memory_total", "memory_free", "memory_dom0",
14875 "vg_size", "vg_free", "cpu_total"]:
14876 if attr not in remote_info:
14877 raise errors.OpExecError("Node '%s' didn't return attribute"
14878 " '%s'" % (nname, attr))
14879 if not isinstance(remote_info[attr], int):
14880 raise errors.OpExecError("Node '%s' returned invalid value"
14881 " for '%s': %s" %
14882 (nname, attr, remote_info[attr]))
14883 # compute memory used by primary instances
14884 i_p_mem = i_p_up_mem = 0
14885 for iinfo, beinfo in i_list:
14886 if iinfo.primary_node == nname:
14887 i_p_mem += beinfo[constants.BE_MAXMEM]
14888 if iinfo.name not in node_iinfo[nname].payload:
14889 i_used_mem = 0
14890 else:
14891 i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
14892 i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
14893 remote_info["memory_free"] -= max(0, i_mem_diff)
14895 if iinfo.admin_state == constants.ADMINST_UP:
14896 i_p_up_mem += beinfo[constants.BE_MAXMEM]
14898 # compute memory used by instances
14899 pnr_dyn = {
14900 "total_memory": remote_info["memory_total"],
14901 "reserved_memory": remote_info["memory_dom0"],
14902 "free_memory": remote_info["memory_free"],
14903 "total_disk": remote_info["vg_size"],
14904 "free_disk": remote_info["vg_free"],
14905 "total_cpus": remote_info["cpu_total"],
14906 "i_pri_memory": i_p_mem,
14907 "i_pri_up_memory": i_p_up_mem,
14908 }
14909 pnr_dyn.update(node_results[nname])
14910 node_results[nname] = pnr_dyn
14912 return node_results
14915 def _ComputeInstanceData(cluster_info, i_list):
14916 """Compute global instance data.
14919 instance_data = {}
14920 for iinfo, beinfo in i_list:
14922 for nic in iinfo.nics:
14923 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
14927 "mode": filled_params[constants.NIC_MODE],
14928 "link": filled_params[constants.NIC_LINK],
14930 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
14931 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
14932 nic_data.append(nic_dict)
14933 pir = {
14934 "tags": list(iinfo.GetTags()),
14935 "admin_state": iinfo.admin_state,
14936 "vcpus": beinfo[constants.BE_VCPUS],
14937 "memory": beinfo[constants.BE_MAXMEM],
14938 "spindle_use": beinfo[constants.BE_SPINDLE_USE],
14940 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
14942 "disks": [{constants.IDISK_SIZE: dsk.size,
14943 constants.IDISK_MODE: dsk.mode}
14944 for dsk in iinfo.disks],
14945 "disk_template": iinfo.disk_template,
14946 "hypervisor": iinfo.hypervisor,
14948 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
14950 instance_data[iinfo.name] = pir
14952 return instance_data
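# Illustrative sketch (not part of the original module): the rough shape of a
# single entry produced by _ComputeInstanceData above, keyed by instance
# name.  All values are placeholders; the real values come from the instance
# object and its filled BE parameters, and the "bridge" NIC key is only
# present for bridged NICs.
_EXAMPLE_INSTANCE_ENTRY = {
  "tags": [],
  "admin_state": "up",
  "vcpus": 1,
  "memory": 1024,
  "spindle_use": 1,
  "nodes": ["node1.example.com", "node2.example.com"],
  "nics": [{"mode": "bridged", "link": "br0", "bridge": "br0"}],
  "disks": [{"size": 10240, "mode": "rw"}],
  "disk_template": "drbd",
  "hypervisor": "xen-pvm",
  "disk_space_total": 10368,  # placeholder; computed by _ComputeDiskSize
  }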
14954 def _AddNewInstance(self):
14955 """Add new instance data to allocator structure.
14957 This in combination with _ComputeClusterData will create the
14958 correct structure needed as input for the allocator.
14960 The checks for the completeness of the opcode must have already been
14961 done.
14964 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
14966 if self.disk_template in constants.DTS_INT_MIRROR:
14967 self.required_nodes = 2
14969 self.required_nodes = 1
14973 "disk_template": self.disk_template,
14976 "vcpus": self.vcpus,
14977 "memory": self.memory,
14978 "spindle_use": self.spindle_use,
14979 "disks": self.disks,
14980 "disk_space_total": disk_space,
14982 "required_nodes": self.required_nodes,
14983 "hypervisor": self.hypervisor,
14988 def _AddRelocateInstance(self):
14989 """Add relocate instance data to allocator structure.
14991 This in combination with _ComputeClusterData will create the
14992 correct structure needed as input for the allocator.
14994 The checks for the completeness of the opcode must have already been
14995 done.
14998 instance = self.cfg.GetInstanceInfo(self.name)
14999 if instance is None:
15000 raise errors.ProgrammerError("Unknown instance '%s' passed to"
15001 " IAllocator" % self.name)
15003 if instance.disk_template not in constants.DTS_MIRRORED:
15004 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
15005 errors.ECODE_INVAL)
15007 if instance.disk_template in constants.DTS_INT_MIRROR and \
15008 len(instance.secondary_nodes) != 1:
15009 raise errors.OpPrereqError("Instance has not exactly one secondary node",
15010 errors.ECODE_STATE)
15012 self.required_nodes = 1
15013 disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
15014 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
15018 "disk_space_total": disk_space,
15019 "required_nodes": self.required_nodes,
15020 "relocate_from": self.relocate_from,
15024 def _AddNodeEvacuate(self):
15025 """Get data for node-evacuate requests.
15029 "instances": self.instances,
15030 "evac_mode": self.evac_mode,
15033 def _AddChangeGroup(self):
15034 """Get data for change-group requests.
15038 "instances": self.instances,
15039 "target_groups": self.target_groups,
15042 def _BuildInputData(self, fn, keydata):
15043 """Build input data structures.
15046 self._ComputeClusterData()
15048 request = fn()
15049 request["type"] = self.mode
15050 for keyname, keytype in keydata:
15051 if keyname not in request:
15052 raise errors.ProgrammerError("Request parameter %s is missing" %
15053 keyname)
15054 val = request[keyname]
15055 if not keytype(val):
15056 raise errors.ProgrammerError("Request parameter %s doesn't pass"
15057 " validation, value %s, expected"
15058 " type %s" % (keyname, val, keytype))
15059 self.in_data["request"] = request
15061 self.in_text = serializer.Dump(self.in_data)
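# Illustrative sketch (not part of the original module): the top-level keys
# of self.in_data once _BuildInputData has run.  Everything except "request"
# is filled by _ComputeClusterData; "request" carries the mode-specific data
# validated against the keydata tuples in _MODE_DATA below.
_EXAMPLE_IN_DATA_KEYS = [
  "version", "cluster_name", "cluster_tags", "enabled_hypervisors",
  "ipolicy", "nodegroups", "nodes", "instances", "request",
  ]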
15063 _STRING_LIST = ht.TListOf(ht.TString)
15064 _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
15065 # pylint: disable=E1101
15066 # Class '...' has no 'OP_ID' member
15067 "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
15068 opcodes.OpInstanceMigrate.OP_ID,
15069 opcodes.OpInstanceReplaceDisks.OP_ID])
15073 ht.TListOf(ht.TAnd(ht.TIsLength(3),
15074 ht.TItems([ht.TNonEmptyString,
15075 ht.TNonEmptyString,
15076 ht.TListOf(ht.TNonEmptyString),
15079 ht.TListOf(ht.TAnd(ht.TIsLength(2),
15080 ht.TItems([ht.TNonEmptyString,
15083 _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
15084 ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
15086 _MODE_DATA = {
15087 constants.IALLOCATOR_MODE_ALLOC:
15090 ("name", ht.TString),
15091 ("memory", ht.TInt),
15092 ("spindle_use", ht.TInt),
15093 ("disks", ht.TListOf(ht.TDict)),
15094 ("disk_template", ht.TString),
15095 ("os", ht.TString),
15096 ("tags", _STRING_LIST),
15097 ("nics", ht.TListOf(ht.TDict)),
15098 ("vcpus", ht.TInt),
15099 ("hypervisor", ht.TString),
15101 constants.IALLOCATOR_MODE_RELOC:
15102 (_AddRelocateInstance,
15103 [("name", ht.TString), ("relocate_from", _STRING_LIST)],
15105 constants.IALLOCATOR_MODE_NODE_EVAC:
15106 (_AddNodeEvacuate, [
15107 ("instances", _STRING_LIST),
15108 ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
15110 constants.IALLOCATOR_MODE_CHG_GROUP:
15111 (_AddChangeGroup, [
15112 ("instances", _STRING_LIST),
15113 ("target_groups", _STRING_LIST),
15117 def Run(self, name, validate=True, call_fn=None):
15118 """Run an instance allocator and return the results.
15121 if call_fn is None:
15122 call_fn = self.rpc.call_iallocator_runner
15124 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
15125 result.Raise("Failure while running the iallocator script")
15127 self.out_text = result.payload
15128 if validate:
15129 self._ValidateResult()
15131 def _ValidateResult(self):
15132 """Process the allocator results.
15134 This will process the allocator output and, if successful, save the
15135 result in self.out_data and the other result attributes.
15138 try:
15139 rdict = serializer.Load(self.out_text)
15140 except Exception, err:
15141 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
15143 if not isinstance(rdict, dict):
15144 raise errors.OpExecError("Can't parse iallocator results: not a dict")
15146 # TODO: remove backwards compatibility in later versions
15147 if "nodes" in rdict and "result" not in rdict:
15148 rdict["result"] = rdict["nodes"]
15149 del rdict["nodes"]
15151 for key in "success", "info", "result":
15152 if key not in rdict:
15153 raise errors.OpExecError("Can't parse iallocator results:"
15154 " missing key '%s'" % key)
15155 setattr(self, key, rdict[key])
15157 if not self._result_check(self.result):
15158 raise errors.OpExecError("Iallocator returned invalid result,"
15159 " expected %s, got %s" %
15160 (self._result_check, self.result),
15161 errors.ECODE_INVAL)
15163 if self.mode == constants.IALLOCATOR_MODE_RELOC:
15164 assert self.relocate_from is not None
15165 assert self.required_nodes == 1
15167 node2group = dict((name, ndata["group"])
15168 for (name, ndata) in self.in_data["nodes"].items())
15170 fn = compat.partial(self._NodesToGroups, node2group,
15171 self.in_data["nodegroups"])
15173 instance = self.cfg.GetInstanceInfo(self.name)
15174 request_groups = fn(self.relocate_from + [instance.primary_node])
15175 result_groups = fn(rdict["result"] + [instance.primary_node])
15177 if self.success and not set(result_groups).issubset(request_groups):
15178 raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
15179 " differ from original groups (%s)" %
15180 (utils.CommaJoin(result_groups),
15181 utils.CommaJoin(request_groups)))
15183 elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
15184 assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES
15186 self.out_data = rdict
15189 def _NodesToGroups(node2group, groups, nodes):
15190 """Returns a list of unique group names for a list of nodes.
15192 @type node2group: dict
15193 @param node2group: Map from node name to group UUID
15195 @param groups: Group information
15197 @param nodes: Node names
15204 group_uuid = node2group[node]
15206 # Ignore unknown node
15210 group = groups[group_uuid]
15212 # Can't find group, let's use UUID
15213 group_name = group_uuid
15215 group_name = group["name"]
15217 result.add(group_name)
15219 return sorted(result)
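# Illustrative usage sketch (not part of the original module), assuming
# _NodesToGroups is exposed as a staticmethod (its cfg-free signature
# suggests it).  Unknown nodes are ignored, and a group UUID with no matching
# group entry falls back to the UUID itself.  Helper name and sample data are
# made up.
def _ExampleNodesToGroups():
  """Maps three sample nodes onto their group names."""
  node2group = {"node1": "uuid-a", "node2": "uuid-b", "node3": "uuid-a"}
  groups = {"uuid-a": {"name": "default"}, "uuid-b": {"name": "fast"}}
  # "nodeX" is unknown and therefore ignored
  return IAllocator._NodesToGroups(node2group, groups,
                                   ["node1", "node2", "nodeX"])
  # expected: ["default", "fast"]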
15222 class LUTestAllocator(NoHooksLU):
15223 """Run allocator tests.
15225 This LU runs the allocator tests.
15228 def CheckPrereq(self):
15229 """Check prerequisites.
15231 This checks the opcode parameters depending on the direction and mode of the test.
15234 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
15235 for attr in ["memory", "disks", "disk_template",
15236 "os", "tags", "nics", "vcpus"]:
15237 if not hasattr(self.op, attr):
15238 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
15239 attr, errors.ECODE_INVAL)
15240 iname = self.cfg.ExpandInstanceName(self.op.name)
15241 if iname is not None:
15242 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
15243 iname, errors.ECODE_EXISTS)
15244 if not isinstance(self.op.nics, list):
15245 raise errors.OpPrereqError("Invalid parameter 'nics'",
15246 errors.ECODE_INVAL)
15247 if not isinstance(self.op.disks, list):
15248 raise errors.OpPrereqError("Invalid parameter 'disks'",
15249 errors.ECODE_INVAL)
15250 for row in self.op.disks:
15251 if (not isinstance(row, dict) or
15252 constants.IDISK_SIZE not in row or
15253 not isinstance(row[constants.IDISK_SIZE], int) or
15254 constants.IDISK_MODE not in row or
15255 row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
15256 raise errors.OpPrereqError("Invalid contents of the 'disks'"
15257 " parameter", errors.ECODE_INVAL)
15258 if self.op.hypervisor is None:
15259 self.op.hypervisor = self.cfg.GetHypervisorType()
15260 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
15261 fname = _ExpandInstanceName(self.cfg, self.op.name)
15262 self.op.name = fname
15263 self.relocate_from = \
15264 list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
15265 elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
15266 constants.IALLOCATOR_MODE_NODE_EVAC):
15267 if not self.op.instances:
15268 raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
15269 self.op.instances = _GetWantedInstances(self, self.op.instances)
15270 else:
15271 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
15272 self.op.mode, errors.ECODE_INVAL)
15274 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
15275 if self.op.allocator is None:
15276 raise errors.OpPrereqError("Missing allocator name",
15277 errors.ECODE_INVAL)
15278 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
15279 raise errors.OpPrereqError("Wrong allocator test '%s'" %
15280 self.op.direction, errors.ECODE_INVAL)
15282 def Exec(self, feedback_fn):
15283 """Run the allocator test.
15286 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
15287 ial = IAllocator(self.cfg, self.rpc,
15290 memory=self.op.memory,
15291 disks=self.op.disks,
15292 disk_template=self.op.disk_template,
15296 vcpus=self.op.vcpus,
15297 hypervisor=self.op.hypervisor,
15299 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
15300 ial = IAllocator(self.cfg, self.rpc,
15303 relocate_from=list(self.relocate_from),
15305 elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
15306 ial = IAllocator(self.cfg, self.rpc,
15308 instances=self.op.instances,
15309 target_groups=self.op.target_groups)
15310 elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
15311 ial = IAllocator(self.cfg, self.rpc,
15313 instances=self.op.instances,
15314 evac_mode=self.op.evac_mode)
15315 else:
15316 raise errors.ProgrammerError("Uncaught mode %s in"
15317 " LUTestAllocator.Exec", self.op.mode)
15319 if self.op.direction == constants.IALLOCATOR_DIR_IN:
15320 result = ial.in_text
15321 else:
15322 ial.Run(self.op.allocator, validate=False)
15323 result = ial.out_text
15324 return result
15327 #: Query type implementations
15328 _QUERY_IMPL = {
15329 constants.QR_CLUSTER: _ClusterQuery,
15330 constants.QR_INSTANCE: _InstanceQuery,
15331 constants.QR_NODE: _NodeQuery,
15332 constants.QR_GROUP: _GroupQuery,
15333 constants.QR_OS: _OsQuery,
15334 constants.QR_EXPORT: _ExportQuery,
15335 }
15337 assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
15340 def _GetQueryImplementation(name):
15341 """Returns the implementation for a query type.
15343 @param name: Query type, must be one of L{constants.QR_VIA_OP}
15346 try:
15347 return _QUERY_IMPL[name]
15348 except KeyError:
15349 raise errors.OpPrereqError("Unknown query resource '%s'" % name,
15350 errors.ECODE_INVAL)
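# Illustrative usage sketch (not part of the original module): callers look
# up a query implementation by resource name and get an OpPrereqError for
# unknown resources.  The wrapper below (hypothetical name) maps that error
# to None instead of raising.
def _ExampleResolveQueryType(name):
  """Returns the query implementation for C{name}, or None if unknown."""
  try:
    return _GetQueryImplementation(name)
  except errors.OpPrereqError:
    return None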