4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
44 from ganeti import ssh
45 from ganeti import utils
46 from ganeti import errors
47 from ganeti import hypervisor
48 from ganeti import locking
49 from ganeti import constants
50 from ganeti import objects
51 from ganeti import serializer
52 from ganeti import ssconf
53 from ganeti import uidpool
54 from ganeti import compat
55 from ganeti import masterd
56 from ganeti import netutils
57 from ganeti import query
58 from ganeti import qlang
59 from ganeti import opcodes
61 from ganeti import rpc
62 from ganeti import runtime
64 import ganeti.masterd.instance # pylint: disable=W0611
67 #: Size of DRBD meta block device
71 INSTANCE_DOWN = [constants.ADMINST_DOWN]
72 INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
73 INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
75 #: Instance status in which an instance can be marked as offline/online
76 CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
77 constants.ADMINST_OFFLINE,
82 """Data container for LU results with jobs.
84 Instances of this class returned from L{LogicalUnit.Exec} will be recognized
85 by L{mcpu._ProcessResult}. The latter will then submit the jobs
86 contained in the C{jobs} attribute and include the job IDs in the opcode
90 def __init__(self, jobs, **kwargs):
91 """Initializes this class.
93 Additional return values can be specified as keyword arguments.
95 @type jobs: list of lists of L{opcode.OpCode}
96 @param jobs: A list of lists of opcode objects
103 class LogicalUnit(object):
104 """Logical Unit base class.
106 Subclasses must follow these rules:
107 - implement ExpandNames
108 - implement CheckPrereq (except when tasklets are used)
109 - implement Exec (except when tasklets are used)
110 - implement BuildHooksEnv
111 - implement BuildHooksNodes
112 - redefine HPATH and HTYPE
113 - optionally redefine their run requirements:
114 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
116 Note that all commands require root permissions.
118 @ivar dry_run_result: the value (if any) that will be returned to the caller
119 in dry-run mode (signalled by opcode dry_run parameter)
126 def __init__(self, processor, op, context, rpc_runner):
127 """Constructor for LogicalUnit.
129 This needs to be overridden in derived classes in order to check op
133 self.proc = processor
135 self.cfg = context.cfg
136 self.glm = context.glm
138 self.owned_locks = context.glm.list_owned
139 self.context = context
140 self.rpc = rpc_runner
141 # Dicts used to declare locking needs to mcpu
142 self.needed_locks = None
143 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
145 self.remove_locks = {}
146 # Used to force good behavior when calling helper functions
147 self.recalculate_locks = {}
149 self.Log = processor.Log # pylint: disable=C0103
150 self.LogWarning = processor.LogWarning # pylint: disable=C0103
151 self.LogInfo = processor.LogInfo # pylint: disable=C0103
152 self.LogStep = processor.LogStep # pylint: disable=C0103
153 # support for dry-run
154 self.dry_run_result = None
155 # support for generic debug attribute
156 if (not hasattr(self.op, "debug_level") or
157 not isinstance(self.op.debug_level, int)):
158 self.op.debug_level = 0
163 # Validate opcode parameters and set defaults
164 self.op.Validate(True)
166 self.CheckArguments()
168 def CheckArguments(self):
169 """Check syntactic validity for the opcode arguments.
171 This method is for doing a simple syntactic check and ensure
172 validity of opcode parameters, without any cluster-related
173 checks. While the same can be accomplished in ExpandNames and/or
174 CheckPrereq, doing these separate is better because:
176 - ExpandNames is left as as purely a lock-related function
177 - CheckPrereq is run after we have acquired locks (and possible
180 The function is allowed to change the self.op attribute so that
181 later methods can no longer worry about missing parameters.
186 def ExpandNames(self):
187 """Expand names for this LU.
189 This method is called before starting to execute the opcode, and it should
190 update all the parameters of the opcode to their canonical form (e.g. a
191 short node name must be fully expanded after this method has successfully
192 completed). This way locking, hooks, logging, etc. can work correctly.
194 LUs which implement this method must also populate the self.needed_locks
195 member, as a dict with lock levels as keys, and a list of needed lock names
198 - use an empty dict if you don't need any lock
199 - if you don't need any lock at a particular level omit that
200 level (note that in this case C{DeclareLocks} won't be called
201 at all for that level)
202 - if you need locks at a level, but you can't calculate it in
203 this function, initialise that level with an empty list and do
204 further processing in L{LogicalUnit.DeclareLocks} (see that
205 function's docstring)
206 - don't put anything for the BGL level
207 - if you want all locks at a level use L{locking.ALL_SET} as a value
209 If you need to share locks (rather than acquire them exclusively) at one
210 level you can modify self.share_locks, setting a true value (usually 1) for
211 that level. By default locks are not shared.
213 This function can also define a list of tasklets, which then will be
214 executed in order instead of the usual LU-level CheckPrereq and Exec
215 functions, if those are not defined by the LU.
219 # Acquire all nodes and one instance
220 self.needed_locks = {
221 locking.LEVEL_NODE: locking.ALL_SET,
222 locking.LEVEL_INSTANCE: ['instance1.example.com'],
224 # Acquire just two nodes
225 self.needed_locks = {
226 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
229 self.needed_locks = {} # No, you can't leave it to the default value None
232 # The implementation of this method is mandatory only if the new LU is
233 # concurrent, so that old LUs don't need to be changed all at the same
236 self.needed_locks = {} # Exclusive LUs don't need locks.
238 raise NotImplementedError
240 def DeclareLocks(self, level):
241 """Declare LU locking needs for a level
243 While most LUs can just declare their locking needs at ExpandNames time,
244 sometimes there's the need to calculate some locks after having acquired
245 the ones before. This function is called just before acquiring locks at a
246 particular level, but after acquiring the ones at lower levels, and permits
247 such calculations. It can be used to modify self.needed_locks, and by
248 default it does nothing.
250 This function is only called if you have something already set in
251 self.needed_locks for the level.
253 @param level: Locking level which is going to be locked
254 @type level: member of L{ganeti.locking.LEVELS}
258 def CheckPrereq(self):
259 """Check prerequisites for this LU.
261 This method should check that the prerequisites for the execution
262 of this LU are fulfilled. It can do internode communication, but
263 it should be idempotent - no cluster or system changes are
266 The method should raise errors.OpPrereqError in case something is
267 not fulfilled. Its return value is ignored.
269 This method should also update all the parameters of the opcode to
270 their canonical form if it hasn't been done by ExpandNames before.
273 if self.tasklets is not None:
274 for (idx, tl) in enumerate(self.tasklets):
275 logging.debug("Checking prerequisites for tasklet %s/%s",
276 idx + 1, len(self.tasklets))
281 def Exec(self, feedback_fn):
284 This method should implement the actual work. It should raise
285 errors.OpExecError for failures that are somewhat dealt with in
289 if self.tasklets is not None:
290 for (idx, tl) in enumerate(self.tasklets):
291 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
294 raise NotImplementedError
296 def BuildHooksEnv(self):
297 """Build hooks environment for this LU.
300 @return: Dictionary containing the environment that will be used for
301 running the hooks for this LU. The keys of the dict must not be prefixed
302 with "GANETI_"--that'll be added by the hooks runner. The hooks runner
303 will extend the environment with additional variables. If no environment
304 should be defined, an empty dictionary should be returned (not C{None}).
305 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
309 raise NotImplementedError
311 def BuildHooksNodes(self):
312 """Build list of nodes to run LU's hooks.
314 @rtype: tuple; (list, list)
315 @return: Tuple containing a list of node names on which the hook
316 should run before the execution and a list of node names on which the
317 hook should run after the execution. No nodes should be returned as an
318 empty list (and not None).
319 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
323 raise NotImplementedError
325 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
326 """Notify the LU about the results of its hooks.
328 This method is called every time a hooks phase is executed, and notifies
329 the Logical Unit about the hooks' result. The LU can then use it to alter
330 its result based on the hooks. By default the method does nothing and the
331 previous result is passed back unchanged but any LU can define it if it
332 wants to use the local cluster hook-scripts somehow.
334 @param phase: one of L{constants.HOOKS_PHASE_POST} or
335 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
336 @param hook_results: the results of the multi-node hooks rpc call
337 @param feedback_fn: function used send feedback back to the caller
338 @param lu_result: the previous Exec result this LU had, or None
340 @return: the new Exec result, based on the previous result
344 # API must be kept, thus we ignore the unused argument and could
345 # be a function warnings
346 # pylint: disable=W0613,R0201
349 def _ExpandAndLockInstance(self):
350 """Helper function to expand and lock an instance.
352 Many LUs that work on an instance take its name in self.op.instance_name
353 and need to expand it and then declare the expanded name for locking. This
354 function does it, and then updates self.op.instance_name to the expanded
355 name. It also initializes needed_locks as a dict, if this hasn't been done
359 if self.needed_locks is None:
360 self.needed_locks = {}
362 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
363 "_ExpandAndLockInstance called with instance-level locks set"
364 self.op.instance_name = _ExpandInstanceName(self.cfg,
365 self.op.instance_name)
366 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
368 def _LockInstancesNodes(self, primary_only=False,
369 level=locking.LEVEL_NODE):
370 """Helper function to declare instances' nodes for locking.
372 This function should be called after locking one or more instances to lock
373 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
374 with all primary or secondary nodes for instances already locked and
375 present in self.needed_locks[locking.LEVEL_INSTANCE].
377 It should be called from DeclareLocks, and for safety only works if
378 self.recalculate_locks[locking.LEVEL_NODE] is set.
380 In the future it may grow parameters to just lock some instance's nodes, or
381 to just lock primaries or secondary nodes, if needed.
383 If should be called in DeclareLocks in a way similar to::
385 if level == locking.LEVEL_NODE:
386 self._LockInstancesNodes()
388 @type primary_only: boolean
389 @param primary_only: only lock primary nodes of locked instances
390 @param level: Which lock level to use for locking nodes
393 assert level in self.recalculate_locks, \
394 "_LockInstancesNodes helper function called with no nodes to recalculate"
396 # TODO: check if we're really been called with the instance locks held
398 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
399 # future we might want to have different behaviors depending on the value
400 # of self.recalculate_locks[locking.LEVEL_NODE]
402 locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
403 for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
404 wanted_nodes.append(instance.primary_node)
406 wanted_nodes.extend(instance.secondary_nodes)
408 if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
409 self.needed_locks[level] = wanted_nodes
410 elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
411 self.needed_locks[level].extend(wanted_nodes)
413 raise errors.ProgrammerError("Unknown recalculation mode")
415 del self.recalculate_locks[level]
418 class NoHooksLU(LogicalUnit): # pylint: disable=W0223
419 """Simple LU which runs no hooks.
421 This LU is intended as a parent for other LogicalUnits which will
422 run no hooks, in order to reduce duplicate code.
428 def BuildHooksEnv(self):
429 """Empty BuildHooksEnv for NoHooksLu.
431 This just raises an error.
434 raise AssertionError("BuildHooksEnv called for NoHooksLUs")
436 def BuildHooksNodes(self):
437 """Empty BuildHooksNodes for NoHooksLU.
440 raise AssertionError("BuildHooksNodes called for NoHooksLU")
444 """Tasklet base class.
446 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
447 they can mix legacy code with tasklets. Locking needs to be done in the LU,
448 tasklets know nothing about locks.
450 Subclasses must follow these rules:
451 - Implement CheckPrereq
455 def __init__(self, lu):
462 def CheckPrereq(self):
463 """Check prerequisites for this tasklets.
465 This method should check whether the prerequisites for the execution of
466 this tasklet are fulfilled. It can do internode communication, but it
467 should be idempotent - no cluster or system changes are allowed.
469 The method should raise errors.OpPrereqError in case something is not
470 fulfilled. Its return value is ignored.
472 This method should also update all parameters to their canonical form if it
473 hasn't been done before.
478 def Exec(self, feedback_fn):
479 """Execute the tasklet.
481 This method should implement the actual work. It should raise
482 errors.OpExecError for failures that are somewhat dealt with in code, or
486 raise NotImplementedError
490 """Base for query utility classes.
493 #: Attribute holding field definitions
499 def __init__(self, qfilter, fields, use_locking):
500 """Initializes this class.
503 self.use_locking = use_locking
505 self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
506 namefield=self.SORT_FIELD)
507 self.requested_data = self.query.RequestedData()
508 self.names = self.query.RequestedNames()
510 # Sort only if no names were requested
511 self.sort_by_name = not self.names
513 self.do_locking = None
516 def _GetNames(self, lu, all_names, lock_level):
517 """Helper function to determine names asked for in the query.
521 names = lu.owned_locks(lock_level)
525 if self.wanted == locking.ALL_SET:
526 assert not self.names
527 # caller didn't specify names, so ordering is not important
528 return utils.NiceSort(names)
530 # caller specified names and we must keep the same order
532 assert not self.do_locking or lu.glm.is_owned(lock_level)
534 missing = set(self.wanted).difference(names)
536 raise errors.OpExecError("Some items were removed before retrieving"
537 " their data: %s" % missing)
539 # Return expanded names
542 def ExpandNames(self, lu):
543 """Expand names for this query.
545 See L{LogicalUnit.ExpandNames}.
548 raise NotImplementedError()
550 def DeclareLocks(self, lu, level):
551 """Declare locks for this query.
553 See L{LogicalUnit.DeclareLocks}.
556 raise NotImplementedError()
558 def _GetQueryData(self, lu):
559 """Collects all data for this query.
561 @return: Query data object
564 raise NotImplementedError()
566 def NewStyleQuery(self, lu):
567 """Collect data and execute query.
570 return query.GetQueryResponse(self.query, self._GetQueryData(lu),
571 sort_by_name=self.sort_by_name)
573 def OldStyleQuery(self, lu):
574 """Collect data and execute query.
577 return self.query.OldStyleQuery(self._GetQueryData(lu),
578 sort_by_name=self.sort_by_name)
582 """Returns a dict declaring all lock levels shared.
585 return dict.fromkeys(locking.LEVELS, 1)
588 def _MakeLegacyNodeInfo(data):
589 """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
591 Converts the data into a single dictionary. This is fine for most use cases,
592 but some require information from more than one volume group or hypervisor.
595 (bootid, (vg_info, ), (hv_info, )) = data
597 return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
602 def _AnnotateDiskParams(instance, devs, cfg):
603 """Little helper wrapper to the rpc annotation method.
605 @param instance: The instance object
606 @type devs: List of L{objects.Disk}
607 @param devs: The root devices (not any of its children!)
608 @param cfg: The config object
609 @returns The annotated disk copies
610 @see L{rpc.AnnotateDiskParams}
613 return rpc.AnnotateDiskParams(instance.disk_template, devs,
614 cfg.GetInstanceDiskParams(instance))
617 def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
619 """Checks if node groups for locked instances are still correct.
621 @type cfg: L{config.ConfigWriter}
622 @param cfg: Cluster configuration
623 @type instances: dict; string as key, L{objects.Instance} as value
624 @param instances: Dictionary, instance name as key, instance object as value
625 @type owned_groups: iterable of string
626 @param owned_groups: List of owned groups
627 @type owned_nodes: iterable of string
628 @param owned_nodes: List of owned nodes
629 @type cur_group_uuid: string or None
630 @param cur_group_uuid: Optional group UUID to check against instance's groups
633 for (name, inst) in instances.items():
634 assert owned_nodes.issuperset(inst.all_nodes), \
635 "Instance %s's nodes changed while we kept the lock" % name
637 inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
639 assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
640 "Instance %s has no node in group %s" % (name, cur_group_uuid)
643 def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
644 """Checks if the owned node groups are still correct for an instance.
646 @type cfg: L{config.ConfigWriter}
647 @param cfg: The cluster configuration
648 @type instance_name: string
649 @param instance_name: Instance name
650 @type owned_groups: set or frozenset
651 @param owned_groups: List of currently owned node groups
654 inst_groups = cfg.GetInstanceNodeGroups(instance_name)
656 if not owned_groups.issuperset(inst_groups):
657 raise errors.OpPrereqError("Instance %s's node groups changed since"
658 " locks were acquired, current groups are"
659 " are '%s', owning groups '%s'; retry the"
662 utils.CommaJoin(inst_groups),
663 utils.CommaJoin(owned_groups)),
669 def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
670 """Checks if the instances in a node group are still correct.
672 @type cfg: L{config.ConfigWriter}
673 @param cfg: The cluster configuration
674 @type group_uuid: string
675 @param group_uuid: Node group UUID
676 @type owned_instances: set or frozenset
677 @param owned_instances: List of currently owned instances
680 wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
681 if owned_instances != wanted_instances:
682 raise errors.OpPrereqError("Instances in node group '%s' changed since"
683 " locks were acquired, wanted '%s', have '%s';"
684 " retry the operation" %
686 utils.CommaJoin(wanted_instances),
687 utils.CommaJoin(owned_instances)),
690 return wanted_instances
693 def _SupportsOob(cfg, node):
694 """Tells if node supports OOB.
696 @type cfg: L{config.ConfigWriter}
697 @param cfg: The cluster configuration
698 @type node: L{objects.Node}
699 @param node: The node
700 @return: The OOB script if supported or an empty string otherwise
703 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
706 def _GetWantedNodes(lu, nodes):
707 """Returns list of checked and expanded node names.
709 @type lu: L{LogicalUnit}
710 @param lu: the logical unit on whose behalf we execute
712 @param nodes: list of node names or None for all nodes
714 @return: the list of nodes, sorted
715 @raise errors.ProgrammerError: if the nodes parameter is wrong type
719 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
721 return utils.NiceSort(lu.cfg.GetNodeList())
724 def _GetWantedInstances(lu, instances):
725 """Returns list of checked and expanded instance names.
727 @type lu: L{LogicalUnit}
728 @param lu: the logical unit on whose behalf we execute
729 @type instances: list
730 @param instances: list of instance names or None for all instances
732 @return: the list of instances, sorted
733 @raise errors.OpPrereqError: if the instances parameter is wrong type
734 @raise errors.OpPrereqError: if any of the passed instances is not found
738 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
740 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
744 def _GetUpdatedParams(old_params, update_dict,
745 use_default=True, use_none=False):
746 """Return the new version of a parameter dictionary.
748 @type old_params: dict
749 @param old_params: old parameters
750 @type update_dict: dict
751 @param update_dict: dict containing new parameter values, or
752 constants.VALUE_DEFAULT to reset the parameter to its default
754 @param use_default: boolean
755 @type use_default: whether to recognise L{constants.VALUE_DEFAULT}
756 values as 'to be deleted' values
757 @param use_none: boolean
758 @type use_none: whether to recognise C{None} values as 'to be
761 @return: the new parameter dictionary
764 params_copy = copy.deepcopy(old_params)
765 for key, val in update_dict.iteritems():
766 if ((use_default and val == constants.VALUE_DEFAULT) or
767 (use_none and val is None)):
773 params_copy[key] = val
777 def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
778 """Return the new version of a instance policy.
780 @param group_policy: whether this policy applies to a group and thus
781 we should support removal of policy entries
784 use_none = use_default = group_policy
785 ipolicy = copy.deepcopy(old_ipolicy)
786 for key, value in new_ipolicy.items():
787 if key not in constants.IPOLICY_ALL_KEYS:
788 raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
790 if key in constants.IPOLICY_ISPECS:
791 utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
792 ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
794 use_default=use_default)
796 if (not value or value == [constants.VALUE_DEFAULT] or
797 value == constants.VALUE_DEFAULT):
801 raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
802 " on the cluster'" % key,
805 if key in constants.IPOLICY_PARAMETERS:
806 # FIXME: we assume all such values are float
808 ipolicy[key] = float(value)
809 except (TypeError, ValueError), err:
810 raise errors.OpPrereqError("Invalid value for attribute"
811 " '%s': '%s', error: %s" %
812 (key, value, err), errors.ECODE_INVAL)
814 # FIXME: we assume all others are lists; this should be redone
816 ipolicy[key] = list(value)
818 objects.InstancePolicy.CheckParameterSyntax(ipolicy)
819 except errors.ConfigurationError, err:
820 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
825 def _UpdateAndVerifySubDict(base, updates, type_check):
826 """Updates and verifies a dict with sub dicts of the same type.
828 @param base: The dict with the old data
829 @param updates: The dict with the new data
830 @param type_check: Dict suitable to ForceDictType to verify correct types
831 @returns: A new dict with updated and verified values
835 new = _GetUpdatedParams(old, value)
836 utils.ForceDictType(new, type_check)
839 ret = copy.deepcopy(base)
840 ret.update(dict((key, fn(base.get(key, {}), value))
841 for key, value in updates.items()))
845 def _MergeAndVerifyHvState(op_input, obj_input):
846 """Combines the hv state from an opcode with the one of the object
848 @param op_input: The input dict from the opcode
849 @param obj_input: The input dict from the objects
850 @return: The verified and updated dict
854 invalid_hvs = set(op_input) - constants.HYPER_TYPES
856 raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
857 " %s" % utils.CommaJoin(invalid_hvs),
859 if obj_input is None:
861 type_check = constants.HVSTS_PARAMETER_TYPES
862 return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
867 def _MergeAndVerifyDiskState(op_input, obj_input):
868 """Combines the disk state from an opcode with the one of the object
870 @param op_input: The input dict from the opcode
871 @param obj_input: The input dict from the objects
872 @return: The verified and updated dict
875 invalid_dst = set(op_input) - constants.DS_VALID_TYPES
877 raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
878 utils.CommaJoin(invalid_dst),
880 type_check = constants.DSS_PARAMETER_TYPES
881 if obj_input is None:
883 return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
885 for key, value in op_input.items())
890 def _ReleaseLocks(lu, level, names=None, keep=None):
891 """Releases locks owned by an LU.
893 @type lu: L{LogicalUnit}
894 @param level: Lock level
895 @type names: list or None
896 @param names: Names of locks to release
897 @type keep: list or None
898 @param keep: Names of locks to retain
901 assert not (keep is not None and names is not None), \
902 "Only one of the 'names' and the 'keep' parameters can be given"
904 if names is not None:
905 should_release = names.__contains__
907 should_release = lambda name: name not in keep
909 should_release = None
911 owned = lu.owned_locks(level)
913 # Not owning any lock at this level, do nothing
920 # Determine which locks to release
922 if should_release(name):
927 assert len(lu.owned_locks(level)) == (len(retain) + len(release))
929 # Release just some locks
930 lu.glm.release(level, names=release)
932 assert frozenset(lu.owned_locks(level)) == frozenset(retain)
935 lu.glm.release(level)
937 assert not lu.glm.is_owned(level), "No locks should be owned"
940 def _MapInstanceDisksToNodes(instances):
941 """Creates a map from (node, volume) to instance name.
943 @type instances: list of L{objects.Instance}
944 @rtype: dict; tuple of (node name, volume name) as key, instance name as value
947 return dict(((node, vol), inst.name)
948 for inst in instances
949 for (node, vols) in inst.MapLVsByNode().items()
953 def _RunPostHook(lu, node_name):
954 """Runs the post-hook for an opcode on a single node.
957 hm = lu.proc.BuildHooksManager(lu)
959 hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
961 # pylint: disable=W0702
962 lu.LogWarning("Errors occurred running hooks on %s" % node_name)
965 def _CheckOutputFields(static, dynamic, selected):
966 """Checks whether all selected fields are valid.
968 @type static: L{utils.FieldSet}
969 @param static: static fields set
970 @type dynamic: L{utils.FieldSet}
971 @param dynamic: dynamic fields set
978 delta = f.NonMatching(selected)
980 raise errors.OpPrereqError("Unknown output fields selected: %s"
981 % ",".join(delta), errors.ECODE_INVAL)
984 def _CheckGlobalHvParams(params):
985 """Validates that given hypervisor params are not global ones.
987 This will ensure that instances don't get customised versions of
991 used_globals = constants.HVC_GLOBALS.intersection(params)
993 msg = ("The following hypervisor parameters are global and cannot"
994 " be customized at instance level, please modify them at"
995 " cluster level: %s" % utils.CommaJoin(used_globals))
996 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
999 def _CheckNodeOnline(lu, node, msg=None):
1000 """Ensure that a given node is online.
1002 @param lu: the LU on behalf of which we make the check
1003 @param node: the node to check
1004 @param msg: if passed, should be a message to replace the default one
1005 @raise errors.OpPrereqError: if the node is offline
1009 msg = "Can't use offline node"
1010 if lu.cfg.GetNodeInfo(node).offline:
1011 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
1014 def _CheckNodeNotDrained(lu, node):
1015 """Ensure that a given node is not drained.
1017 @param lu: the LU on behalf of which we make the check
1018 @param node: the node to check
1019 @raise errors.OpPrereqError: if the node is drained
1022 if lu.cfg.GetNodeInfo(node).drained:
1023 raise errors.OpPrereqError("Can't use drained node %s" % node,
1027 def _CheckNodeVmCapable(lu, node):
1028 """Ensure that a given node is vm capable.
1030 @param lu: the LU on behalf of which we make the check
1031 @param node: the node to check
1032 @raise errors.OpPrereqError: if the node is not vm capable
1035 if not lu.cfg.GetNodeInfo(node).vm_capable:
1036 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
1040 def _CheckNodeHasOS(lu, node, os_name, force_variant):
1041 """Ensure that a node supports a given OS.
1043 @param lu: the LU on behalf of which we make the check
1044 @param node: the node to check
1045 @param os_name: the OS to query about
1046 @param force_variant: whether to ignore variant errors
1047 @raise errors.OpPrereqError: if the node is not supporting the OS
1050 result = lu.rpc.call_os_get(node, os_name)
1051 result.Raise("OS '%s' not in supported OS list for node %s" %
1053 prereq=True, ecode=errors.ECODE_INVAL)
1054 if not force_variant:
1055 _CheckOSVariant(result.payload, os_name)
1058 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
1059 """Ensure that a node has the given secondary ip.
1061 @type lu: L{LogicalUnit}
1062 @param lu: the LU on behalf of which we make the check
1064 @param node: the node to check
1065 @type secondary_ip: string
1066 @param secondary_ip: the ip to check
1067 @type prereq: boolean
1068 @param prereq: whether to throw a prerequisite or an execute error
1069 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
1070 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
1073 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
1074 result.Raise("Failure checking secondary ip on node %s" % node,
1075 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1076 if not result.payload:
1077 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
1078 " please fix and re-run this command" % secondary_ip)
1080 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
1082 raise errors.OpExecError(msg)
1085 def _GetClusterDomainSecret():
1086 """Reads the cluster domain secret.
1089 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
1093 def _CheckInstanceState(lu, instance, req_states, msg=None):
1094 """Ensure that an instance is in one of the required states.
1096 @param lu: the LU on behalf of which we make the check
1097 @param instance: the instance to check
1098 @param msg: if passed, should be a message to replace the default one
1099 @raise errors.OpPrereqError: if the instance is not in the required state
1103 msg = "can't use instance from outside %s states" % ", ".join(req_states)
1104 if instance.admin_state not in req_states:
1105 raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
1106 (instance.name, instance.admin_state, msg),
1109 if constants.ADMINST_UP not in req_states:
1110 pnode = instance.primary_node
1111 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
1112 ins_l.Raise("Can't contact node %s for instance information" % pnode,
1113 prereq=True, ecode=errors.ECODE_ENVIRON)
1115 if instance.name in ins_l.payload:
1116 raise errors.OpPrereqError("Instance %s is running, %s" %
1117 (instance.name, msg), errors.ECODE_STATE)
1120 def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
1121 """Computes if value is in the desired range.
1123 @param name: name of the parameter for which we perform the check
1124 @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
1126 @param ipolicy: dictionary containing min, max and std values
1127 @param value: actual value that we want to use
1128 @return: None or element not meeting the criteria
1132 if value in [None, constants.VALUE_AUTO]:
1134 max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
1135 min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
1136 if value > max_v or min_v > value:
1138 fqn = "%s/%s" % (name, qualifier)
1141 return ("%s value %s is not in range [%s, %s]" %
1142 (fqn, value, min_v, max_v))
1146 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
1147 nic_count, disk_sizes, spindle_use,
1148 _compute_fn=_ComputeMinMaxSpec):
1149 """Verifies ipolicy against provided specs.
1152 @param ipolicy: The ipolicy
1154 @param mem_size: The memory size
1155 @type cpu_count: int
1156 @param cpu_count: Used cpu cores
1157 @type disk_count: int
1158 @param disk_count: Number of disks used
1159 @type nic_count: int
1160 @param nic_count: Number of nics used
1161 @type disk_sizes: list of ints
1162 @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
1163 @type spindle_use: int
1164 @param spindle_use: The number of spindles this instance uses
1165 @param _compute_fn: The compute function (unittest only)
1166 @return: A list of violations, or an empty list of no violations are found
1169 assert disk_count == len(disk_sizes)
1172 (constants.ISPEC_MEM_SIZE, "", mem_size),
1173 (constants.ISPEC_CPU_COUNT, "", cpu_count),
1174 (constants.ISPEC_DISK_COUNT, "", disk_count),
1175 (constants.ISPEC_NIC_COUNT, "", nic_count),
1176 (constants.ISPEC_SPINDLE_USE, "", spindle_use),
1177 ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
1178 for idx, d in enumerate(disk_sizes)]
1181 (_compute_fn(name, qualifier, ipolicy, value)
1182 for (name, qualifier, value) in test_settings))
1185 def _ComputeIPolicyInstanceViolation(ipolicy, instance,
1186 _compute_fn=_ComputeIPolicySpecViolation):
1187 """Compute if instance meets the specs of ipolicy.
1190 @param ipolicy: The ipolicy to verify against
1191 @type instance: L{objects.Instance}
1192 @param instance: The instance to verify
1193 @param _compute_fn: The function to verify ipolicy (unittest only)
1194 @see: L{_ComputeIPolicySpecViolation}
1197 mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
1198 cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
1199 spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
1200 disk_count = len(instance.disks)
1201 disk_sizes = [disk.size for disk in instance.disks]
1202 nic_count = len(instance.nics)
1204 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
1205 disk_sizes, spindle_use)
1208 def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
1209 _compute_fn=_ComputeIPolicySpecViolation):
1210 """Compute if instance specs meets the specs of ipolicy.
1213 @param ipolicy: The ipolicy to verify against
1214 @param instance_spec: dict
1215 @param instance_spec: The instance spec to verify
1216 @param _compute_fn: The function to verify ipolicy (unittest only)
1217 @see: L{_ComputeIPolicySpecViolation}
1220 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
1221 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
1222 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
1223 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
1224 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
1225 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
1227 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
1228 disk_sizes, spindle_use)
1231 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
1233 _compute_fn=_ComputeIPolicyInstanceViolation):
1234 """Compute if instance meets the specs of the new target group.
1236 @param ipolicy: The ipolicy to verify
1237 @param instance: The instance object to verify
1238 @param current_group: The current group of the instance
1239 @param target_group: The new group of the instance
1240 @param _compute_fn: The function to verify ipolicy (unittest only)
1241 @see: L{_ComputeIPolicySpecViolation}
1244 if current_group == target_group:
1247 return _compute_fn(ipolicy, instance)
1250 def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
1251 _compute_fn=_ComputeIPolicyNodeViolation):
1252 """Checks that the target node is correct in terms of instance policy.
1254 @param ipolicy: The ipolicy to verify
1255 @param instance: The instance object to verify
1256 @param node: The new node to relocate
1257 @param ignore: Ignore violations of the ipolicy
1258 @param _compute_fn: The function to verify ipolicy (unittest only)
1259 @see: L{_ComputeIPolicySpecViolation}
1262 primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
1263 res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
1266 msg = ("Instance does not meet target node group's (%s) instance"
1267 " policy: %s") % (node.group, utils.CommaJoin(res))
1271 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1274 def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
1275 """Computes a set of any instances that would violate the new ipolicy.
1277 @param old_ipolicy: The current (still in-place) ipolicy
1278 @param new_ipolicy: The new (to become) ipolicy
1279 @param instances: List of instances to verify
1280 @return: A list of instances which violates the new ipolicy but
1284 return (_ComputeViolatingInstances(new_ipolicy, instances) -
1285 _ComputeViolatingInstances(old_ipolicy, instances))
1288 def _ExpandItemName(fn, name, kind):
1289 """Expand an item name.
1291 @param fn: the function to use for expansion
1292 @param name: requested item name
1293 @param kind: text description ('Node' or 'Instance')
1294 @return: the resolved (full) name
1295 @raise errors.OpPrereqError: if the item is not found
1298 full_name = fn(name)
1299 if full_name is None:
1300 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
1305 def _ExpandNodeName(cfg, name):
1306 """Wrapper over L{_ExpandItemName} for nodes."""
1307 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
1310 def _ExpandInstanceName(cfg, name):
1311 """Wrapper over L{_ExpandItemName} for instance."""
1312 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
1315 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
1316 minmem, maxmem, vcpus, nics, disk_template, disks,
1317 bep, hvp, hypervisor_name, tags):
1318 """Builds instance related env variables for hooks
1320 This builds the hook environment from individual variables.
1323 @param name: the name of the instance
1324 @type primary_node: string
1325 @param primary_node: the name of the instance's primary node
1326 @type secondary_nodes: list
1327 @param secondary_nodes: list of secondary nodes as strings
1328 @type os_type: string
1329 @param os_type: the name of the instance's OS
1330 @type status: string
1331 @param status: the desired status of the instance
1332 @type minmem: string
1333 @param minmem: the minimum memory size of the instance
1334 @type maxmem: string
1335 @param maxmem: the maximum memory size of the instance
1337 @param vcpus: the count of VCPUs the instance has
1339 @param nics: list of tuples (ip, mac, mode, link) representing
1340 the NICs the instance has
1341 @type disk_template: string
1342 @param disk_template: the disk template of the instance
1344 @param disks: the list of (size, mode) pairs
1346 @param bep: the backend parameters for the instance
1348 @param hvp: the hypervisor parameters for the instance
1349 @type hypervisor_name: string
1350 @param hypervisor_name: the hypervisor for the instance
1352 @param tags: list of instance tags as strings
1354 @return: the hook environment for this instance
1359 "INSTANCE_NAME": name,
1360 "INSTANCE_PRIMARY": primary_node,
1361 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
1362 "INSTANCE_OS_TYPE": os_type,
1363 "INSTANCE_STATUS": status,
1364 "INSTANCE_MINMEM": minmem,
1365 "INSTANCE_MAXMEM": maxmem,
1366 # TODO(2.7) remove deprecated "memory" value
1367 "INSTANCE_MEMORY": maxmem,
1368 "INSTANCE_VCPUS": vcpus,
1369 "INSTANCE_DISK_TEMPLATE": disk_template,
1370 "INSTANCE_HYPERVISOR": hypervisor_name,
1373 nic_count = len(nics)
1374 for idx, (ip, mac, mode, link) in enumerate(nics):
1377 env["INSTANCE_NIC%d_IP" % idx] = ip
1378 env["INSTANCE_NIC%d_MAC" % idx] = mac
1379 env["INSTANCE_NIC%d_MODE" % idx] = mode
1380 env["INSTANCE_NIC%d_LINK" % idx] = link
1381 if mode == constants.NIC_MODE_BRIDGED:
1382 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
1386 env["INSTANCE_NIC_COUNT"] = nic_count
1389 disk_count = len(disks)
1390 for idx, (size, mode) in enumerate(disks):
1391 env["INSTANCE_DISK%d_SIZE" % idx] = size
1392 env["INSTANCE_DISK%d_MODE" % idx] = mode
1396 env["INSTANCE_DISK_COUNT"] = disk_count
1401 env["INSTANCE_TAGS"] = " ".join(tags)
1403 for source, kind in [(bep, "BE"), (hvp, "HV")]:
1404 for key, value in source.items():
1405 env["INSTANCE_%s_%s" % (kind, key)] = value
1410 def _NICListToTuple(lu, nics):
1411 """Build a list of nic information tuples.
1413 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
1414 value in LUInstanceQueryData.
1416 @type lu: L{LogicalUnit}
1417 @param lu: the logical unit on whose behalf we execute
1418 @type nics: list of L{objects.NIC}
1419 @param nics: list of nics to convert to hooks tuples
1423 cluster = lu.cfg.GetClusterInfo()
1427 filled_params = cluster.SimpleFillNIC(nic.nicparams)
1428 mode = filled_params[constants.NIC_MODE]
1429 link = filled_params[constants.NIC_LINK]
1430 hooks_nics.append((ip, mac, mode, link))
1434 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
1435 """Builds instance related env variables for hooks from an object.
1437 @type lu: L{LogicalUnit}
1438 @param lu: the logical unit on whose behalf we execute
1439 @type instance: L{objects.Instance}
1440 @param instance: the instance for which we should build the
1442 @type override: dict
1443 @param override: dictionary with key/values that will override
1446 @return: the hook environment dictionary
1449 cluster = lu.cfg.GetClusterInfo()
1450 bep = cluster.FillBE(instance)
1451 hvp = cluster.FillHV(instance)
1453 "name": instance.name,
1454 "primary_node": instance.primary_node,
1455 "secondary_nodes": instance.secondary_nodes,
1456 "os_type": instance.os,
1457 "status": instance.admin_state,
1458 "maxmem": bep[constants.BE_MAXMEM],
1459 "minmem": bep[constants.BE_MINMEM],
1460 "vcpus": bep[constants.BE_VCPUS],
1461 "nics": _NICListToTuple(lu, instance.nics),
1462 "disk_template": instance.disk_template,
1463 "disks": [(disk.size, disk.mode) for disk in instance.disks],
1466 "hypervisor_name": instance.hypervisor,
1467 "tags": instance.tags,
1470 args.update(override)
1471 return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
1474 def _AdjustCandidatePool(lu, exceptions):
1475 """Adjust the candidate pool after node operations.
1478 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
1480 lu.LogInfo("Promoted nodes to master candidate role: %s",
1481 utils.CommaJoin(node.name for node in mod_list))
1482 for name in mod_list:
1483 lu.context.ReaddNode(name)
1484 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1486 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
1490 def _DecideSelfPromotion(lu, exceptions=None):
1491 """Decide whether I should promote myself as a master candidate.
1494 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
1495 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1496 # the new node will increase mc_max with one, so:
1497 mc_should = min(mc_should + 1, cp_size)
1498 return mc_now < mc_should
1501 def _CalculateGroupIPolicy(cluster, group):
1502 """Calculate instance policy for group.
1505 return cluster.SimpleFillIPolicy(group.ipolicy)
1508 def _ComputeViolatingInstances(ipolicy, instances):
1509 """Computes a set of instances who violates given ipolicy.
1511 @param ipolicy: The ipolicy to verify
1512 @type instances: object.Instance
1513 @param instances: List of instances to verify
1514 @return: A frozenset of instance names violating the ipolicy
1517 return frozenset([inst.name for inst in instances
1518 if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
1521 def _CheckNicsBridgesExist(lu, target_nics, target_node):
1522 """Check that the brigdes needed by a list of nics exist.
1525 cluster = lu.cfg.GetClusterInfo()
1526 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
1527 brlist = [params[constants.NIC_LINK] for params in paramslist
1528 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
1530 result = lu.rpc.call_bridges_exist(target_node, brlist)
1531 result.Raise("Error checking bridges on destination node '%s'" %
1532 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
1535 def _CheckInstanceBridgesExist(lu, instance, node=None):
1536 """Check that the brigdes needed by an instance exist.
1540 node = instance.primary_node
1541 _CheckNicsBridgesExist(lu, instance.nics, node)
1544 def _CheckOSVariant(os_obj, name):
1545 """Check whether an OS name conforms to the os variants specification.
1547 @type os_obj: L{objects.OS}
1548 @param os_obj: OS object to check
1550 @param name: OS name passed by the user, to check for validity
1553 variant = objects.OS.GetVariant(name)
1554 if not os_obj.supported_variants:
1556 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
1557 " passed)" % (os_obj.name, variant),
1561 raise errors.OpPrereqError("OS name must include a variant",
1564 if variant not in os_obj.supported_variants:
1565 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1568 def _GetNodeInstancesInner(cfg, fn):
1569 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1572 def _GetNodeInstances(cfg, node_name):
1573 """Returns a list of all primary and secondary instances on a node.
1577 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1580 def _GetNodePrimaryInstances(cfg, node_name):
1581 """Returns primary instances on a node.
1584 return _GetNodeInstancesInner(cfg,
1585 lambda inst: node_name == inst.primary_node)
1588 def _GetNodeSecondaryInstances(cfg, node_name):
1589 """Returns secondary instances on a node.
1592 return _GetNodeInstancesInner(cfg,
1593 lambda inst: node_name in inst.secondary_nodes)
1596 def _GetStorageTypeArgs(cfg, storage_type):
1597 """Returns the arguments for a storage type.
1600 # Special case for file storage
1601 if storage_type == constants.ST_FILE:
1602 # storage.FileStorage wants a list of storage directories
1603 return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1608 def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
1611 for dev in instance.disks:
1612 cfg.SetDiskID(dev, node_name)
1614 result = rpc_runner.call_blockdev_getmirrorstatus(node_name, instance.disks)
1615 result.Raise("Failed to get disk status from node %s" % node_name,
1616 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1618 for idx, bdev_status in enumerate(result.payload):
1619 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1625 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1626 """Check the sanity of iallocator and node arguments and use the
1627 cluster-wide iallocator if appropriate.
1629 Check that at most one of (iallocator, node) is specified. If none is
1630 specified, then the LU's opcode's iallocator slot is filled with the
1631 cluster-wide default iallocator.
1633 @type iallocator_slot: string
1634 @param iallocator_slot: the name of the opcode iallocator slot
1635 @type node_slot: string
1636 @param node_slot: the name of the opcode target node slot
1639 node = getattr(lu.op, node_slot, None)
1640 iallocator = getattr(lu.op, iallocator_slot, None)
1642 if node is not None and iallocator is not None:
1643 raise errors.OpPrereqError("Do not specify both, iallocator and node",
1645 elif node is None and iallocator is None:
1646 default_iallocator = lu.cfg.GetDefaultIAllocator()
1647 if default_iallocator:
1648 setattr(lu.op, iallocator_slot, default_iallocator)
1650 raise errors.OpPrereqError("No iallocator or node given and no"
1651 " cluster-wide default iallocator found;"
1652 " please specify either an iallocator or a"
1653 " node, or set a cluster-wide default"
1657 def _GetDefaultIAllocator(cfg, iallocator):
1658 """Decides on which iallocator to use.
1660 @type cfg: L{config.ConfigWriter}
1661 @param cfg: Cluster configuration object
1662 @type iallocator: string or None
1663 @param iallocator: Iallocator specified in opcode
1665 @return: Iallocator name
1669 # Use default iallocator
1670 iallocator = cfg.GetDefaultIAllocator()
1673 raise errors.OpPrereqError("No iallocator was specified, neither in the"
1674 " opcode nor as a cluster-wide default",
1680 class LUClusterPostInit(LogicalUnit):
1681 """Logical unit for running hooks after cluster initialization.
1684 HPATH = "cluster-init"
1685 HTYPE = constants.HTYPE_CLUSTER
1687 def BuildHooksEnv(self):
1692 "OP_TARGET": self.cfg.GetClusterName(),
1695 def BuildHooksNodes(self):
1696 """Build hooks nodes.
1699 return ([], [self.cfg.GetMasterNode()])
1701 def Exec(self, feedback_fn):
1708 class LUClusterDestroy(LogicalUnit):
1709 """Logical unit for destroying the cluster.
1712 HPATH = "cluster-destroy"
1713 HTYPE = constants.HTYPE_CLUSTER
1715 def BuildHooksEnv(self):
1720 "OP_TARGET": self.cfg.GetClusterName(),
1723 def BuildHooksNodes(self):
1724 """Build hooks nodes.
1729 def CheckPrereq(self):
1730 """Check prerequisites.
1732 This checks whether the cluster is empty.
1734 Any errors are signaled by raising errors.OpPrereqError.
1737 master = self.cfg.GetMasterNode()
1739 nodelist = self.cfg.GetNodeList()
1740 if len(nodelist) != 1 or nodelist[0] != master:
1741 raise errors.OpPrereqError("There are still %d node(s) in"
1742 " this cluster." % (len(nodelist) - 1),
1744 instancelist = self.cfg.GetInstanceList()
1746 raise errors.OpPrereqError("There are still %d instance(s) in"
1747 " this cluster." % len(instancelist),
1750 def Exec(self, feedback_fn):
1751 """Destroys the cluster.
1754 master_params = self.cfg.GetMasterNetworkParameters()
1756 # Run post hooks on master node before it's removed
1757 _RunPostHook(self, master_params.name)
1759 ems = self.cfg.GetUseExternalMipScript()
1760 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
1763 self.LogWarning("Error disabling the master IP address: %s",
1766 return master_params.name
1769 def _VerifyCertificate(filename):
1770 """Verifies a certificate for L{LUClusterVerifyConfig}.
1772 @type filename: string
1773 @param filename: Path to PEM file
1777 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1778 utils.ReadFile(filename))
1779 except Exception, err: # pylint: disable=W0703
1780 return (LUClusterVerifyConfig.ETYPE_ERROR,
1781 "Failed to load X509 certificate %s: %s" % (filename, err))
1784 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1785 constants.SSL_CERT_EXPIRATION_ERROR)
1788 fnamemsg = "While verifying %s: %s" % (filename, msg)
1793 return (None, fnamemsg)
1794 elif errcode == utils.CERT_WARNING:
1795 return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
1796 elif errcode == utils.CERT_ERROR:
1797 return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)
1799 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1802 def _GetAllHypervisorParameters(cluster, instances):
1803 """Compute the set of all hypervisor parameters.
1805 @type cluster: L{objects.Cluster}
1806 @param cluster: the cluster object
1807 @param instances: list of L{objects.Instance}
1808 @param instances: additional instances from which to obtain parameters
1809 @rtype: list of (origin, hypervisor, parameters)
1810 @return: a list with all parameters found, indicating the hypervisor they
1811 apply to, and the origin (can be "cluster", "os X", or "instance Y")
1816 for hv_name in cluster.enabled_hypervisors:
1817 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
1819 for os_name, os_hvp in cluster.os_hvp.items():
1820 for hv_name, hv_params in os_hvp.items():
1822 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
1823 hvp_data.append(("os %s" % os_name, hv_name, full_params))
1825 # TODO: collapse identical parameter values in a single one
1826 for instance in instances:
1827 if instance.hvparams:
1828 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
1829 cluster.FillHV(instance)))
1834 class _VerifyErrors(object):
1835 """Mix-in for cluster/group verify LUs.
1837 It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
1838 self.op and self._feedback_fn to be available.)
1842 ETYPE_FIELD = "code"
1843 ETYPE_ERROR = "ERROR"
1844 ETYPE_WARNING = "WARNING"
1846 def _Error(self, ecode, item, msg, *args, **kwargs):
1847 """Format an error message.
1849 Based on the opcode's error_codes parameter, either format a
1850 parseable error code, or a simpler error string.
1852 This must be called only from Exec and functions called from Exec.
1855 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1856 itype, etxt, _ = ecode
1857 # first complete the msg
1860 # then format the whole message
1861 if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
1862 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1868 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1869 # and finally report it via the feedback_fn
1870 self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101
1872 def _ErrorIf(self, cond, ecode, *args, **kwargs):
1873 """Log an error message if the passed condition is True.
1877 or self.op.debug_simulate_errors) # pylint: disable=E1101
1879 # If the error code is in the list of ignored errors, demote the error to a
1881 (_, etxt, _) = ecode
1882 if etxt in self.op.ignore_errors: # pylint: disable=E1101
1883 kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING
1886 self._Error(ecode, *args, **kwargs)
1888 # do not mark the operation as failed for WARN cases only
1889 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1890 self.bad = self.bad or cond
1893 class LUClusterVerify(NoHooksLU):
1894 """Submits all jobs necessary to verify the cluster.
1899 def ExpandNames(self):
1900 self.needed_locks = {}
1902 def Exec(self, feedback_fn):
1905 if self.op.group_name:
1906 groups = [self.op.group_name]
1907 depends_fn = lambda: None
1909 groups = self.cfg.GetNodeGroupList()
1911 # Verify global configuration
1913 opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors)
1916 # Always depend on global verification
1917 depends_fn = lambda: [(-len(jobs), [])]
1919 jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
1920 ignore_errors=self.op.ignore_errors,
1921 depends=depends_fn())]
1922 for group in groups)
1924 # Fix up all parameters
1925 for op in itertools.chain(*jobs): # pylint: disable=W0142
1926 op.debug_simulate_errors = self.op.debug_simulate_errors
1927 op.verbose = self.op.verbose
1928 op.error_codes = self.op.error_codes
1930 op.skip_checks = self.op.skip_checks
1931 except AttributeError:
1932 assert not isinstance(op, opcodes.OpClusterVerifyGroup)
1934 return ResultWithJobs(jobs)
1937 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
1938 """Verifies the cluster config.
1943 def _VerifyHVP(self, hvp_data):
1944 """Verifies locally the syntax of the hypervisor parameters.
1947 for item, hv_name, hv_params in hvp_data:
1948 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
1951 hv_class = hypervisor.GetHypervisor(hv_name)
1952 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1953 hv_class.CheckParameterSyntax(hv_params)
1954 except errors.GenericError, err:
1955 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
1957 def ExpandNames(self):
1958 self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
1959 self.share_locks = _ShareAll()
1961 def CheckPrereq(self):
1962 """Check prerequisites.
1965 # Retrieve all information
1966 self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
1967 self.all_node_info = self.cfg.GetAllNodesInfo()
1968 self.all_inst_info = self.cfg.GetAllInstancesInfo()
1970 def Exec(self, feedback_fn):
1971 """Verify integrity of cluster, performing various test on nodes.
1975 self._feedback_fn = feedback_fn
1977 feedback_fn("* Verifying cluster config")
1979 for msg in self.cfg.VerifyConfig():
1980 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
1982 feedback_fn("* Verifying cluster certificate files")
1984 for cert_filename in constants.ALL_CERT_FILES:
1985 (errcode, msg) = _VerifyCertificate(cert_filename)
1986 self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
1988 feedback_fn("* Verifying hypervisor parameters")
1990 self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
1991 self.all_inst_info.values()))
1993 feedback_fn("* Verifying all nodes belong to an existing group")
1995 # We do this verification here because, should this bogus circumstance
1996 # occur, it would never be caught by VerifyGroup, which only acts on
1997 # nodes/instances reachable from existing node groups.
1999 dangling_nodes = set(node.name for node in self.all_node_info.values()
2000 if node.group not in self.all_group_info)
2002 dangling_instances = {}
2003 no_node_instances = []
2005 for inst in self.all_inst_info.values():
2006 if inst.primary_node in dangling_nodes:
2007 dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
2008 elif inst.primary_node not in self.all_node_info:
2009 no_node_instances.append(inst.name)
2014 utils.CommaJoin(dangling_instances.get(node.name,
2016 for node in dangling_nodes]
2018 self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
2020 "the following nodes (and their instances) belong to a non"
2021 " existing group: %s", utils.CommaJoin(pretty_dangling))
2023 self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
2025 "the following instances have a non-existing primary-node:"
2026 " %s", utils.CommaJoin(no_node_instances))
2031 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
2032 """Verifies the status of a node group.
2035 HPATH = "cluster-verify"
2036 HTYPE = constants.HTYPE_CLUSTER
2039 _HOOKS_INDENT_RE = re.compile("^", re.M)
2041 class NodeImage(object):
2042 """A class representing the logical and physical status of a node.
2045 @ivar name: the node name to which this object refers
2046 @ivar volumes: a structure as returned from
2047 L{ganeti.backend.GetVolumeList} (runtime)
2048 @ivar instances: a list of running instances (runtime)
2049 @ivar pinst: list of configured primary instances (config)
2050 @ivar sinst: list of configured secondary instances (config)
2051 @ivar sbp: dictionary of {primary-node: list of instances} for all
2052 instances for which this node is secondary (config)
2053 @ivar mfree: free memory, as reported by hypervisor (runtime)
2054 @ivar dfree: free disk, as reported by the node (runtime)
2055 @ivar offline: the offline status (config)
2056 @type rpc_fail: boolean
2057 @ivar rpc_fail: whether the RPC verify call was successfull (overall,
2058 not whether the individual keys were correct) (runtime)
2059 @type lvm_fail: boolean
2060 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
2061 @type hyp_fail: boolean
2062 @ivar hyp_fail: whether the RPC call didn't return the instance list
2063 @type ghost: boolean
2064 @ivar ghost: whether this is a known node or not (config)
2065 @type os_fail: boolean
2066 @ivar os_fail: whether the RPC call didn't return valid OS data
2068 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
2069 @type vm_capable: boolean
2070 @ivar vm_capable: whether the node can host instances
2073 def __init__(self, offline=False, name=None, vm_capable=True):
2082 self.offline = offline
2083 self.vm_capable = vm_capable
2084 self.rpc_fail = False
2085 self.lvm_fail = False
2086 self.hyp_fail = False
2088 self.os_fail = False
2091 def ExpandNames(self):
2092 # This raises errors.OpPrereqError on its own:
2093 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
2095 # Get instances in node group; this is unsafe and needs verification later
2097 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2099 self.needed_locks = {
2100 locking.LEVEL_INSTANCE: inst_names,
2101 locking.LEVEL_NODEGROUP: [self.group_uuid],
2102 locking.LEVEL_NODE: [],
2105 self.share_locks = _ShareAll()
2107 def DeclareLocks(self, level):
2108 if level == locking.LEVEL_NODE:
2109 # Get members of node group; this is unsafe and needs verification later
2110 nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
2112 all_inst_info = self.cfg.GetAllInstancesInfo()
2114 # In Exec(), we warn about mirrored instances that have primary and
2115 # secondary living in separate node groups. To fully verify that
2116 # volumes for these instances are healthy, we will need to do an
2117 # extra call to their secondaries. We ensure here those nodes will
2119 for inst in self.owned_locks(locking.LEVEL_INSTANCE):
2120 # Important: access only the instances whose lock is owned
2121 if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
2122 nodes.update(all_inst_info[inst].secondary_nodes)
2124 self.needed_locks[locking.LEVEL_NODE] = nodes
2126 def CheckPrereq(self):
2127 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
2128 self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
2130 group_nodes = set(self.group_info.members)
2132 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2135 group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2137 unlocked_instances = \
2138 group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
2141 raise errors.OpPrereqError("Missing lock for nodes: %s" %
2142 utils.CommaJoin(unlocked_nodes),
2145 if unlocked_instances:
2146 raise errors.OpPrereqError("Missing lock for instances: %s" %
2147 utils.CommaJoin(unlocked_instances),
2150 self.all_node_info = self.cfg.GetAllNodesInfo()
2151 self.all_inst_info = self.cfg.GetAllInstancesInfo()
2153 self.my_node_names = utils.NiceSort(group_nodes)
2154 self.my_inst_names = utils.NiceSort(group_instances)
2156 self.my_node_info = dict((name, self.all_node_info[name])
2157 for name in self.my_node_names)
2159 self.my_inst_info = dict((name, self.all_inst_info[name])
2160 for name in self.my_inst_names)
2162 # We detect here the nodes that will need the extra RPC calls for verifying
2163 # split LV volumes; they should be locked.
2164 extra_lv_nodes = set()
2166 for inst in self.my_inst_info.values():
2167 if inst.disk_template in constants.DTS_INT_MIRROR:
2168 for nname in inst.all_nodes:
2169 if self.all_node_info[nname].group != self.group_uuid:
2170 extra_lv_nodes.add(nname)
2172 unlocked_lv_nodes = \
2173 extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2175 if unlocked_lv_nodes:
2176 raise errors.OpPrereqError("Missing node locks for LV check: %s" %
2177 utils.CommaJoin(unlocked_lv_nodes),
2179 self.extra_lv_nodes = list(extra_lv_nodes)
2181 def _VerifyNode(self, ninfo, nresult):
2182 """Perform some basic validation on data returned from a node.
2184 - check the result data structure is well formed and has all the
2186 - check ganeti version
2188 @type ninfo: L{objects.Node}
2189 @param ninfo: the node to check
2190 @param nresult: the results from the node
2192 @return: whether overall this call was successful (and we can expect
2193 reasonable values in the respose)
2197 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2199 # main result, nresult should be a non-empty dict
2200 test = not nresult or not isinstance(nresult, dict)
2201 _ErrorIf(test, constants.CV_ENODERPC, node,
2202 "unable to verify node: no data returned")
2206 # compares ganeti version
2207 local_version = constants.PROTOCOL_VERSION
2208 remote_version = nresult.get("version", None)
2209 test = not (remote_version and
2210 isinstance(remote_version, (list, tuple)) and
2211 len(remote_version) == 2)
2212 _ErrorIf(test, constants.CV_ENODERPC, node,
2213 "connection to node returned invalid data")
2217 test = local_version != remote_version[0]
2218 _ErrorIf(test, constants.CV_ENODEVERSION, node,
2219 "incompatible protocol versions: master %s,"
2220 " node %s", local_version, remote_version[0])
2224 # node seems compatible, we can actually try to look into its results
2226 # full package version
2227 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
2228 constants.CV_ENODEVERSION, node,
2229 "software version mismatch: master %s, node %s",
2230 constants.RELEASE_VERSION, remote_version[1],
2231 code=self.ETYPE_WARNING)
2233 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
2234 if ninfo.vm_capable and isinstance(hyp_result, dict):
2235 for hv_name, hv_result in hyp_result.iteritems():
2236 test = hv_result is not None
2237 _ErrorIf(test, constants.CV_ENODEHV, node,
2238 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
2240 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
2241 if ninfo.vm_capable and isinstance(hvp_result, list):
2242 for item, hv_name, hv_result in hvp_result:
2243 _ErrorIf(True, constants.CV_ENODEHV, node,
2244 "hypervisor %s parameter verify failure (source %s): %s",
2245 hv_name, item, hv_result)
2247 test = nresult.get(constants.NV_NODESETUP,
2248 ["Missing NODESETUP results"])
2249 _ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
2254 def _VerifyNodeTime(self, ninfo, nresult,
2255 nvinfo_starttime, nvinfo_endtime):
2256 """Check the node time.
2258 @type ninfo: L{objects.Node}
2259 @param ninfo: the node to check
2260 @param nresult: the remote results for the node
2261 @param nvinfo_starttime: the start time of the RPC call
2262 @param nvinfo_endtime: the end time of the RPC call
2266 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2268 ntime = nresult.get(constants.NV_TIME, None)
2270 ntime_merged = utils.MergeTime(ntime)
2271 except (ValueError, TypeError):
2272 _ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time")
2275 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
2276 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
2277 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
2278 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
2282 _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node,
2283 "Node time diverges by at least %s from master node time",
2286 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
2287 """Check the node LVM results.
2289 @type ninfo: L{objects.Node}
2290 @param ninfo: the node to check
2291 @param nresult: the remote results for the node
2292 @param vg_name: the configured VG name
2299 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2301 # checks vg existence and size > 20G
2302 vglist = nresult.get(constants.NV_VGLIST, None)
2304 _ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
2306 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
2307 constants.MIN_VG_SIZE)
2308 _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
2311 pvlist = nresult.get(constants.NV_PVLIST, None)
2312 test = pvlist is None
2313 _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
2315 # check that ':' is not present in PV names, since it's a
2316 # special character for lvcreate (denotes the range of PEs to
2318 for _, pvname, owner_vg in pvlist:
2319 test = ":" in pvname
2320 _ErrorIf(test, constants.CV_ENODELVM, node,
2321 "Invalid character ':' in PV '%s' of VG '%s'",
2324 def _VerifyNodeBridges(self, ninfo, nresult, bridges):
2325 """Check the node bridges.
2327 @type ninfo: L{objects.Node}
2328 @param ninfo: the node to check
2329 @param nresult: the remote results for the node
2330 @param bridges: the expected list of bridges
2337 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2339 missing = nresult.get(constants.NV_BRIDGES, None)
2340 test = not isinstance(missing, list)
2341 _ErrorIf(test, constants.CV_ENODENET, node,
2342 "did not return valid bridge information")
2344 _ErrorIf(bool(missing), constants.CV_ENODENET, node,
2345 "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
2347 def _VerifyNodeUserScripts(self, ninfo, nresult):
2348 """Check the results of user scripts presence and executability on the node
2350 @type ninfo: L{objects.Node}
2351 @param ninfo: the node to check
2352 @param nresult: the remote results for the node
2357 test = not constants.NV_USERSCRIPTS in nresult
2358 self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node,
2359 "did not return user scripts information")
2361 broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
2363 self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node,
2364 "user scripts not present or not executable: %s" %
2365 utils.CommaJoin(sorted(broken_scripts)))
2367 def _VerifyNodeNetwork(self, ninfo, nresult):
2368 """Check the node network connectivity results.
2370 @type ninfo: L{objects.Node}
2371 @param ninfo: the node to check
2372 @param nresult: the remote results for the node
2376 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2378 test = constants.NV_NODELIST not in nresult
2379 _ErrorIf(test, constants.CV_ENODESSH, node,
2380 "node hasn't returned node ssh connectivity data")
2382 if nresult[constants.NV_NODELIST]:
2383 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
2384 _ErrorIf(True, constants.CV_ENODESSH, node,
2385 "ssh communication with node '%s': %s", a_node, a_msg)
2387 test = constants.NV_NODENETTEST not in nresult
2388 _ErrorIf(test, constants.CV_ENODENET, node,
2389 "node hasn't returned node tcp connectivity data")
2391 if nresult[constants.NV_NODENETTEST]:
2392 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
2394 _ErrorIf(True, constants.CV_ENODENET, node,
2395 "tcp communication with node '%s': %s",
2396 anode, nresult[constants.NV_NODENETTEST][anode])
2398 test = constants.NV_MASTERIP not in nresult
2399 _ErrorIf(test, constants.CV_ENODENET, node,
2400 "node hasn't returned node master IP reachability data")
2402 if not nresult[constants.NV_MASTERIP]:
2403 if node == self.master_node:
2404 msg = "the master node cannot reach the master IP (not configured?)"
2406 msg = "cannot reach the master IP"
2407 _ErrorIf(True, constants.CV_ENODENET, node, msg)
2409 def _VerifyInstance(self, instance, instanceconfig, node_image,
2411 """Verify an instance.
2413 This function checks to see if the required block devices are
2414 available on the instance's node.
2417 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2418 node_current = instanceconfig.primary_node
2420 node_vol_should = {}
2421 instanceconfig.MapLVsByNode(node_vol_should)
2423 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), self.group_info)
2424 err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
2425 _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err))
2427 for node in node_vol_should:
2428 n_img = node_image[node]
2429 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2430 # ignore missing volumes on offline or broken nodes
2432 for volume in node_vol_should[node]:
2433 test = volume not in n_img.volumes
2434 _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
2435 "volume %s missing on node %s", volume, node)
2437 if instanceconfig.admin_state == constants.ADMINST_UP:
2438 pri_img = node_image[node_current]
2439 test = instance not in pri_img.instances and not pri_img.offline
2440 _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
2441 "instance not running on its primary node %s",
2444 diskdata = [(nname, success, status, idx)
2445 for (nname, disks) in diskstatus.items()
2446 for idx, (success, status) in enumerate(disks)]
2448 for nname, success, bdev_status, idx in diskdata:
2449 # the 'ghost node' construction in Exec() ensures that we have a
2451 snode = node_image[nname]
2452 bad_snode = snode.ghost or snode.offline
2453 _ErrorIf(instanceconfig.admin_state == constants.ADMINST_UP and
2454 not success and not bad_snode,
2455 constants.CV_EINSTANCEFAULTYDISK, instance,
2456 "couldn't retrieve status for disk/%s on %s: %s",
2457 idx, nname, bdev_status)
2458 _ErrorIf((instanceconfig.admin_state == constants.ADMINST_UP and
2459 success and bdev_status.ldisk_status == constants.LDS_FAULTY),
2460 constants.CV_EINSTANCEFAULTYDISK, instance,
2461 "disk/%s on %s is faulty", idx, nname)
2463 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
2464 """Verify if there are any unknown volumes in the cluster.
2466 The .os, .swap and backup volumes are ignored. All other volumes are
2467 reported as unknown.
2469 @type reserved: L{ganeti.utils.FieldSet}
2470 @param reserved: a FieldSet of reserved volume names
2473 for node, n_img in node_image.items():
2474 if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
2475 self.all_node_info[node].group != self.group_uuid):
2476 # skip non-healthy nodes
2478 for volume in n_img.volumes:
2479 test = ((node not in node_vol_should or
2480 volume not in node_vol_should[node]) and
2481 not reserved.Matches(volume))
2482 self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
2483 "volume %s is unknown", volume)
2485 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
2486 """Verify N+1 Memory Resilience.
2488 Check that if one single node dies we can still start all the
2489 instances it was primary for.
2492 cluster_info = self.cfg.GetClusterInfo()
2493 for node, n_img in node_image.items():
2494 # This code checks that every node which is now listed as
2495 # secondary has enough memory to host all instances it is
2496 # supposed to should a single other node in the cluster fail.
2497 # FIXME: not ready for failover to an arbitrary node
2498 # FIXME: does not support file-backed instances
2499 # WARNING: we currently take into account down instances as well
2500 # as up ones, considering that even if they're down someone
2501 # might want to start them even in the event of a node failure.
2502 if n_img.offline or self.all_node_info[node].group != self.group_uuid:
2503 # we're skipping nodes marked offline and nodes in other groups from
2504 # the N+1 warning, since most likely we don't have good memory
2505 # infromation from them; we already list instances living on such
2506 # nodes, and that's enough warning
2508 #TODO(dynmem): also consider ballooning out other instances
2509 for prinode, instances in n_img.sbp.items():
2511 for instance in instances:
2512 bep = cluster_info.FillBE(instance_cfg[instance])
2513 if bep[constants.BE_AUTO_BALANCE]:
2514 needed_mem += bep[constants.BE_MINMEM]
2515 test = n_img.mfree < needed_mem
2516 self._ErrorIf(test, constants.CV_ENODEN1, node,
2517 "not enough memory to accomodate instance failovers"
2518 " should node %s fail (%dMiB needed, %dMiB available)",
2519 prinode, needed_mem, n_img.mfree)
2522 def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
2523 (files_all, files_opt, files_mc, files_vm)):
2524 """Verifies file checksums collected from all nodes.
2526 @param errorif: Callback for reporting errors
2527 @param nodeinfo: List of L{objects.Node} objects
2528 @param master_node: Name of master node
2529 @param all_nvinfo: RPC results
2532 # Define functions determining which nodes to consider for a file
2535 (files_mc, lambda node: (node.master_candidate or
2536 node.name == master_node)),
2537 (files_vm, lambda node: node.vm_capable),
2540 # Build mapping from filename to list of nodes which should have the file
2542 for (files, fn) in files2nodefn:
2544 filenodes = nodeinfo
2546 filenodes = filter(fn, nodeinfo)
2547 nodefiles.update((filename,
2548 frozenset(map(operator.attrgetter("name"), filenodes)))
2549 for filename in files)
2551 assert set(nodefiles) == (files_all | files_mc | files_vm)
2553 fileinfo = dict((filename, {}) for filename in nodefiles)
2554 ignore_nodes = set()
2556 for node in nodeinfo:
2558 ignore_nodes.add(node.name)
2561 nresult = all_nvinfo[node.name]
2563 if nresult.fail_msg or not nresult.payload:
2566 node_files = nresult.payload.get(constants.NV_FILELIST, None)
2568 test = not (node_files and isinstance(node_files, dict))
2569 errorif(test, constants.CV_ENODEFILECHECK, node.name,
2570 "Node did not return file checksum data")
2572 ignore_nodes.add(node.name)
2575 # Build per-checksum mapping from filename to nodes having it
2576 for (filename, checksum) in node_files.items():
2577 assert filename in nodefiles
2578 fileinfo[filename].setdefault(checksum, set()).add(node.name)
2580 for (filename, checksums) in fileinfo.items():
2581 assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2583 # Nodes having the file
2584 with_file = frozenset(node_name
2585 for nodes in fileinfo[filename].values()
2586 for node_name in nodes) - ignore_nodes
2588 expected_nodes = nodefiles[filename] - ignore_nodes
2590 # Nodes missing file
2591 missing_file = expected_nodes - with_file
2593 if filename in files_opt:
2595 errorif(missing_file and missing_file != expected_nodes,
2596 constants.CV_ECLUSTERFILECHECK, None,
2597 "File %s is optional, but it must exist on all or no"
2598 " nodes (not found on %s)",
2599 filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2601 errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
2602 "File %s is missing from node(s) %s", filename,
2603 utils.CommaJoin(utils.NiceSort(missing_file)))
2605 # Warn if a node has a file it shouldn't
2606 unexpected = with_file - expected_nodes
2608 constants.CV_ECLUSTERFILECHECK, None,
2609 "File %s should not exist on node(s) %s",
2610 filename, utils.CommaJoin(utils.NiceSort(unexpected)))
2612 # See if there are multiple versions of the file
2613 test = len(checksums) > 1
2615 variants = ["variant %s on %s" %
2616 (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2617 for (idx, (checksum, nodes)) in
2618 enumerate(sorted(checksums.items()))]
2622 errorif(test, constants.CV_ECLUSTERFILECHECK, None,
2623 "File %s found with %s different checksums (%s)",
2624 filename, len(checksums), "; ".join(variants))
2626 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2628 """Verifies and the node DRBD status.
2630 @type ninfo: L{objects.Node}
2631 @param ninfo: the node to check
2632 @param nresult: the remote results for the node
2633 @param instanceinfo: the dict of instances
2634 @param drbd_helper: the configured DRBD usermode helper
2635 @param drbd_map: the DRBD map as returned by
2636 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2640 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2643 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2644 test = (helper_result == None)
2645 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2646 "no drbd usermode helper returned")
2648 status, payload = helper_result
2650 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2651 "drbd usermode helper check unsuccessful: %s", payload)
2652 test = status and (payload != drbd_helper)
2653 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2654 "wrong drbd usermode helper: %s", payload)
2656 # compute the DRBD minors
2658 for minor, instance in drbd_map[node].items():
2659 test = instance not in instanceinfo
2660 _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
2661 "ghost instance '%s' in temporary DRBD map", instance)
2662 # ghost instance should not be running, but otherwise we
2663 # don't give double warnings (both ghost instance and
2664 # unallocated minor in use)
2666 node_drbd[minor] = (instance, False)
2668 instance = instanceinfo[instance]
2669 node_drbd[minor] = (instance.name,
2670 instance.admin_state == constants.ADMINST_UP)
2672 # and now check them
2673 used_minors = nresult.get(constants.NV_DRBDLIST, [])
2674 test = not isinstance(used_minors, (tuple, list))
2675 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2676 "cannot parse drbd status file: %s", str(used_minors))
2678 # we cannot check drbd status
2681 for minor, (iname, must_exist) in node_drbd.items():
2682 test = minor not in used_minors and must_exist
2683 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2684 "drbd minor %d of instance %s is not active", minor, iname)
2685 for minor in used_minors:
2686 test = minor not in node_drbd
2687 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2688 "unallocated drbd minor %d is in use", minor)
2690 def _UpdateNodeOS(self, ninfo, nresult, nimg):
2691 """Builds the node OS structures.
2693 @type ninfo: L{objects.Node}
2694 @param ninfo: the node to check
2695 @param nresult: the remote results for the node
2696 @param nimg: the node image object
2700 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2702 remote_os = nresult.get(constants.NV_OSLIST, None)
2703 test = (not isinstance(remote_os, list) or
2704 not compat.all(isinstance(v, list) and len(v) == 7
2705 for v in remote_os))
2707 _ErrorIf(test, constants.CV_ENODEOS, node,
2708 "node hasn't returned valid OS data")
2717 for (name, os_path, status, diagnose,
2718 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2720 if name not in os_dict:
2723 # parameters is a list of lists instead of list of tuples due to
2724 # JSON lacking a real tuple type, fix it:
2725 parameters = [tuple(v) for v in parameters]
2726 os_dict[name].append((os_path, status, diagnose,
2727 set(variants), set(parameters), set(api_ver)))
2729 nimg.oslist = os_dict
2731 def _VerifyNodeOS(self, ninfo, nimg, base):
2732 """Verifies the node OS list.
2734 @type ninfo: L{objects.Node}
2735 @param ninfo: the node to check
2736 @param nimg: the node image object
2737 @param base: the 'template' node we match against (e.g. from the master)
2741 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2743 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2745 beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2746 for os_name, os_data in nimg.oslist.items():
2747 assert os_data, "Empty OS status for OS %s?!" % os_name
2748 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2749 _ErrorIf(not f_status, constants.CV_ENODEOS, node,
2750 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2751 _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node,
2752 "OS '%s' has multiple entries (first one shadows the rest): %s",
2753 os_name, utils.CommaJoin([v[0] for v in os_data]))
2754 # comparisons with the 'base' image
2755 test = os_name not in base.oslist
2756 _ErrorIf(test, constants.CV_ENODEOS, node,
2757 "Extra OS %s not present on reference node (%s)",
2761 assert base.oslist[os_name], "Base node has empty OS status?"
2762 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2764 # base OS is invalid, skipping
2766 for kind, a, b in [("API version", f_api, b_api),
2767 ("variants list", f_var, b_var),
2768 ("parameters", beautify_params(f_param),
2769 beautify_params(b_param))]:
2770 _ErrorIf(a != b, constants.CV_ENODEOS, node,
2771 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2772 kind, os_name, base.name,
2773 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2775 # check any missing OSes
2776 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2777 _ErrorIf(missing, constants.CV_ENODEOS, node,
2778 "OSes present on reference node %s but missing on this node: %s",
2779 base.name, utils.CommaJoin(missing))
2781 def _VerifyOob(self, ninfo, nresult):
2782 """Verifies out of band functionality of a node.
2784 @type ninfo: L{objects.Node}
2785 @param ninfo: the node to check
2786 @param nresult: the remote results for the node
2790 # We just have to verify the paths on master and/or master candidates
2791 # as the oob helper is invoked on the master
2792 if ((ninfo.master_candidate or ninfo.master_capable) and
2793 constants.NV_OOB_PATHS in nresult):
2794 for path_result in nresult[constants.NV_OOB_PATHS]:
2795 self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
2797 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2798 """Verifies and updates the node volume data.
2800 This function will update a L{NodeImage}'s internal structures
2801 with data from the remote call.
2803 @type ninfo: L{objects.Node}
2804 @param ninfo: the node to check
2805 @param nresult: the remote results for the node
2806 @param nimg: the node image object
2807 @param vg_name: the configured VG name
2811 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2813 nimg.lvm_fail = True
2814 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2817 elif isinstance(lvdata, basestring):
2818 _ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s",
2819 utils.SafeEncode(lvdata))
2820 elif not isinstance(lvdata, dict):
2821 _ErrorIf(True, constants.CV_ENODELVM, node,
2822 "rpc call to node failed (lvlist)")
2824 nimg.volumes = lvdata
2825 nimg.lvm_fail = False
2827 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2828 """Verifies and updates the node instance list.
2830 If the listing was successful, then updates this node's instance
2831 list. Otherwise, it marks the RPC call as failed for the instance
2834 @type ninfo: L{objects.Node}
2835 @param ninfo: the node to check
2836 @param nresult: the remote results for the node
2837 @param nimg: the node image object
2840 idata = nresult.get(constants.NV_INSTANCELIST, None)
2841 test = not isinstance(idata, list)
2842 self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
2843 "rpc call to node failed (instancelist): %s",
2844 utils.SafeEncode(str(idata)))
2846 nimg.hyp_fail = True
2848 nimg.instances = idata
2850 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2851 """Verifies and computes a node information map
2853 @type ninfo: L{objects.Node}
2854 @param ninfo: the node to check
2855 @param nresult: the remote results for the node
2856 @param nimg: the node image object
2857 @param vg_name: the configured VG name
2861 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2863 # try to read free memory (from the hypervisor)
2864 hv_info = nresult.get(constants.NV_HVINFO, None)
2865 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2866 _ErrorIf(test, constants.CV_ENODEHV, node,
2867 "rpc call to node failed (hvinfo)")
2870 nimg.mfree = int(hv_info["memory_free"])
2871 except (ValueError, TypeError):
2872 _ErrorIf(True, constants.CV_ENODERPC, node,
2873 "node returned invalid nodeinfo, check hypervisor")
2875 # FIXME: devise a free space model for file based instances as well
2876 if vg_name is not None:
2877 test = (constants.NV_VGLIST not in nresult or
2878 vg_name not in nresult[constants.NV_VGLIST])
2879 _ErrorIf(test, constants.CV_ENODELVM, node,
2880 "node didn't return data for the volume group '%s'"
2881 " - it is either missing or broken", vg_name)
2884 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2885 except (ValueError, TypeError):
2886 _ErrorIf(True, constants.CV_ENODERPC, node,
2887 "node returned invalid LVM info, check LVM status")
2889 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2890 """Gets per-disk status information for all instances.
2892 @type nodelist: list of strings
2893 @param nodelist: Node names
2894 @type node_image: dict of (name, L{objects.Node})
2895 @param node_image: Node objects
2896 @type instanceinfo: dict of (name, L{objects.Instance})
2897 @param instanceinfo: Instance objects
2898 @rtype: {instance: {node: [(succes, payload)]}}
2899 @return: a dictionary of per-instance dictionaries with nodes as
2900 keys and disk information as values; the disk information is a
2901 list of tuples (success, payload)
2904 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2907 node_disks_devonly = {}
2908 diskless_instances = set()
2909 diskless = constants.DT_DISKLESS
2911 for nname in nodelist:
2912 node_instances = list(itertools.chain(node_image[nname].pinst,
2913 node_image[nname].sinst))
2914 diskless_instances.update(inst for inst in node_instances
2915 if instanceinfo[inst].disk_template == diskless)
2916 disks = [(inst, disk)
2917 for inst in node_instances
2918 for disk in instanceinfo[inst].disks]
2921 # No need to collect data
2924 node_disks[nname] = disks
2926 # Creating copies as SetDiskID below will modify the objects and that can
2927 # lead to incorrect data returned from nodes
2928 devonly = [dev.Copy() for (_, dev) in disks]
2931 self.cfg.SetDiskID(dev, nname)
2933 node_disks_devonly[nname] = devonly
2935 assert len(node_disks) == len(node_disks_devonly)
2937 # Collect data from all nodes with disks
2938 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2941 assert len(result) == len(node_disks)
2945 for (nname, nres) in result.items():
2946 disks = node_disks[nname]
2949 # No data from this node
2950 data = len(disks) * [(False, "node offline")]
2953 _ErrorIf(msg, constants.CV_ENODERPC, nname,
2954 "while getting disk information: %s", msg)
2956 # No data from this node
2957 data = len(disks) * [(False, msg)]
2960 for idx, i in enumerate(nres.payload):
2961 if isinstance(i, (tuple, list)) and len(i) == 2:
2964 logging.warning("Invalid result from node %s, entry %d: %s",
2966 data.append((False, "Invalid result from the remote node"))
2968 for ((inst, _), status) in zip(disks, data):
2969 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2971 # Add empty entries for diskless instances.
2972 for inst in diskless_instances:
2973 assert inst not in instdisk
2976 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2977 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2978 compat.all(isinstance(s, (tuple, list)) and
2979 len(s) == 2 for s in statuses)
2980 for inst, nnames in instdisk.items()
2981 for nname, statuses in nnames.items())
2982 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
2987 def _SshNodeSelector(group_uuid, all_nodes):
2988 """Create endless iterators for all potential SSH check hosts.
2991 nodes = [node for node in all_nodes
2992 if (node.group != group_uuid and
2994 keyfunc = operator.attrgetter("group")
2996 return map(itertools.cycle,
2997 [sorted(map(operator.attrgetter("name"), names))
2998 for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
3002 def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
3003 """Choose which nodes should talk to which other nodes.
3005 We will make nodes contact all nodes in their group, and one node from
3008 @warning: This algorithm has a known issue if one node group is much
3009 smaller than others (e.g. just one node). In such a case all other
3010 nodes will talk to the single node.
3013 online_nodes = sorted(node.name for node in group_nodes if not node.offline)
3014 sel = cls._SshNodeSelector(group_uuid, all_nodes)
3016 return (online_nodes,
3017 dict((name, sorted([i.next() for i in sel]))
3018 for name in online_nodes))
3020 def BuildHooksEnv(self):
3023 Cluster-Verify hooks just ran in the post phase and their failure makes
3024 the output be logged in the verify output and the verification to fail.
3028 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
3031 env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
3032 for node in self.my_node_info.values())
3036 def BuildHooksNodes(self):
3037 """Build hooks nodes.
3040 return ([], self.my_node_names)
3042 def Exec(self, feedback_fn):
3043 """Verify integrity of the node group, performing various test on nodes.
3046 # This method has too many local variables. pylint: disable=R0914
3047 feedback_fn("* Verifying group '%s'" % self.group_info.name)
3049 if not self.my_node_names:
3051 feedback_fn("* Empty node group, skipping verification")
3055 _ErrorIf = self._ErrorIf # pylint: disable=C0103
3056 verbose = self.op.verbose
3057 self._feedback_fn = feedback_fn
3059 vg_name = self.cfg.GetVGName()
3060 drbd_helper = self.cfg.GetDRBDHelper()
3061 cluster = self.cfg.GetClusterInfo()
3062 groupinfo = self.cfg.GetAllNodeGroupsInfo()
3063 hypervisors = cluster.enabled_hypervisors
3064 node_data_list = [self.my_node_info[name] for name in self.my_node_names]
3066 i_non_redundant = [] # Non redundant instances
3067 i_non_a_balanced = [] # Non auto-balanced instances
3068 i_offline = 0 # Count of offline instances
3069 n_offline = 0 # Count of offline nodes
3070 n_drained = 0 # Count of nodes being drained
3071 node_vol_should = {}
3073 # FIXME: verify OS list
3076 filemap = _ComputeAncillaryFiles(cluster, False)
3078 # do local checksums
3079 master_node = self.master_node = self.cfg.GetMasterNode()
3080 master_ip = self.cfg.GetMasterIP()
3082 feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
3085 if self.cfg.GetUseExternalMipScript():
3086 user_scripts.append(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
3088 node_verify_param = {
3089 constants.NV_FILELIST:
3090 utils.UniqueSequence(filename
3091 for files in filemap
3092 for filename in files),
3093 constants.NV_NODELIST:
3094 self._SelectSshCheckNodes(node_data_list, self.group_uuid,
3095 self.all_node_info.values()),
3096 constants.NV_HYPERVISOR: hypervisors,
3097 constants.NV_HVPARAMS:
3098 _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
3099 constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
3100 for node in node_data_list
3101 if not node.offline],
3102 constants.NV_INSTANCELIST: hypervisors,
3103 constants.NV_VERSION: None,
3104 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
3105 constants.NV_NODESETUP: None,
3106 constants.NV_TIME: None,
3107 constants.NV_MASTERIP: (master_node, master_ip),
3108 constants.NV_OSLIST: None,
3109 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
3110 constants.NV_USERSCRIPTS: user_scripts,
3113 if vg_name is not None:
3114 node_verify_param[constants.NV_VGLIST] = None
3115 node_verify_param[constants.NV_LVLIST] = vg_name
3116 node_verify_param[constants.NV_PVLIST] = [vg_name]
3117 node_verify_param[constants.NV_DRBDLIST] = None
3120 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
3123 # FIXME: this needs to be changed per node-group, not cluster-wide
3125 default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
3126 if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3127 bridges.add(default_nicpp[constants.NIC_LINK])
3128 for instance in self.my_inst_info.values():
3129 for nic in instance.nics:
3130 full_nic = cluster.SimpleFillNIC(nic.nicparams)
3131 if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3132 bridges.add(full_nic[constants.NIC_LINK])
3135 node_verify_param[constants.NV_BRIDGES] = list(bridges)
3137 # Build our expected cluster state
3138 node_image = dict((node.name, self.NodeImage(offline=node.offline,
3140 vm_capable=node.vm_capable))
3141 for node in node_data_list)
3145 for node in self.all_node_info.values():
3146 path = _SupportsOob(self.cfg, node)
3147 if path and path not in oob_paths:
3148 oob_paths.append(path)
3151 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
3153 for instance in self.my_inst_names:
3154 inst_config = self.my_inst_info[instance]
3155 if inst_config.admin_state == constants.ADMINST_OFFLINE:
3158 for nname in inst_config.all_nodes:
3159 if nname not in node_image:
3160 gnode = self.NodeImage(name=nname)
3161 gnode.ghost = (nname not in self.all_node_info)
3162 node_image[nname] = gnode
3164 inst_config.MapLVsByNode(node_vol_should)
3166 pnode = inst_config.primary_node
3167 node_image[pnode].pinst.append(instance)
3169 for snode in inst_config.secondary_nodes:
3170 nimg = node_image[snode]
3171 nimg.sinst.append(instance)
3172 if pnode not in nimg.sbp:
3173 nimg.sbp[pnode] = []
3174 nimg.sbp[pnode].append(instance)
3176 # At this point, we have the in-memory data structures complete,
3177 # except for the runtime information, which we'll gather next
3179 # Due to the way our RPC system works, exact response times cannot be
3180 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
3181 # time before and after executing the request, we can at least have a time
3183 nvinfo_starttime = time.time()
3184 all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
3186 self.cfg.GetClusterName())
3187 nvinfo_endtime = time.time()
3189 if self.extra_lv_nodes and vg_name is not None:
3191 self.rpc.call_node_verify(self.extra_lv_nodes,
3192 {constants.NV_LVLIST: vg_name},
3193 self.cfg.GetClusterName())
3195 extra_lv_nvinfo = {}
3197 all_drbd_map = self.cfg.ComputeDRBDMap()
3199 feedback_fn("* Gathering disk information (%s nodes)" %
3200 len(self.my_node_names))
3201 instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
3204 feedback_fn("* Verifying configuration file consistency")
3206 # If not all nodes are being checked, we need to make sure the master node
3207 # and a non-checked vm_capable node are in the list.
3208 absent_nodes = set(self.all_node_info).difference(self.my_node_info)
3210 vf_nvinfo = all_nvinfo.copy()
3211 vf_node_info = list(self.my_node_info.values())
3212 additional_nodes = []
3213 if master_node not in self.my_node_info:
3214 additional_nodes.append(master_node)
3215 vf_node_info.append(self.all_node_info[master_node])
3216 # Add the first vm_capable node we find which is not included
3217 for node in absent_nodes:
3218 nodeinfo = self.all_node_info[node]
3219 if nodeinfo.vm_capable and not nodeinfo.offline:
3220 additional_nodes.append(node)
3221 vf_node_info.append(self.all_node_info[node])
3223 key = constants.NV_FILELIST
3224 vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
3225 {key: node_verify_param[key]},
3226 self.cfg.GetClusterName()))
3228 vf_nvinfo = all_nvinfo
3229 vf_node_info = self.my_node_info.values()
3231 self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
3233 feedback_fn("* Verifying node status")
3237 for node_i in node_data_list:
3239 nimg = node_image[node]
3243 feedback_fn("* Skipping offline node %s" % (node,))
3247 if node == master_node:
3249 elif node_i.master_candidate:
3250 ntype = "master candidate"
3251 elif node_i.drained:
3257 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
3259 msg = all_nvinfo[node].fail_msg
3260 _ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
3263 nimg.rpc_fail = True
3266 nresult = all_nvinfo[node].payload
3268 nimg.call_ok = self._VerifyNode(node_i, nresult)
3269 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
3270 self._VerifyNodeNetwork(node_i, nresult)
3271 self._VerifyNodeUserScripts(node_i, nresult)
3272 self._VerifyOob(node_i, nresult)
3275 self._VerifyNodeLVM(node_i, nresult, vg_name)
3276 self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
3279 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
3280 self._UpdateNodeInstances(node_i, nresult, nimg)
3281 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
3282 self._UpdateNodeOS(node_i, nresult, nimg)
3284 if not nimg.os_fail:
3285 if refos_img is None:
3287 self._VerifyNodeOS(node_i, nimg, refos_img)
3288 self._VerifyNodeBridges(node_i, nresult, bridges)
3290 # Check whether all running instancies are primary for the node. (This
3291 # can no longer be done from _VerifyInstance below, since some of the
3292 # wrong instances could be from other node groups.)
3293 non_primary_inst = set(nimg.instances).difference(nimg.pinst)
3295 for inst in non_primary_inst:
3296 test = inst in self.all_inst_info
3297 _ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
3298 "instance should not run on node %s", node_i.name)
3299 _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
3300 "node is running unknown instance %s", inst)
3302 for node, result in extra_lv_nvinfo.items():
3303 self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
3304 node_image[node], vg_name)
3306 feedback_fn("* Verifying instance status")
3307 for instance in self.my_inst_names:
3309 feedback_fn("* Verifying instance %s" % instance)
3310 inst_config = self.my_inst_info[instance]
3311 self._VerifyInstance(instance, inst_config, node_image,
3313 inst_nodes_offline = []
3315 pnode = inst_config.primary_node
3316 pnode_img = node_image[pnode]
3317 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
3318 constants.CV_ENODERPC, pnode, "instance %s, connection to"
3319 " primary node failed", instance)
3321 _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
3323 constants.CV_EINSTANCEBADNODE, instance,
3324 "instance is marked as running and lives on offline node %s",
3325 inst_config.primary_node)
3327 # If the instance is non-redundant we cannot survive losing its primary
3328 # node, so we are not N+1 compliant. On the other hand we have no disk
3329 # templates with more than one secondary so that situation is not well
3331 # FIXME: does not support file-backed instances
3332 if not inst_config.secondary_nodes:
3333 i_non_redundant.append(instance)
3335 _ErrorIf(len(inst_config.secondary_nodes) > 1,
3336 constants.CV_EINSTANCELAYOUT,
3337 instance, "instance has multiple secondary nodes: %s",
3338 utils.CommaJoin(inst_config.secondary_nodes),
3339 code=self.ETYPE_WARNING)
3341 if inst_config.disk_template in constants.DTS_INT_MIRROR:
3342 pnode = inst_config.primary_node
3343 instance_nodes = utils.NiceSort(inst_config.all_nodes)
3344 instance_groups = {}
3346 for node in instance_nodes:
3347 instance_groups.setdefault(self.all_node_info[node].group,
3351 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
3352 # Sort so that we always list the primary node first.
3353 for group, nodes in sorted(instance_groups.items(),
3354 key=lambda (_, nodes): pnode in nodes,
3357 self._ErrorIf(len(instance_groups) > 1,
3358 constants.CV_EINSTANCESPLITGROUPS,
3359 instance, "instance has primary and secondary nodes in"
3360 " different groups: %s", utils.CommaJoin(pretty_list),
3361 code=self.ETYPE_WARNING)
3363 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
3364 i_non_a_balanced.append(instance)
3366 for snode in inst_config.secondary_nodes:
3367 s_img = node_image[snode]
3368 _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
3369 snode, "instance %s, connection to secondary node failed",
3373 inst_nodes_offline.append(snode)
3375 # warn that the instance lives on offline nodes
3376 _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
3377 "instance has offline secondary node(s) %s",
3378 utils.CommaJoin(inst_nodes_offline))
3379 # ... or ghost/non-vm_capable nodes
3380 for node in inst_config.all_nodes:
3381 _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
3382 instance, "instance lives on ghost node %s", node)
3383 _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
3384 instance, "instance lives on non-vm_capable node %s", node)
3386 feedback_fn("* Verifying orphan volumes")
3387 reserved = utils.FieldSet(*cluster.reserved_lvs)
3389 # We will get spurious "unknown volume" warnings if any node of this group
3390 # is secondary for an instance whose primary is in another group. To avoid
3391 # them, we find these instances and add their volumes to node_vol_should.
3392 for inst in self.all_inst_info.values():
3393 for secondary in inst.secondary_nodes:
3394 if (secondary in self.my_node_info
3395 and inst.name not in self.my_inst_info):
3396 inst.MapLVsByNode(node_vol_should)
3399 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
3401 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
3402 feedback_fn("* Verifying N+1 Memory redundancy")
3403 self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
3405 feedback_fn("* Other Notes")
3407 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
3408 % len(i_non_redundant))
3410 if i_non_a_balanced:
3411 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
3412 % len(i_non_a_balanced))
3415 feedback_fn(" - NOTICE: %d offline instance(s) found." % i_offline)
3418 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
3421 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
3425 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
3426 """Analyze the post-hooks' result
3428 This method analyses the hook result, handles it, and sends some
3429 nicely-formatted feedback back to the user.
3431 @param phase: one of L{constants.HOOKS_PHASE_POST} or
3432 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
3433 @param hooks_results: the results of the multi-node hooks rpc call
3434 @param feedback_fn: function used send feedback back to the caller
3435 @param lu_result: previous Exec result
3436 @return: the new Exec result, based on the previous result
3440 # We only really run POST phase hooks, only for non-empty groups,
3441 # and are only interested in their results
3442 if not self.my_node_names:
3445 elif phase == constants.HOOKS_PHASE_POST:
3446 # Used to change hooks' output to proper indentation
3447 feedback_fn("* Hooks Results")
3448 assert hooks_results, "invalid result from hooks"
3450 for node_name in hooks_results:
3451 res = hooks_results[node_name]
3453 test = msg and not res.offline
3454 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3455 "Communication failure in hooks execution: %s", msg)
3456 if res.offline or msg:
3457 # No need to investigate payload if node is offline or gave
3460 for script, hkr, output in res.payload:
3461 test = hkr == constants.HKR_FAIL
3462 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3463 "Script %s failed, output:", script)
3465 output = self._HOOKS_INDENT_RE.sub(" ", output)
3466 feedback_fn("%s" % output)
3472 class LUClusterVerifyDisks(NoHooksLU):
3473 """Verifies the cluster disks status.
3478 def ExpandNames(self):
3479 self.share_locks = _ShareAll()
3480 self.needed_locks = {
3481 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3484 def Exec(self, feedback_fn):
3485 group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
3487 # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
3488 return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
3489 for group in group_names])
3492 class LUGroupVerifyDisks(NoHooksLU):
3493 """Verifies the status of all disks in a node group.
3498 def ExpandNames(self):
3499 # Raises errors.OpPrereqError on its own if group can't be found
3500 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
3502 self.share_locks = _ShareAll()
3503 self.needed_locks = {
3504 locking.LEVEL_INSTANCE: [],
3505 locking.LEVEL_NODEGROUP: [],
3506 locking.LEVEL_NODE: [],
3509 def DeclareLocks(self, level):
3510 if level == locking.LEVEL_INSTANCE:
3511 assert not self.needed_locks[locking.LEVEL_INSTANCE]
3513 # Lock instances optimistically, needs verification once node and group
3514 # locks have been acquired
3515 self.needed_locks[locking.LEVEL_INSTANCE] = \
3516 self.cfg.GetNodeGroupInstances(self.group_uuid)
3518 elif level == locking.LEVEL_NODEGROUP:
3519 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3521 self.needed_locks[locking.LEVEL_NODEGROUP] = \
3522 set([self.group_uuid] +
3523 # Lock all groups used by instances optimistically; this requires
3524 # going via the node before it's locked, requiring verification
3527 for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3528 for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
3530 elif level == locking.LEVEL_NODE:
3531 # This will only lock the nodes in the group to be verified which contain
3533 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3534 self._LockInstancesNodes()
3536 # Lock all nodes in group to be verified
3537 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
3538 member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
3539 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3541 def CheckPrereq(self):
3542 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3543 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3544 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3546 assert self.group_uuid in owned_groups
3548 # Check if locked instances are still correct
3549 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
3551 # Get instance information
3552 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
3554 # Check if node groups for locked instances are still correct
3555 _CheckInstancesNodeGroups(self.cfg, self.instances,
3556 owned_groups, owned_nodes, self.group_uuid)
3558 def Exec(self, feedback_fn):
3559 """Verify integrity of cluster disks.
3561 @rtype: tuple of three items
3562 @return: a tuple of (dict of node-to-node_error, list of instances
3563 which need activate-disks, dict of instance: (node, volume) for
3568 res_instances = set()
3571 nv_dict = _MapInstanceDisksToNodes([inst
3572 for inst in self.instances.values()
3573 if inst.admin_state == constants.ADMINST_UP])
3576 nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
3577 set(self.cfg.GetVmCapableNodeList()))
3579 node_lvs = self.rpc.call_lv_list(nodes, [])
3581 for (node, node_res) in node_lvs.items():
3582 if node_res.offline:
3585 msg = node_res.fail_msg
3587 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
3588 res_nodes[node] = msg
3591 for lv_name, (_, _, lv_online) in node_res.payload.items():
3592 inst = nv_dict.pop((node, lv_name), None)
3593 if not (lv_online or inst is None):
3594 res_instances.add(inst)
3596 # any leftover items in nv_dict are missing LVs, let's arrange the data
3598 for key, inst in nv_dict.iteritems():
3599 res_missing.setdefault(inst, []).append(list(key))
3601 return (res_nodes, list(res_instances), res_missing)
3604 class LUClusterRepairDiskSizes(NoHooksLU):
3605 """Verifies the cluster disks sizes.
3610 def ExpandNames(self):
3611 if self.op.instances:
3612 self.wanted_names = _GetWantedInstances(self, self.op.instances)
3613 self.needed_locks = {
3614 locking.LEVEL_NODE_RES: [],
3615 locking.LEVEL_INSTANCE: self.wanted_names,
3617 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
3619 self.wanted_names = None
3620 self.needed_locks = {
3621 locking.LEVEL_NODE_RES: locking.ALL_SET,
3622 locking.LEVEL_INSTANCE: locking.ALL_SET,
3624 self.share_locks = {
3625 locking.LEVEL_NODE_RES: 1,
3626 locking.LEVEL_INSTANCE: 0,
3629 def DeclareLocks(self, level):
3630 if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
3631 self._LockInstancesNodes(primary_only=True, level=level)
3633 def CheckPrereq(self):
3634 """Check prerequisites.
3636 This only checks the optional instance list against the existing names.
3639 if self.wanted_names is None:
3640 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
3642 self.wanted_instances = \
3643 map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
3645 def _EnsureChildSizes(self, disk):
3646 """Ensure children of the disk have the needed disk size.
3648 This is valid mainly for DRBD8 and fixes an issue where the
3649 children have smaller disk size.
3651 @param disk: an L{ganeti.objects.Disk} object
3654 if disk.dev_type == constants.LD_DRBD8:
3655 assert disk.children, "Empty children for DRBD8?"
3656 fchild = disk.children[0]
3657 mismatch = fchild.size < disk.size
3659 self.LogInfo("Child disk has size %d, parent %d, fixing",
3660 fchild.size, disk.size)
3661 fchild.size = disk.size
3663 # and we recurse on this child only, not on the metadev
3664 return self._EnsureChildSizes(fchild) or mismatch
3668 def Exec(self, feedback_fn):
3669 """Verify the size of cluster disks.
3672 # TODO: check child disks too
3673 # TODO: check differences in size between primary/secondary nodes
3675 for instance in self.wanted_instances:
3676 pnode = instance.primary_node
3677 if pnode not in per_node_disks:
3678 per_node_disks[pnode] = []
3679 for idx, disk in enumerate(instance.disks):
3680 per_node_disks[pnode].append((instance, idx, disk))
3682 assert not (frozenset(per_node_disks.keys()) -
3683 self.owned_locks(locking.LEVEL_NODE_RES)), \
3684 "Not owning correct locks"
3685 assert not self.owned_locks(locking.LEVEL_NODE)
3688 for node, dskl in per_node_disks.items():
3689 newl = [v[2].Copy() for v in dskl]
3691 self.cfg.SetDiskID(dsk, node)
3692 result = self.rpc.call_blockdev_getsize(node, newl)
3694 self.LogWarning("Failure in blockdev_getsize call to node"
3695 " %s, ignoring", node)
3697 if len(result.payload) != len(dskl):
3698 logging.warning("Invalid result from node %s: len(dksl)=%d,"
3699 " result.payload=%s", node, len(dskl), result.payload)
3700 self.LogWarning("Invalid result from node %s, ignoring node results",
3703 for ((instance, idx, disk), size) in zip(dskl, result.payload):
3705 self.LogWarning("Disk %d of instance %s did not return size"
3706 " information, ignoring", idx, instance.name)
3708 if not isinstance(size, (int, long)):
3709 self.LogWarning("Disk %d of instance %s did not return valid"
3710 " size information, ignoring", idx, instance.name)
3713 if size != disk.size:
3714 self.LogInfo("Disk %d of instance %s has mismatched size,"
3715 " correcting: recorded %d, actual %d", idx,
3716 instance.name, disk.size, size)
3718 self.cfg.Update(instance, feedback_fn)
3719 changed.append((instance.name, idx, size))
3720 if self._EnsureChildSizes(disk):
3721 self.cfg.Update(instance, feedback_fn)
3722 changed.append((instance.name, idx, disk.size))
3726 class LUClusterRename(LogicalUnit):
3727 """Rename the cluster.
3730 HPATH = "cluster-rename"
3731 HTYPE = constants.HTYPE_CLUSTER
3733 def BuildHooksEnv(self):
3738 "OP_TARGET": self.cfg.GetClusterName(),
3739 "NEW_NAME": self.op.name,
3742 def BuildHooksNodes(self):
3743 """Build hooks nodes.
3746 return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
3748 def CheckPrereq(self):
3749 """Verify that the passed name is a valid one.
3752 hostname = netutils.GetHostname(name=self.op.name,
3753 family=self.cfg.GetPrimaryIPFamily())
3755 new_name = hostname.name
3756 self.ip = new_ip = hostname.ip
3757 old_name = self.cfg.GetClusterName()
3758 old_ip = self.cfg.GetMasterIP()
3759 if new_name == old_name and new_ip == old_ip:
3760 raise errors.OpPrereqError("Neither the name nor the IP address of the"
3761 " cluster has changed",
3763 if new_ip != old_ip:
3764 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
3765 raise errors.OpPrereqError("The given cluster IP address (%s) is"
3766 " reachable on the network" %
3767 new_ip, errors.ECODE_NOTUNIQUE)
3769 self.op.name = new_name
3771 def Exec(self, feedback_fn):
3772 """Rename the cluster.
3775 clustername = self.op.name
3778 # shutdown the master IP
3779 master_params = self.cfg.GetMasterNetworkParameters()
3780 ems = self.cfg.GetUseExternalMipScript()
3781 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
3783 result.Raise("Could not disable the master role")
3786 cluster = self.cfg.GetClusterInfo()
3787 cluster.cluster_name = clustername
3788 cluster.master_ip = new_ip
3789 self.cfg.Update(cluster, feedback_fn)
3791 # update the known hosts file
3792 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
3793 node_list = self.cfg.GetOnlineNodeList()
3795 node_list.remove(master_params.name)
3798 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
3800 master_params.ip = new_ip
3801 result = self.rpc.call_node_activate_master_ip(master_params.name,
3803 msg = result.fail_msg
3805 self.LogWarning("Could not re-enable the master role on"
3806 " the master, please restart manually: %s", msg)
3811 def _ValidateNetmask(cfg, netmask):
3812 """Checks if a netmask is valid.
3814 @type cfg: L{config.ConfigWriter}
3815 @param cfg: The cluster configuration
3817 @param netmask: the netmask to be verified
3818 @raise errors.OpPrereqError: if the validation fails
3821 ip_family = cfg.GetPrimaryIPFamily()
3823 ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
3824 except errors.ProgrammerError:
3825 raise errors.OpPrereqError("Invalid primary ip family: %s." %
3827 if not ipcls.ValidateNetmask(netmask):
3828 raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
3832 class LUClusterSetParams(LogicalUnit):
3833 """Change the parameters of the cluster.
3836 HPATH = "cluster-modify"
3837 HTYPE = constants.HTYPE_CLUSTER
3840 def CheckArguments(self):
3844 if self.op.uid_pool:
3845 uidpool.CheckUidPool(self.op.uid_pool)
3847 if self.op.add_uids:
3848 uidpool.CheckUidPool(self.op.add_uids)
3850 if self.op.remove_uids:
3851 uidpool.CheckUidPool(self.op.remove_uids)
3853 if self.op.master_netmask is not None:
3854 _ValidateNetmask(self.cfg, self.op.master_netmask)
3856 if self.op.diskparams:
3857 for dt_params in self.op.diskparams.values():
3858 utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
3860 def ExpandNames(self):
3861 # FIXME: in the future maybe other cluster params won't require checking on
3862 # all nodes to be modified.
3863 self.needed_locks = {
3864 locking.LEVEL_NODE: locking.ALL_SET,
3865 locking.LEVEL_INSTANCE: locking.ALL_SET,
3866 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3868 self.share_locks = {
3869 locking.LEVEL_NODE: 1,
3870 locking.LEVEL_INSTANCE: 1,
3871 locking.LEVEL_NODEGROUP: 1,
3874 def BuildHooksEnv(self):
3879 "OP_TARGET": self.cfg.GetClusterName(),
3880 "NEW_VG_NAME": self.op.vg_name,
3883 def BuildHooksNodes(self):
3884 """Build hooks nodes.
3887 mn = self.cfg.GetMasterNode()
3890 def CheckPrereq(self):
3891 """Check prerequisites.
3893 This checks whether the given params don't conflict and
3894 if the given volume group is valid.
3897 if self.op.vg_name is not None and not self.op.vg_name:
3898 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
3899 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
3900 " instances exist", errors.ECODE_INVAL)
3902 if self.op.drbd_helper is not None and not self.op.drbd_helper:
3903 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
3904 raise errors.OpPrereqError("Cannot disable drbd helper while"
3905 " drbd-based instances exist",
3908 node_list = self.owned_locks(locking.LEVEL_NODE)
3910 # if vg_name not None, checks given volume group on all nodes
3912 vglist = self.rpc.call_vg_list(node_list)
3913 for node in node_list:
3914 msg = vglist[node].fail_msg
3916 # ignoring down node
3917 self.LogWarning("Error while gathering data on node %s"
3918 " (ignoring node): %s", node, msg)
3920 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
3922 constants.MIN_VG_SIZE)
3924 raise errors.OpPrereqError("Error on node '%s': %s" %
3925 (node, vgstatus), errors.ECODE_ENVIRON)
3927 if self.op.drbd_helper:
3928 # checks given drbd helper on all nodes
3929 helpers = self.rpc.call_drbd_helper(node_list)
3930 for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
3932 self.LogInfo("Not checking drbd helper on offline node %s", node)
3934 msg = helpers[node].fail_msg
3936 raise errors.OpPrereqError("Error checking drbd helper on node"
3937 " '%s': %s" % (node, msg),
3938 errors.ECODE_ENVIRON)
3939 node_helper = helpers[node].payload
3940 if node_helper != self.op.drbd_helper:
3941 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
3942 (node, node_helper), errors.ECODE_ENVIRON)
3944 self.cluster = cluster = self.cfg.GetClusterInfo()
3945 # validate params changes
3946 if self.op.beparams:
3947 objects.UpgradeBeParams(self.op.beparams)
3948 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
3949 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
3951 if self.op.ndparams:
3952 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
3953 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
3955 # TODO: we need a more general way to handle resetting
3956 # cluster-level parameters to default values
3957 if self.new_ndparams["oob_program"] == "":
3958 self.new_ndparams["oob_program"] = \
3959 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
3961 if self.op.hv_state:
3962 new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
3963 self.cluster.hv_state_static)
3964 self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
3965 for hv, values in new_hv_state.items())
3967 if self.op.disk_state:
3968 new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
3969 self.cluster.disk_state_static)
3970 self.new_disk_state = \
3971 dict((storage, dict((name, cluster.SimpleFillDiskState(values))
3972 for name, values in svalues.items()))
3973 for storage, svalues in new_disk_state.items())
3976 self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
3979 all_instances = self.cfg.GetAllInstancesInfo().values()
3981 for group in self.cfg.GetAllNodeGroupsInfo().values():
3982 instances = frozenset([inst for inst in all_instances
3983 if compat.any(node in group.members
3984 for node in inst.all_nodes)])
3985 new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
3986 new = _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
3988 new_ipolicy, instances)
3990 violations.update(new)
3993 self.LogWarning("After the ipolicy change the following instances"
3994 " violate them: %s",
3995 utils.CommaJoin(utils.NiceSort(violations)))
3997 if self.op.nicparams:
3998 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
3999 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
4000 objects.NIC.CheckParameterSyntax(self.new_nicparams)
4003 # check all instances for consistency
4004 for instance in self.cfg.GetAllInstancesInfo().values():
4005 for nic_idx, nic in enumerate(instance.nics):
4006 params_copy = copy.deepcopy(nic.nicparams)
4007 params_filled = objects.FillDict(self.new_nicparams, params_copy)
4009 # check parameter syntax
4011 objects.NIC.CheckParameterSyntax(params_filled)
4012 except errors.ConfigurationError, err:
4013 nic_errors.append("Instance %s, nic/%d: %s" %
4014 (instance.name, nic_idx, err))
4016 # if we're moving instances to routed, check that they have an ip
4017 target_mode = params_filled[constants.NIC_MODE]
4018 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
4019 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
4020 " address" % (instance.name, nic_idx))
4022 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
4023 "\n".join(nic_errors))
4025 # hypervisor list/parameters
4026 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
4027 if self.op.hvparams:
4028 for hv_name, hv_dict in self.op.hvparams.items():
4029 if hv_name not in self.new_hvparams:
4030 self.new_hvparams[hv_name] = hv_dict
4032 self.new_hvparams[hv_name].update(hv_dict)
4034 # disk template parameters
4035 self.new_diskparams = objects.FillDict(cluster.diskparams, {})
4036 if self.op.diskparams:
4037 for dt_name, dt_params in self.op.diskparams.items():
4038 if dt_name not in self.op.diskparams:
4039 self.new_diskparams[dt_name] = dt_params
4041 self.new_diskparams[dt_name].update(dt_params)
4043 # os hypervisor parameters
4044 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
4046 for os_name, hvs in self.op.os_hvp.items():
4047 if os_name not in self.new_os_hvp:
4048 self.new_os_hvp[os_name] = hvs
4050 for hv_name, hv_dict in hvs.items():
4051 if hv_name not in self.new_os_hvp[os_name]:
4052 self.new_os_hvp[os_name][hv_name] = hv_dict
4054 self.new_os_hvp[os_name][hv_name].update(hv_dict)
4057 self.new_osp = objects.FillDict(cluster.osparams, {})
4058 if self.op.osparams:
4059 for os_name, osp in self.op.osparams.items():
4060 if os_name not in self.new_osp:
4061 self.new_osp[os_name] = {}
4063 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
4066 if not self.new_osp[os_name]:
4067 # we removed all parameters
4068 del self.new_osp[os_name]
4070 # check the parameter validity (remote check)
4071 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
4072 os_name, self.new_osp[os_name])
4074 # changes to the hypervisor list
4075 if self.op.enabled_hypervisors is not None:
4076 self.hv_list = self.op.enabled_hypervisors
4077 for hv in self.hv_list:
4078 # if the hypervisor doesn't already exist in the cluster
4079 # hvparams, we initialize it to empty, and then (in both
4080 # cases) we make sure to fill the defaults, as we might not
4081 # have a complete defaults list if the hypervisor wasn't
4083 if hv not in new_hvp:
4085 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
4086 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
4088 self.hv_list = cluster.enabled_hypervisors
4090 if self.op.hvparams or self.op.enabled_hypervisors is not None:
4091 # either the enabled list has changed, or the parameters have, validate
4092 for hv_name, hv_params in self.new_hvparams.items():
4093 if ((self.op.hvparams and hv_name in self.op.hvparams) or
4094 (self.op.enabled_hypervisors and
4095 hv_name in self.op.enabled_hypervisors)):
4096 # either this is a new hypervisor, or its parameters have changed
4097 hv_class = hypervisor.GetHypervisor(hv_name)
4098 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4099 hv_class.CheckParameterSyntax(hv_params)
4100 _CheckHVParams(self, node_list, hv_name, hv_params)
4103 # no need to check any newly-enabled hypervisors, since the
4104 # defaults have already been checked in the above code-block
4105 for os_name, os_hvp in self.new_os_hvp.items():
4106 for hv_name, hv_params in os_hvp.items():
4107 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4108 # we need to fill in the new os_hvp on top of the actual hv_p
4109 cluster_defaults = self.new_hvparams.get(hv_name, {})
4110 new_osp = objects.FillDict(cluster_defaults, hv_params)
4111 hv_class = hypervisor.GetHypervisor(hv_name)
4112 hv_class.CheckParameterSyntax(new_osp)
4113 _CheckHVParams(self, node_list, hv_name, new_osp)
4115 if self.op.default_iallocator:
4116 alloc_script = utils.FindFile(self.op.default_iallocator,
4117 constants.IALLOCATOR_SEARCH_PATH,
4119 if alloc_script is None:
4120 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
4121 " specified" % self.op.default_iallocator,
4124 def Exec(self, feedback_fn):
4125 """Change the parameters of the cluster.
4128 if self.op.vg_name is not None:
4129 new_volume = self.op.vg_name
4132 if new_volume != self.cfg.GetVGName():
4133 self.cfg.SetVGName(new_volume)
4135 feedback_fn("Cluster LVM configuration already in desired"
4136 " state, not changing")
4137 if self.op.drbd_helper is not None:
4138 new_helper = self.op.drbd_helper
4141 if new_helper != self.cfg.GetDRBDHelper():
4142 self.cfg.SetDRBDHelper(new_helper)
4144 feedback_fn("Cluster DRBD helper already in desired state,"
4146 if self.op.hvparams:
4147 self.cluster.hvparams = self.new_hvparams
4149 self.cluster.os_hvp = self.new_os_hvp
4150 if self.op.enabled_hypervisors is not None:
4151 self.cluster.hvparams = self.new_hvparams
4152 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
4153 if self.op.beparams:
4154 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
4155 if self.op.nicparams:
4156 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
4158 self.cluster.ipolicy = self.new_ipolicy
4159 if self.op.osparams:
4160 self.cluster.osparams = self.new_osp
4161 if self.op.ndparams:
4162 self.cluster.ndparams = self.new_ndparams
4163 if self.op.diskparams:
4164 self.cluster.diskparams = self.new_diskparams
4165 if self.op.hv_state:
4166 self.cluster.hv_state_static = self.new_hv_state
4167 if self.op.disk_state:
4168 self.cluster.disk_state_static = self.new_disk_state
4170 if self.op.candidate_pool_size is not None:
4171 self.cluster.candidate_pool_size = self.op.candidate_pool_size
4172 # we need to update the pool size here, otherwise the save will fail
4173 _AdjustCandidatePool(self, [])
4175 if self.op.maintain_node_health is not None:
4176 if self.op.maintain_node_health and not constants.ENABLE_CONFD:
4177 feedback_fn("Note: CONFD was disabled at build time, node health"
4178 " maintenance is not useful (still enabling it)")
4179 self.cluster.maintain_node_health = self.op.maintain_node_health
4181 if self.op.prealloc_wipe_disks is not None:
4182 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
4184 if self.op.add_uids is not None:
4185 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
4187 if self.op.remove_uids is not None:
4188 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
4190 if self.op.uid_pool is not None:
4191 self.cluster.uid_pool = self.op.uid_pool
4193 if self.op.default_iallocator is not None:
4194 self.cluster.default_iallocator = self.op.default_iallocator
4196 if self.op.reserved_lvs is not None:
4197 self.cluster.reserved_lvs = self.op.reserved_lvs
4199 if self.op.use_external_mip_script is not None:
4200 self.cluster.use_external_mip_script = self.op.use_external_mip_script
4202 def helper_os(aname, mods, desc):
4204 lst = getattr(self.cluster, aname)
4205 for key, val in mods:
4206 if key == constants.DDM_ADD:
4208 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
4211 elif key == constants.DDM_REMOVE:
4215 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
4217 raise errors.ProgrammerError("Invalid modification '%s'" % key)
4219 if self.op.hidden_os:
4220 helper_os("hidden_os", self.op.hidden_os, "hidden")
4222 if self.op.blacklisted_os:
4223 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
4225 if self.op.master_netdev:
4226 master_params = self.cfg.GetMasterNetworkParameters()
4227 ems = self.cfg.GetUseExternalMipScript()
4228 feedback_fn("Shutting down master ip on the current netdev (%s)" %
4229 self.cluster.master_netdev)
4230 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4232 result.Raise("Could not disable the master ip")
4233 feedback_fn("Changing master_netdev from %s to %s" %
4234 (master_params.netdev, self.op.master_netdev))
4235 self.cluster.master_netdev = self.op.master_netdev
4237 if self.op.master_netmask:
4238 master_params = self.cfg.GetMasterNetworkParameters()
4239 feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
4240 result = self.rpc.call_node_change_master_netmask(master_params.name,
4241 master_params.netmask,
4242 self.op.master_netmask,
4244 master_params.netdev)
4246 msg = "Could not change the master IP netmask: %s" % result.fail_msg
4249 self.cluster.master_netmask = self.op.master_netmask
4251 self.cfg.Update(self.cluster, feedback_fn)
4253 if self.op.master_netdev:
4254 master_params = self.cfg.GetMasterNetworkParameters()
4255 feedback_fn("Starting the master ip on the new master netdev (%s)" %
4256 self.op.master_netdev)
4257 ems = self.cfg.GetUseExternalMipScript()
4258 result = self.rpc.call_node_activate_master_ip(master_params.name,
4261 self.LogWarning("Could not re-enable the master ip on"
4262 " the master, please restart manually: %s",
4266 def _UploadHelper(lu, nodes, fname):
4267 """Helper for uploading a file and showing warnings.
4270 if os.path.exists(fname):
4271 result = lu.rpc.call_upload_file(nodes, fname)
4272 for to_node, to_result in result.items():
4273 msg = to_result.fail_msg
4275 msg = ("Copy of file %s to node %s failed: %s" %
4276 (fname, to_node, msg))
4277 lu.proc.LogWarning(msg)
4280 def _ComputeAncillaryFiles(cluster, redist):
4281 """Compute files external to Ganeti which need to be consistent.
4283 @type redist: boolean
4284 @param redist: Whether to include files which need to be redistributed
4287 # Compute files for all nodes
4289 constants.SSH_KNOWN_HOSTS_FILE,
4290 constants.CONFD_HMAC_KEY,
4291 constants.CLUSTER_DOMAIN_SECRET_FILE,
4292 constants.SPICE_CERT_FILE,
4293 constants.SPICE_CACERT_FILE,
4294 constants.RAPI_USERS_FILE,
4298 files_all.update(constants.ALL_CERT_FILES)
4299 files_all.update(ssconf.SimpleStore().GetFileList())
4301 # we need to ship at least the RAPI certificate
4302 files_all.add(constants.RAPI_CERT_FILE)
4304 if cluster.modify_etc_hosts:
4305 files_all.add(constants.ETC_HOSTS)
4307 # Files which are optional, these must:
4308 # - be present in one other category as well
4309 # - either exist or not exist on all nodes of that category (mc, vm all)
4311 constants.RAPI_USERS_FILE,
4314 # Files which should only be on master candidates
4318 files_mc.add(constants.CLUSTER_CONF_FILE)
4320 # FIXME: this should also be replicated but Ganeti doesn't support files_mc
4322 files_mc.add(constants.DEFAULT_MASTER_SETUP_SCRIPT)
4324 # Files which should only be on VM-capable nodes
4325 files_vm = set(filename
4326 for hv_name in cluster.enabled_hypervisors
4327 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
4329 files_opt |= set(filename
4330 for hv_name in cluster.enabled_hypervisors
4331 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
4333 # Filenames in each category must be unique
4334 all_files_set = files_all | files_mc | files_vm
4335 assert (len(all_files_set) ==
4336 sum(map(len, [files_all, files_mc, files_vm]))), \
4337 "Found file listed in more than one file list"
4339 # Optional files must be present in one other category
4340 assert all_files_set.issuperset(files_opt), \
4341 "Optional file not in a different required list"
4343 return (files_all, files_opt, files_mc, files_vm)
4346 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
4347 """Distribute additional files which are part of the cluster configuration.
4349 ConfigWriter takes care of distributing the config and ssconf files, but
4350 there are more files which should be distributed to all nodes. This function
4351 makes sure those are copied.
4353 @param lu: calling logical unit
4354 @param additional_nodes: list of nodes not in the config to distribute to
4355 @type additional_vm: boolean
4356 @param additional_vm: whether the additional nodes are vm-capable or not
4359 # Gather target nodes
4360 cluster = lu.cfg.GetClusterInfo()
4361 master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
4363 online_nodes = lu.cfg.GetOnlineNodeList()
4364 vm_nodes = lu.cfg.GetVmCapableNodeList()
4366 if additional_nodes is not None:
4367 online_nodes.extend(additional_nodes)
4369 vm_nodes.extend(additional_nodes)
4371 # Never distribute to master node
4372 for nodelist in [online_nodes, vm_nodes]:
4373 if master_info.name in nodelist:
4374 nodelist.remove(master_info.name)
4377 (files_all, _, files_mc, files_vm) = \
4378 _ComputeAncillaryFiles(cluster, True)
4380 # Never re-distribute configuration file from here
4381 assert not (constants.CLUSTER_CONF_FILE in files_all or
4382 constants.CLUSTER_CONF_FILE in files_vm)
4383 assert not files_mc, "Master candidates not handled in this function"
4386 (online_nodes, files_all),
4387 (vm_nodes, files_vm),
4391 for (node_list, files) in filemap:
4393 _UploadHelper(lu, node_list, fname)
4396 class LUClusterRedistConf(NoHooksLU):
4397 """Force the redistribution of cluster configuration.
4399 This is a very simple LU.
4404 def ExpandNames(self):
4405 self.needed_locks = {
4406 locking.LEVEL_NODE: locking.ALL_SET,
4408 self.share_locks[locking.LEVEL_NODE] = 1
4410 def Exec(self, feedback_fn):
4411 """Redistribute the configuration.
4414 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
4415 _RedistributeAncillaryFiles(self)
4418 class LUClusterActivateMasterIp(NoHooksLU):
4419 """Activate the master IP on the master node.
4422 def Exec(self, feedback_fn):
4423 """Activate the master IP.
4426 master_params = self.cfg.GetMasterNetworkParameters()
4427 ems = self.cfg.GetUseExternalMipScript()
4428 result = self.rpc.call_node_activate_master_ip(master_params.name,
4430 result.Raise("Could not activate the master IP")
4433 class LUClusterDeactivateMasterIp(NoHooksLU):
4434 """Deactivate the master IP on the master node.
4437 def Exec(self, feedback_fn):
4438 """Deactivate the master IP.
4441 master_params = self.cfg.GetMasterNetworkParameters()
4442 ems = self.cfg.GetUseExternalMipScript()
4443 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4445 result.Raise("Could not deactivate the master IP")
4448 def _WaitForSync(lu, instance, disks=None, oneshot=False):
4449 """Sleep and poll for an instance's disk to sync.
4452 if not instance.disks or disks is not None and not disks:
4455 disks = _ExpandCheckDisks(instance, disks)
4458 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
4460 node = instance.primary_node
4463 lu.cfg.SetDiskID(dev, node)
4465 # TODO: Convert to utils.Retry
4468 degr_retries = 10 # in seconds, as we sleep 1 second each time
4472 cumul_degraded = False
4473 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
4474 msg = rstats.fail_msg
4476 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
4479 raise errors.RemoteError("Can't contact node %s for mirror data,"
4480 " aborting." % node)
4483 rstats = rstats.payload
4485 for i, mstat in enumerate(rstats):
4487 lu.LogWarning("Can't compute data for node %s/%s",
4488 node, disks[i].iv_name)
4491 cumul_degraded = (cumul_degraded or
4492 (mstat.is_degraded and mstat.sync_percent is None))
4493 if mstat.sync_percent is not None:
4495 if mstat.estimated_time is not None:
4496 rem_time = ("%s remaining (estimated)" %
4497 utils.FormatSeconds(mstat.estimated_time))
4498 max_time = mstat.estimated_time
4500 rem_time = "no time estimate"
4501 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
4502 (disks[i].iv_name, mstat.sync_percent, rem_time))
4504 # if we're done but degraded, let's do a few small retries, to
4505 # make sure we see a stable and not transient situation; therefore
4506 # we force restart of the loop
4507 if (done or oneshot) and cumul_degraded and degr_retries > 0:
4508 logging.info("Degraded disks found, %d retries left", degr_retries)
4516 time.sleep(min(60, max_time))
4519 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
4520 return not cumul_degraded
4523 def _BlockdevFind(lu, node, dev, instance):
4524 """Wrapper around call_blockdev_find to annotate diskparams.
4526 @param lu: A reference to the lu object
4527 @param node: The node to call out
4528 @param dev: The device to find
4529 @param instance: The instance object the device belongs to
4530 @returns The result of the rpc call
4533 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4534 return lu.rpc.call_blockdev_find(node, disk)
4537 def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
4538 """Wrapper around L{_CheckDiskConsistencyInner}.
4541 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4542 return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
4546 def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
4548 """Check that mirrors are not degraded.
4550 @attention: The device has to be annotated already.
4552 The ldisk parameter, if True, will change the test from the
4553 is_degraded attribute (which represents overall non-ok status for
4554 the device(s)) to the ldisk (representing the local storage status).
4557 lu.cfg.SetDiskID(dev, node)
4561 if on_primary or dev.AssembleOnSecondary():
4562 rstats = lu.rpc.call_blockdev_find(node, dev)
4563 msg = rstats.fail_msg
4565 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
4567 elif not rstats.payload:
4568 lu.LogWarning("Can't find disk on node %s", node)
4572 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
4574 result = result and not rstats.payload.is_degraded
4577 for child in dev.children:
4578 result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
4584 class LUOobCommand(NoHooksLU):
4585 """Logical unit for OOB handling.
4589 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
4591 def ExpandNames(self):
4592 """Gather locks we need.
4595 if self.op.node_names:
4596 self.op.node_names = _GetWantedNodes(self, self.op.node_names)
4597 lock_names = self.op.node_names
4599 lock_names = locking.ALL_SET
4601 self.needed_locks = {
4602 locking.LEVEL_NODE: lock_names,
4605 def CheckPrereq(self):
4606 """Check prerequisites.
4609 - the node exists in the configuration
4612 Any errors are signaled by raising errors.OpPrereqError.
4616 self.master_node = self.cfg.GetMasterNode()
4618 assert self.op.power_delay >= 0.0
4620 if self.op.node_names:
4621 if (self.op.command in self._SKIP_MASTER and
4622 self.master_node in self.op.node_names):
4623 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
4624 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
4626 if master_oob_handler:
4627 additional_text = ("run '%s %s %s' if you want to operate on the"
4628 " master regardless") % (master_oob_handler,
4632 additional_text = "it does not support out-of-band operations"
4634 raise errors.OpPrereqError(("Operating on the master node %s is not"
4635 " allowed for %s; %s") %
4636 (self.master_node, self.op.command,
4637 additional_text), errors.ECODE_INVAL)
4639 self.op.node_names = self.cfg.GetNodeList()
4640 if self.op.command in self._SKIP_MASTER:
4641 self.op.node_names.remove(self.master_node)
4643 if self.op.command in self._SKIP_MASTER:
4644 assert self.master_node not in self.op.node_names
4646 for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
4648 raise errors.OpPrereqError("Node %s not found" % node_name,
4651 self.nodes.append(node)
4653 if (not self.op.ignore_status and
4654 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
4655 raise errors.OpPrereqError(("Cannot power off node %s because it is"
4656 " not marked offline") % node_name,
4659 def Exec(self, feedback_fn):
4660 """Execute OOB and return result if we expect any.
4663 master_node = self.master_node
4666 for idx, node in enumerate(utils.NiceSort(self.nodes,
4667 key=lambda node: node.name)):
4668 node_entry = [(constants.RS_NORMAL, node.name)]
4669 ret.append(node_entry)
4671 oob_program = _SupportsOob(self.cfg, node)
4674 node_entry.append((constants.RS_UNAVAIL, None))
4677 logging.info("Executing out-of-band command '%s' using '%s' on %s",
4678 self.op.command, oob_program, node.name)
4679 result = self.rpc.call_run_oob(master_node, oob_program,
4680 self.op.command, node.name,
4684 self.LogWarning("Out-of-band RPC failed on node '%s': %s",
4685 node.name, result.fail_msg)
4686 node_entry.append((constants.RS_NODATA, None))
4689 self._CheckPayload(result)
4690 except errors.OpExecError, err:
4691 self.LogWarning("Payload returned by node '%s' is not valid: %s",
4693 node_entry.append((constants.RS_NODATA, None))
4695 if self.op.command == constants.OOB_HEALTH:
4696 # For health we should log important events
4697 for item, status in result.payload:
4698 if status in [constants.OOB_STATUS_WARNING,
4699 constants.OOB_STATUS_CRITICAL]:
4700 self.LogWarning("Item '%s' on node '%s' has status '%s'",
4701 item, node.name, status)
4703 if self.op.command == constants.OOB_POWER_ON:
4705 elif self.op.command == constants.OOB_POWER_OFF:
4706 node.powered = False
4707 elif self.op.command == constants.OOB_POWER_STATUS:
4708 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
4709 if powered != node.powered:
4710 logging.warning(("Recorded power state (%s) of node '%s' does not"
4711 " match actual power state (%s)"), node.powered,
4714 # For configuration changing commands we should update the node
4715 if self.op.command in (constants.OOB_POWER_ON,
4716 constants.OOB_POWER_OFF):
4717 self.cfg.Update(node, feedback_fn)
4719 node_entry.append((constants.RS_NORMAL, result.payload))
4721 if (self.op.command == constants.OOB_POWER_ON and
4722 idx < len(self.nodes) - 1):
4723 time.sleep(self.op.power_delay)
4727 def _CheckPayload(self, result):
4728 """Checks if the payload is valid.
4730 @param result: RPC result
4731 @raises errors.OpExecError: If payload is not valid
4735 if self.op.command == constants.OOB_HEALTH:
4736 if not isinstance(result.payload, list):
4737 errs.append("command 'health' is expected to return a list but got %s" %
4738 type(result.payload))
4740 for item, status in result.payload:
4741 if status not in constants.OOB_STATUSES:
4742 errs.append("health item '%s' has invalid status '%s'" %
4745 if self.op.command == constants.OOB_POWER_STATUS:
4746 if not isinstance(result.payload, dict):
4747 errs.append("power-status is expected to return a dict but got %s" %
4748 type(result.payload))
4750 if self.op.command in [
4751 constants.OOB_POWER_ON,
4752 constants.OOB_POWER_OFF,
4753 constants.OOB_POWER_CYCLE,
4755 if result.payload is not None:
4756 errs.append("%s is expected to not return payload but got '%s'" %
4757 (self.op.command, result.payload))
4760 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
4761 utils.CommaJoin(errs))
4764 class _OsQuery(_QueryBase):
4765 FIELDS = query.OS_FIELDS
4767 def ExpandNames(self, lu):
4768 # Lock all nodes in shared mode
4769 # Temporary removal of locks, should be reverted later
4770 # TODO: reintroduce locks when they are lighter-weight
4771 lu.needed_locks = {}
4772 #self.share_locks[locking.LEVEL_NODE] = 1
4773 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4775 # The following variables interact with _QueryBase._GetNames
4777 self.wanted = self.names
4779 self.wanted = locking.ALL_SET
4781 self.do_locking = self.use_locking
4783 def DeclareLocks(self, lu, level):
4787 def _DiagnoseByOS(rlist):
4788 """Remaps a per-node return list into an a per-os per-node dictionary
4790 @param rlist: a map with node names as keys and OS objects as values
4793 @return: a dictionary with osnames as keys and as value another
4794 map, with nodes as keys and tuples of (path, status, diagnose,
4795 variants, parameters, api_versions) as values, eg::
4797 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
4798 (/srv/..., False, "invalid api")],
4799 "node2": [(/srv/..., True, "", [], [])]}
4804 # we build here the list of nodes that didn't fail the RPC (at RPC
4805 # level), so that nodes with a non-responding node daemon don't
4806 # make all OSes invalid
4807 good_nodes = [node_name for node_name in rlist
4808 if not rlist[node_name].fail_msg]
4809 for node_name, nr in rlist.items():
4810 if nr.fail_msg or not nr.payload:
4812 for (name, path, status, diagnose, variants,
4813 params, api_versions) in nr.payload:
4814 if name not in all_os:
4815 # build a list of nodes for this os containing empty lists
4816 # for each node in node_list
4818 for nname in good_nodes:
4819 all_os[name][nname] = []
4820 # convert params from [name, help] to (name, help)
4821 params = [tuple(v) for v in params]
4822 all_os[name][node_name].append((path, status, diagnose,
4823 variants, params, api_versions))
4826 def _GetQueryData(self, lu):
4827 """Computes the list of nodes and their attributes.
4830 # Locking is not used
4831 assert not (compat.any(lu.glm.is_owned(level)
4832 for level in locking.LEVELS
4833 if level != locking.LEVEL_CLUSTER) or
4834 self.do_locking or self.use_locking)
4836 valid_nodes = [node.name
4837 for node in lu.cfg.GetAllNodesInfo().values()
4838 if not node.offline and node.vm_capable]
4839 pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
4840 cluster = lu.cfg.GetClusterInfo()
4844 for (os_name, os_data) in pol.items():
4845 info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
4846 hidden=(os_name in cluster.hidden_os),
4847 blacklisted=(os_name in cluster.blacklisted_os))
4851 api_versions = set()
4853 for idx, osl in enumerate(os_data.values()):
4854 info.valid = bool(info.valid and osl and osl[0][1])
4858 (node_variants, node_params, node_api) = osl[0][3:6]
4861 variants.update(node_variants)
4862 parameters.update(node_params)
4863 api_versions.update(node_api)
4865 # Filter out inconsistent values
4866 variants.intersection_update(node_variants)
4867 parameters.intersection_update(node_params)
4868 api_versions.intersection_update(node_api)
4870 info.variants = list(variants)
4871 info.parameters = list(parameters)
4872 info.api_versions = list(api_versions)
4874 data[os_name] = info
4876 # Prepare data in requested order
4877 return [data[name] for name in self._GetNames(lu, pol.keys(), None)
4881 class LUOsDiagnose(NoHooksLU):
4882 """Logical unit for OS diagnose/query.
4888 def _BuildFilter(fields, names):
4889 """Builds a filter for querying OSes.
4892 name_filter = qlang.MakeSimpleFilter("name", names)
4894 # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
4895 # respective field is not requested
4896 status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
4897 for fname in ["hidden", "blacklisted"]
4898 if fname not in fields]
4899 if "valid" not in fields:
4900 status_filter.append([qlang.OP_TRUE, "valid"])
4903 status_filter.insert(0, qlang.OP_AND)
4905 status_filter = None
4907 if name_filter and status_filter:
4908 return [qlang.OP_AND, name_filter, status_filter]
4912 return status_filter
4914 def CheckArguments(self):
4915 self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
4916 self.op.output_fields, False)
4918 def ExpandNames(self):
4919 self.oq.ExpandNames(self)
4921 def Exec(self, feedback_fn):
4922 return self.oq.OldStyleQuery(self)
4925 class LUNodeRemove(LogicalUnit):
4926 """Logical unit for removing a node.
4929 HPATH = "node-remove"
4930 HTYPE = constants.HTYPE_NODE
4932 def BuildHooksEnv(self):
4937 "OP_TARGET": self.op.node_name,
4938 "NODE_NAME": self.op.node_name,
4941 def BuildHooksNodes(self):
4942 """Build hooks nodes.
4944 This doesn't run on the target node in the pre phase as a failed
4945 node would then be impossible to remove.
4948 all_nodes = self.cfg.GetNodeList()
4950 all_nodes.remove(self.op.node_name)
4953 return (all_nodes, all_nodes)
4955 def CheckPrereq(self):
4956 """Check prerequisites.
4959 - the node exists in the configuration
4960 - it does not have primary or secondary instances
4961 - it's not the master
4963 Any errors are signaled by raising errors.OpPrereqError.
4966 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4967 node = self.cfg.GetNodeInfo(self.op.node_name)
4968 assert node is not None
4970 masternode = self.cfg.GetMasterNode()
4971 if node.name == masternode:
4972 raise errors.OpPrereqError("Node is the master node, failover to another"
4973 " node is required", errors.ECODE_INVAL)
4975 for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
4976 if node.name in instance.all_nodes:
4977 raise errors.OpPrereqError("Instance %s is still running on the node,"
4978 " please remove first" % instance_name,
4980 self.op.node_name = node.name
4983 def Exec(self, feedback_fn):
4984 """Removes the node from the cluster.
4988 logging.info("Stopping the node daemon and removing configs from node %s",
4991 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
4993 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
4996 # Promote nodes to master candidate as needed
4997 _AdjustCandidatePool(self, exceptions=[node.name])
4998 self.context.RemoveNode(node.name)
5000 # Run post hooks on the node before it's removed
5001 _RunPostHook(self, node.name)
5003 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
5004 msg = result.fail_msg
5006 self.LogWarning("Errors encountered on the remote node while leaving"
5007 " the cluster: %s", msg)
5009 # Remove node from our /etc/hosts
5010 if self.cfg.GetClusterInfo().modify_etc_hosts:
5011 master_node = self.cfg.GetMasterNode()
5012 result = self.rpc.call_etc_hosts_modify(master_node,
5013 constants.ETC_HOSTS_REMOVE,
5015 result.Raise("Can't update hosts file with new host data")
5016 _RedistributeAncillaryFiles(self)
5019 class _NodeQuery(_QueryBase):
5020 FIELDS = query.NODE_FIELDS
5022 def ExpandNames(self, lu):
5023 lu.needed_locks = {}
5024 lu.share_locks = _ShareAll()
5027 self.wanted = _GetWantedNodes(lu, self.names)
5029 self.wanted = locking.ALL_SET
5031 self.do_locking = (self.use_locking and
5032 query.NQ_LIVE in self.requested_data)
5035 # If any non-static field is requested we need to lock the nodes
5036 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
5038 def DeclareLocks(self, lu, level):
5041 def _GetQueryData(self, lu):
5042 """Computes the list of nodes and their attributes.
5045 all_info = lu.cfg.GetAllNodesInfo()
5047 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
5049 # Gather data as requested
5050 if query.NQ_LIVE in self.requested_data:
5051 # filter out non-vm_capable nodes
5052 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
5054 node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
5055 [lu.cfg.GetHypervisorType()])
5056 live_data = dict((name, _MakeLegacyNodeInfo(nresult.payload))
5057 for (name, nresult) in node_data.items()
5058 if not nresult.fail_msg and nresult.payload)
5062 if query.NQ_INST in self.requested_data:
5063 node_to_primary = dict([(name, set()) for name in nodenames])
5064 node_to_secondary = dict([(name, set()) for name in nodenames])
5066 inst_data = lu.cfg.GetAllInstancesInfo()
5068 for inst in inst_data.values():
5069 if inst.primary_node in node_to_primary:
5070 node_to_primary[inst.primary_node].add(inst.name)
5071 for secnode in inst.secondary_nodes:
5072 if secnode in node_to_secondary:
5073 node_to_secondary[secnode].add(inst.name)
5075 node_to_primary = None
5076 node_to_secondary = None
5078 if query.NQ_OOB in self.requested_data:
5079 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
5080 for name, node in all_info.iteritems())
5084 if query.NQ_GROUP in self.requested_data:
5085 groups = lu.cfg.GetAllNodeGroupsInfo()
5089 return query.NodeQueryData([all_info[name] for name in nodenames],
5090 live_data, lu.cfg.GetMasterNode(),
5091 node_to_primary, node_to_secondary, groups,
5092 oob_support, lu.cfg.GetClusterInfo())
5095 class LUNodeQuery(NoHooksLU):
5096 """Logical unit for querying nodes.
5099 # pylint: disable=W0142
5102 def CheckArguments(self):
5103 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
5104 self.op.output_fields, self.op.use_locking)
5106 def ExpandNames(self):
5107 self.nq.ExpandNames(self)
5109 def DeclareLocks(self, level):
5110 self.nq.DeclareLocks(self, level)
5112 def Exec(self, feedback_fn):
5113 return self.nq.OldStyleQuery(self)
5116 class LUNodeQueryvols(NoHooksLU):
5117 """Logical unit for getting volumes on node(s).
5121 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
5122 _FIELDS_STATIC = utils.FieldSet("node")
5124 def CheckArguments(self):
5125 _CheckOutputFields(static=self._FIELDS_STATIC,
5126 dynamic=self._FIELDS_DYNAMIC,
5127 selected=self.op.output_fields)
5129 def ExpandNames(self):
5130 self.share_locks = _ShareAll()
5131 self.needed_locks = {}
5133 if not self.op.nodes:
5134 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5136 self.needed_locks[locking.LEVEL_NODE] = \
5137 _GetWantedNodes(self, self.op.nodes)
5139 def Exec(self, feedback_fn):
5140 """Computes the list of nodes and their attributes.
5143 nodenames = self.owned_locks(locking.LEVEL_NODE)
5144 volumes = self.rpc.call_node_volumes(nodenames)
5146 ilist = self.cfg.GetAllInstancesInfo()
5147 vol2inst = _MapInstanceDisksToNodes(ilist.values())
5150 for node in nodenames:
5151 nresult = volumes[node]
5154 msg = nresult.fail_msg
5156 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
5159 node_vols = sorted(nresult.payload,
5160 key=operator.itemgetter("dev"))
5162 for vol in node_vols:
5164 for field in self.op.output_fields:
5167 elif field == "phys":
5171 elif field == "name":
5173 elif field == "size":
5174 val = int(float(vol["size"]))
5175 elif field == "instance":
5176 val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
5178 raise errors.ParameterError(field)
5179 node_output.append(str(val))
5181 output.append(node_output)
5186 class LUNodeQueryStorage(NoHooksLU):
5187 """Logical unit for getting information on storage units on node(s).
5190 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
5193 def CheckArguments(self):
5194 _CheckOutputFields(static=self._FIELDS_STATIC,
5195 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
5196 selected=self.op.output_fields)
5198 def ExpandNames(self):
5199 self.share_locks = _ShareAll()
5200 self.needed_locks = {}
5203 self.needed_locks[locking.LEVEL_NODE] = \
5204 _GetWantedNodes(self, self.op.nodes)
5206 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5208 def Exec(self, feedback_fn):
5209 """Computes the list of nodes and their attributes.
5212 self.nodes = self.owned_locks(locking.LEVEL_NODE)
5214 # Always get name to sort by
5215 if constants.SF_NAME in self.op.output_fields:
5216 fields = self.op.output_fields[:]
5218 fields = [constants.SF_NAME] + self.op.output_fields
5220 # Never ask for node or type as it's only known to the LU
5221 for extra in [constants.SF_NODE, constants.SF_TYPE]:
5222 while extra in fields:
5223 fields.remove(extra)
5225 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
5226 name_idx = field_idx[constants.SF_NAME]
5228 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5229 data = self.rpc.call_storage_list(self.nodes,
5230 self.op.storage_type, st_args,
5231 self.op.name, fields)
5235 for node in utils.NiceSort(self.nodes):
5236 nresult = data[node]
5240 msg = nresult.fail_msg
5242 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
5245 rows = dict([(row[name_idx], row) for row in nresult.payload])
5247 for name in utils.NiceSort(rows.keys()):
5252 for field in self.op.output_fields:
5253 if field == constants.SF_NODE:
5255 elif field == constants.SF_TYPE:
5256 val = self.op.storage_type
5257 elif field in field_idx:
5258 val = row[field_idx[field]]
5260 raise errors.ParameterError(field)
5269 class _InstanceQuery(_QueryBase):
5270 FIELDS = query.INSTANCE_FIELDS
5272 def ExpandNames(self, lu):
5273 lu.needed_locks = {}
5274 lu.share_locks = _ShareAll()
5277 self.wanted = _GetWantedInstances(lu, self.names)
5279 self.wanted = locking.ALL_SET
5281 self.do_locking = (self.use_locking and
5282 query.IQ_LIVE in self.requested_data)
5284 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5285 lu.needed_locks[locking.LEVEL_NODEGROUP] = []
5286 lu.needed_locks[locking.LEVEL_NODE] = []
5287 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5289 self.do_grouplocks = (self.do_locking and
5290 query.IQ_NODES in self.requested_data)
5292 def DeclareLocks(self, lu, level):
5294 if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
5295 assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
5297 # Lock all groups used by instances optimistically; this requires going
5298 # via the node before it's locked, requiring verification later on
5299 lu.needed_locks[locking.LEVEL_NODEGROUP] = \
5301 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
5302 for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
5303 elif level == locking.LEVEL_NODE:
5304 lu._LockInstancesNodes() # pylint: disable=W0212
5307 def _CheckGroupLocks(lu):
5308 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
5309 owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
5311 # Check if node groups for locked instances are still correct
5312 for instance_name in owned_instances:
5313 _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
5315 def _GetQueryData(self, lu):
5316 """Computes the list of instances and their attributes.
5319 if self.do_grouplocks:
5320 self._CheckGroupLocks(lu)
5322 cluster = lu.cfg.GetClusterInfo()
5323 all_info = lu.cfg.GetAllInstancesInfo()
5325 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
5327 instance_list = [all_info[name] for name in instance_names]
5328 nodes = frozenset(itertools.chain(*(inst.all_nodes
5329 for inst in instance_list)))
5330 hv_list = list(set([inst.hypervisor for inst in instance_list]))
5333 wrongnode_inst = set()
5335 # Gather data as requested
5336 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
5338 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
5340 result = node_data[name]
5342 # offline nodes will be in both lists
5343 assert result.fail_msg
5344 offline_nodes.append(name)
5346 bad_nodes.append(name)
5347 elif result.payload:
5348 for inst in result.payload:
5349 if inst in all_info:
5350 if all_info[inst].primary_node == name:
5351 live_data.update(result.payload)
5353 wrongnode_inst.add(inst)
5355 # orphan instance; we don't list it here as we don't
5356 # handle this case yet in the output of instance listing
5357 logging.warning("Orphan instance '%s' found on node %s",
5359 # else no instance is alive
5363 if query.IQ_DISKUSAGE in self.requested_data:
5364 disk_usage = dict((inst.name,
5365 _ComputeDiskSize(inst.disk_template,
5366 [{constants.IDISK_SIZE: disk.size}
5367 for disk in inst.disks]))
5368 for inst in instance_list)
5372 if query.IQ_CONSOLE in self.requested_data:
5374 for inst in instance_list:
5375 if inst.name in live_data:
5376 # Instance is running
5377 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
5379 consinfo[inst.name] = None
5380 assert set(consinfo.keys()) == set(instance_names)
5384 if query.IQ_NODES in self.requested_data:
5385 node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
5387 nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
5388 groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
5389 for uuid in set(map(operator.attrgetter("group"),
5395 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
5396 disk_usage, offline_nodes, bad_nodes,
5397 live_data, wrongnode_inst, consinfo,
5401 class LUQuery(NoHooksLU):
5402 """Query for resources/items of a certain kind.
5405 # pylint: disable=W0142
5408 def CheckArguments(self):
5409 qcls = _GetQueryImplementation(self.op.what)
5411 self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
5413 def ExpandNames(self):
5414 self.impl.ExpandNames(self)
5416 def DeclareLocks(self, level):
5417 self.impl.DeclareLocks(self, level)
5419 def Exec(self, feedback_fn):
5420 return self.impl.NewStyleQuery(self)
5423 class LUQueryFields(NoHooksLU):
5424 """Query for resources/items of a certain kind.
5427 # pylint: disable=W0142
5430 def CheckArguments(self):
5431 self.qcls = _GetQueryImplementation(self.op.what)
5433 def ExpandNames(self):
5434 self.needed_locks = {}
5436 def Exec(self, feedback_fn):
5437 return query.QueryFields(self.qcls.FIELDS, self.op.fields)
5440 class LUNodeModifyStorage(NoHooksLU):
5441 """Logical unit for modifying a storage volume on a node.
5446 def CheckArguments(self):
5447 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5449 storage_type = self.op.storage_type
5452 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
5454 raise errors.OpPrereqError("Storage units of type '%s' can not be"
5455 " modified" % storage_type,
5458 diff = set(self.op.changes.keys()) - modifiable
5460 raise errors.OpPrereqError("The following fields can not be modified for"
5461 " storage units of type '%s': %r" %
5462 (storage_type, list(diff)),
5465 def ExpandNames(self):
5466 self.needed_locks = {
5467 locking.LEVEL_NODE: self.op.node_name,
5470 def Exec(self, feedback_fn):
5471 """Computes the list of nodes and their attributes.
5474 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5475 result = self.rpc.call_storage_modify(self.op.node_name,
5476 self.op.storage_type, st_args,
5477 self.op.name, self.op.changes)
5478 result.Raise("Failed to modify storage unit '%s' on %s" %
5479 (self.op.name, self.op.node_name))
5482 class LUNodeAdd(LogicalUnit):
5483 """Logical unit for adding node to the cluster.
5487 HTYPE = constants.HTYPE_NODE
5488 _NFLAGS = ["master_capable", "vm_capable"]
5490 def CheckArguments(self):
5491 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
5492 # validate/normalize the node name
5493 self.hostname = netutils.GetHostname(name=self.op.node_name,
5494 family=self.primary_ip_family)
5495 self.op.node_name = self.hostname.name
5497 if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
5498 raise errors.OpPrereqError("Cannot readd the master node",
5501 if self.op.readd and self.op.group:
5502 raise errors.OpPrereqError("Cannot pass a node group when a node is"
5503 " being readded", errors.ECODE_INVAL)
5505 def BuildHooksEnv(self):
5508 This will run on all nodes before, and on all nodes + the new node after.
5512 "OP_TARGET": self.op.node_name,
5513 "NODE_NAME": self.op.node_name,
5514 "NODE_PIP": self.op.primary_ip,
5515 "NODE_SIP": self.op.secondary_ip,
5516 "MASTER_CAPABLE": str(self.op.master_capable),
5517 "VM_CAPABLE": str(self.op.vm_capable),
5520 def BuildHooksNodes(self):
5521 """Build hooks nodes.
5524 # Exclude added node
5525 pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
5526 post_nodes = pre_nodes + [self.op.node_name, ]
5528 return (pre_nodes, post_nodes)
5530 def CheckPrereq(self):
5531 """Check prerequisites.
5534 - the new node is not already in the config
5536 - its parameters (single/dual homed) matches the cluster
5538 Any errors are signaled by raising errors.OpPrereqError.
5542 hostname = self.hostname
5543 node = hostname.name
5544 primary_ip = self.op.primary_ip = hostname.ip
5545 if self.op.secondary_ip is None:
5546 if self.primary_ip_family == netutils.IP6Address.family:
5547 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
5548 " IPv4 address must be given as secondary",
5550 self.op.secondary_ip = primary_ip
5552 secondary_ip = self.op.secondary_ip
5553 if not netutils.IP4Address.IsValid(secondary_ip):
5554 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5555 " address" % secondary_ip, errors.ECODE_INVAL)
5557 node_list = cfg.GetNodeList()
5558 if not self.op.readd and node in node_list:
5559 raise errors.OpPrereqError("Node %s is already in the configuration" %
5560 node, errors.ECODE_EXISTS)
5561 elif self.op.readd and node not in node_list:
5562 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
5565 self.changed_primary_ip = False
5567 for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
5568 if self.op.readd and node == existing_node_name:
5569 if existing_node.secondary_ip != secondary_ip:
5570 raise errors.OpPrereqError("Readded node doesn't have the same IP"
5571 " address configuration as before",
5573 if existing_node.primary_ip != primary_ip:
5574 self.changed_primary_ip = True
5578 if (existing_node.primary_ip == primary_ip or
5579 existing_node.secondary_ip == primary_ip or
5580 existing_node.primary_ip == secondary_ip or
5581 existing_node.secondary_ip == secondary_ip):
5582 raise errors.OpPrereqError("New node ip address(es) conflict with"
5583 " existing node %s" % existing_node.name,
5584 errors.ECODE_NOTUNIQUE)
5586 # After this 'if' block, None is no longer a valid value for the
5587 # _capable op attributes
5589 old_node = self.cfg.GetNodeInfo(node)
5590 assert old_node is not None, "Can't retrieve locked node %s" % node
5591 for attr in self._NFLAGS:
5592 if getattr(self.op, attr) is None:
5593 setattr(self.op, attr, getattr(old_node, attr))
5595 for attr in self._NFLAGS:
5596 if getattr(self.op, attr) is None:
5597 setattr(self.op, attr, True)
5599 if self.op.readd and not self.op.vm_capable:
5600 pri, sec = cfg.GetNodeInstances(node)
5602 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
5603 " flag set to false, but it already holds"
5604 " instances" % node,
5607 # check that the type of the node (single versus dual homed) is the
5608 # same as for the master
5609 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
5610 master_singlehomed = myself.secondary_ip == myself.primary_ip
5611 newbie_singlehomed = secondary_ip == primary_ip
5612 if master_singlehomed != newbie_singlehomed:
5613 if master_singlehomed:
5614 raise errors.OpPrereqError("The master has no secondary ip but the"
5615 " new node has one",
5618 raise errors.OpPrereqError("The master has a secondary ip but the"
5619 " new node doesn't have one",
5622 # checks reachability
5623 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
5624 raise errors.OpPrereqError("Node not reachable by ping",
5625 errors.ECODE_ENVIRON)
5627 if not newbie_singlehomed:
5628 # check reachability from my secondary ip to newbie's secondary ip
5629 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
5630 source=myself.secondary_ip):
5631 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5632 " based ping to node daemon port",
5633 errors.ECODE_ENVIRON)
5640 if self.op.master_capable:
5641 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
5643 self.master_candidate = False
5646 self.new_node = old_node
5648 node_group = cfg.LookupNodeGroup(self.op.group)
5649 self.new_node = objects.Node(name=node,
5650 primary_ip=primary_ip,
5651 secondary_ip=secondary_ip,
5652 master_candidate=self.master_candidate,
5653 offline=False, drained=False,
5656 if self.op.ndparams:
5657 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
5659 if self.op.hv_state:
5660 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
5662 if self.op.disk_state:
5663 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
5665 # TODO: If we need to have multiple DnsOnlyRunner we probably should make
5666 # it a property on the base class.
5667 result = rpc.DnsOnlyRunner().call_version([node])[node]
5668 result.Raise("Can't get version information from node %s" % node)
5669 if constants.PROTOCOL_VERSION == result.payload:
5670 logging.info("Communication to node %s fine, sw version %s match",
5671 node, result.payload)
5673 raise errors.OpPrereqError("Version mismatch master version %s,"
5674 " node version %s" %
5675 (constants.PROTOCOL_VERSION, result.payload),
5676 errors.ECODE_ENVIRON)
5678 def Exec(self, feedback_fn):
5679 """Adds the new node to the cluster.
5682 new_node = self.new_node
5683 node = new_node.name
5685 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5688 # We adding a new node so we assume it's powered
5689 new_node.powered = True
5691 # for re-adds, reset the offline/drained/master-candidate flags;
5692 # we need to reset here, otherwise offline would prevent RPC calls
5693 # later in the procedure; this also means that if the re-add
5694 # fails, we are left with a non-offlined, broken node
5696 new_node.drained = new_node.offline = False # pylint: disable=W0201
5697 self.LogInfo("Readding a node, the offline/drained flags were reset")
5698 # if we demote the node, we do cleanup later in the procedure
5699 new_node.master_candidate = self.master_candidate
5700 if self.changed_primary_ip:
5701 new_node.primary_ip = self.op.primary_ip
5703 # copy the master/vm_capable flags
5704 for attr in self._NFLAGS:
5705 setattr(new_node, attr, getattr(self.op, attr))
5707 # notify the user about any possible mc promotion
5708 if new_node.master_candidate:
5709 self.LogInfo("Node will be a master candidate")
5711 if self.op.ndparams:
5712 new_node.ndparams = self.op.ndparams
5714 new_node.ndparams = {}
5716 if self.op.hv_state:
5717 new_node.hv_state_static = self.new_hv_state
5719 if self.op.disk_state:
5720 new_node.disk_state_static = self.new_disk_state
5722 # Add node to our /etc/hosts, and add key to known_hosts
5723 if self.cfg.GetClusterInfo().modify_etc_hosts:
5724 master_node = self.cfg.GetMasterNode()
5725 result = self.rpc.call_etc_hosts_modify(master_node,
5726 constants.ETC_HOSTS_ADD,
5729 result.Raise("Can't update hosts file with new host data")
5731 if new_node.secondary_ip != new_node.primary_ip:
5732 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
5735 node_verify_list = [self.cfg.GetMasterNode()]
5736 node_verify_param = {
5737 constants.NV_NODELIST: ([node], {}),
5738 # TODO: do a node-net-test as well?
5741 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
5742 self.cfg.GetClusterName())
5743 for verifier in node_verify_list:
5744 result[verifier].Raise("Cannot communicate with node %s" % verifier)
5745 nl_payload = result[verifier].payload[constants.NV_NODELIST]
5747 for failed in nl_payload:
5748 feedback_fn("ssh/hostname verification failed"
5749 " (checking from %s): %s" %
5750 (verifier, nl_payload[failed]))
5751 raise errors.OpExecError("ssh/hostname verification failed")
5754 _RedistributeAncillaryFiles(self)
5755 self.context.ReaddNode(new_node)
5756 # make sure we redistribute the config
5757 self.cfg.Update(new_node, feedback_fn)
5758 # and make sure the new node will not have old files around
5759 if not new_node.master_candidate:
5760 result = self.rpc.call_node_demote_from_mc(new_node.name)
5761 msg = result.fail_msg
5763 self.LogWarning("Node failed to demote itself from master"
5764 " candidate status: %s" % msg)
5766 _RedistributeAncillaryFiles(self, additional_nodes=[node],
5767 additional_vm=self.op.vm_capable)
5768 self.context.AddNode(new_node, self.proc.GetECId())
5771 class LUNodeSetParams(LogicalUnit):
5772 """Modifies the parameters of a node.
5774 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5775 to the node role (as _ROLE_*)
5776 @cvar _R2F: a dictionary from node role to tuples of flags
5777 @cvar _FLAGS: a list of attribute names corresponding to the flags
5780 HPATH = "node-modify"
5781 HTYPE = constants.HTYPE_NODE
5783 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
5785 (True, False, False): _ROLE_CANDIDATE,
5786 (False, True, False): _ROLE_DRAINED,
5787 (False, False, True): _ROLE_OFFLINE,
5788 (False, False, False): _ROLE_REGULAR,
5790 _R2F = dict((v, k) for k, v in _F2R.items())
5791 _FLAGS = ["master_candidate", "drained", "offline"]
5793 def CheckArguments(self):
5794 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5795 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
5796 self.op.master_capable, self.op.vm_capable,
5797 self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
5799 if all_mods.count(None) == len(all_mods):
5800 raise errors.OpPrereqError("Please pass at least one modification",
5802 if all_mods.count(True) > 1:
5803 raise errors.OpPrereqError("Can't set the node into more than one"
5804 " state at the same time",
5807 # Boolean value that tells us whether we might be demoting from MC
5808 self.might_demote = (self.op.master_candidate == False or
5809 self.op.offline == True or
5810 self.op.drained == True or
5811 self.op.master_capable == False)
5813 if self.op.secondary_ip:
5814 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
5815 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5816 " address" % self.op.secondary_ip,
5819 self.lock_all = self.op.auto_promote and self.might_demote
5820 self.lock_instances = self.op.secondary_ip is not None
5822 def _InstanceFilter(self, instance):
5823 """Filter for getting affected instances.
5826 return (instance.disk_template in constants.DTS_INT_MIRROR and
5827 self.op.node_name in instance.all_nodes)
5829 def ExpandNames(self):
5831 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
5833 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
5835 # Since modifying a node can have severe effects on currently running
5836 # operations the resource lock is at least acquired in shared mode
5837 self.needed_locks[locking.LEVEL_NODE_RES] = \
5838 self.needed_locks[locking.LEVEL_NODE]
5840 # Get node resource and instance locks in shared mode; they are not used
5841 # for anything but read-only access
5842 self.share_locks[locking.LEVEL_NODE_RES] = 1
5843 self.share_locks[locking.LEVEL_INSTANCE] = 1
5845 if self.lock_instances:
5846 self.needed_locks[locking.LEVEL_INSTANCE] = \
5847 frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))
5849 def BuildHooksEnv(self):
5852 This runs on the master node.
5856 "OP_TARGET": self.op.node_name,
5857 "MASTER_CANDIDATE": str(self.op.master_candidate),
5858 "OFFLINE": str(self.op.offline),
5859 "DRAINED": str(self.op.drained),
5860 "MASTER_CAPABLE": str(self.op.master_capable),
5861 "VM_CAPABLE": str(self.op.vm_capable),
5864 def BuildHooksNodes(self):
5865 """Build hooks nodes.
5868 nl = [self.cfg.GetMasterNode(), self.op.node_name]
5871 def CheckPrereq(self):
5872 """Check prerequisites.
5874 This only checks the instance list against the existing names.
5877 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
5879 if self.lock_instances:
5880 affected_instances = \
5881 self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
5883 # Verify instance locks
5884 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
5885 wanted_instances = frozenset(affected_instances.keys())
5886 if wanted_instances - owned_instances:
5887 raise errors.OpPrereqError("Instances affected by changing node %s's"
5888 " secondary IP address have changed since"
5889 " locks were acquired, wanted '%s', have"
5890 " '%s'; retry the operation" %
5892 utils.CommaJoin(wanted_instances),
5893 utils.CommaJoin(owned_instances)),
5896 affected_instances = None
5898 if (self.op.master_candidate is not None or
5899 self.op.drained is not None or
5900 self.op.offline is not None):
5901 # we can't change the master's node flags
5902 if self.op.node_name == self.cfg.GetMasterNode():
5903 raise errors.OpPrereqError("The master role can be changed"
5904 " only via master-failover",
5907 if self.op.master_candidate and not node.master_capable:
5908 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
5909 " it a master candidate" % node.name,
5912 if self.op.vm_capable == False:
5913 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
5915 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
5916 " the vm_capable flag" % node.name,
5919 if node.master_candidate and self.might_demote and not self.lock_all:
5920 assert not self.op.auto_promote, "auto_promote set but lock_all not"
5921 # check if after removing the current node, we're missing master
5923 (mc_remaining, mc_should, _) = \
5924 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
5925 if mc_remaining < mc_should:
5926 raise errors.OpPrereqError("Not enough master candidates, please"
5927 " pass auto promote option to allow"
5928 " promotion (--auto-promote or RAPI"
5929 " auto_promote=True)", errors.ECODE_STATE)
5931 self.old_flags = old_flags = (node.master_candidate,
5932 node.drained, node.offline)
5933 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
5934 self.old_role = old_role = self._F2R[old_flags]
5936 # Check for ineffective changes
5937 for attr in self._FLAGS:
5938 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5939 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5940 setattr(self.op, attr, None)
5942 # Past this point, any flag change to False means a transition
5943 # away from the respective state, as only real changes are kept
5945 # TODO: We might query the real power state if it supports OOB
5946 if _SupportsOob(self.cfg, node):
5947 if self.op.offline is False and not (node.powered or
5948 self.op.powered == True):
5949 raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5950 " offline status can be reset") %
5952 elif self.op.powered is not None:
5953 raise errors.OpPrereqError(("Unable to change powered state for node %s"
5954 " as it does not support out-of-band"
5955 " handling") % self.op.node_name)
5957 # If we're being deofflined/drained, we'll MC ourself if needed
5958 if (self.op.drained == False or self.op.offline == False or
5959 (self.op.master_capable and not node.master_capable)):
5960 if _DecideSelfPromotion(self):
5961 self.op.master_candidate = True
5962 self.LogInfo("Auto-promoting node to master candidate")
5964 # If we're no longer master capable, we'll demote ourselves from MC
5965 if self.op.master_capable == False and node.master_candidate:
5966 self.LogInfo("Demoting from master candidate")
5967 self.op.master_candidate = False
5970 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
5971 if self.op.master_candidate:
5972 new_role = self._ROLE_CANDIDATE
5973 elif self.op.drained:
5974 new_role = self._ROLE_DRAINED
5975 elif self.op.offline:
5976 new_role = self._ROLE_OFFLINE
5977 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
5978 # False is still in new flags, which means we're un-setting (the
5980 new_role = self._ROLE_REGULAR
5981 else: # no new flags, nothing, keep old role
5984 self.new_role = new_role
5986 if old_role == self._ROLE_OFFLINE and new_role != old_role:
5987 # Trying to transition out of offline status
5988 result = self.rpc.call_version([node.name])[node.name]
5990 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
5991 " to report its version: %s" %
5992 (node.name, result.fail_msg),
5995 self.LogWarning("Transitioning node from offline to online state"
5996 " without using re-add. Please make sure the node"
5999 # When changing the secondary ip, verify if this is a single-homed to
6000 # multi-homed transition or vice versa, and apply the relevant
6002 if self.op.secondary_ip:
6003 # Ok even without locking, because this can't be changed by any LU
6004 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
6005 master_singlehomed = master.secondary_ip == master.primary_ip
6006 if master_singlehomed and self.op.secondary_ip != node.primary_ip:
6007 if self.op.force and node.name == master.name:
6008 self.LogWarning("Transitioning from single-homed to multi-homed"
6009 " cluster. All nodes will require a secondary ip.")
6011 raise errors.OpPrereqError("Changing the secondary ip on a"
6012 " single-homed cluster requires the"
6013 " --force option to be passed, and the"
6014 " target node to be the master",
6016 elif not master_singlehomed and self.op.secondary_ip == node.primary_ip:
6017 if self.op.force and node.name == master.name:
6018 self.LogWarning("Transitioning from multi-homed to single-homed"
6019 " cluster. Secondary IPs will have to be removed.")
6021 raise errors.OpPrereqError("Cannot set the secondary IP to be the"
6022 " same as the primary IP on a multi-homed"
6023 " cluster, unless the --force option is"
6024 " passed, and the target node is the"
6025 " master", errors.ECODE_INVAL)
6027 assert not (frozenset(affected_instances) -
6028 self.owned_locks(locking.LEVEL_INSTANCE))
6031 if affected_instances:
6032 raise errors.OpPrereqError("Cannot change secondary IP address:"
6033 " offline node has instances (%s)"
6034 " configured to use it" %
6035 utils.CommaJoin(affected_instances.keys()))
6037 # On online nodes, check that no instances are running, and that
6038 # the node has the new ip and we can reach it.
6039 for instance in affected_instances.values():
6040 _CheckInstanceState(self, instance, INSTANCE_DOWN,
6041 msg="cannot change secondary ip")
6043 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
6044 if master.name != node.name:
6045 # check reachability from master secondary ip to new secondary ip
6046 if not netutils.TcpPing(self.op.secondary_ip,
6047 constants.DEFAULT_NODED_PORT,
6048 source=master.secondary_ip):
6049 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
6050 " based ping to node daemon port",
6051 errors.ECODE_ENVIRON)
6053 if self.op.ndparams:
6054 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
6055 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
6056 self.new_ndparams = new_ndparams
6058 if self.op.hv_state:
6059 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
6060 self.node.hv_state_static)
6062 if self.op.disk_state:
6063 self.new_disk_state = \
6064 _MergeAndVerifyDiskState(self.op.disk_state,
6065 self.node.disk_state_static)
6067 def Exec(self, feedback_fn):
6072 old_role = self.old_role
6073 new_role = self.new_role
6077 if self.op.ndparams:
6078 node.ndparams = self.new_ndparams
6080 if self.op.powered is not None:
6081 node.powered = self.op.powered
6083 if self.op.hv_state:
6084 node.hv_state_static = self.new_hv_state
6086 if self.op.disk_state:
6087 node.disk_state_static = self.new_disk_state
6089 for attr in ["master_capable", "vm_capable"]:
6090 val = getattr(self.op, attr)
6092 setattr(node, attr, val)
6093 result.append((attr, str(val)))
6095 if new_role != old_role:
6096 # Tell the node to demote itself, if no longer MC and not offline
6097 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
6098 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
6100 self.LogWarning("Node failed to demote itself: %s", msg)
6102 new_flags = self._R2F[new_role]
6103 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
6105 result.append((desc, str(nf)))
6106 (node.master_candidate, node.drained, node.offline) = new_flags
6108 # we locked all nodes, we adjust the CP before updating this node
6110 _AdjustCandidatePool(self, [node.name])
6112 if self.op.secondary_ip:
6113 node.secondary_ip = self.op.secondary_ip
6114 result.append(("secondary_ip", self.op.secondary_ip))
6116 # this will trigger configuration file update, if needed
6117 self.cfg.Update(node, feedback_fn)
6119 # this will trigger job queue propagation or cleanup if the mc
6121 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
6122 self.context.ReaddNode(node)
6127 class LUNodePowercycle(NoHooksLU):
6128 """Powercycles a node.
6133 def CheckArguments(self):
6134 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6135 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
6136 raise errors.OpPrereqError("The node is the master and the force"
6137 " parameter was not set",
6140 def ExpandNames(self):
6141 """Locking for PowercycleNode.
6143 This is a last-resort option and shouldn't block on other
6144 jobs. Therefore, we grab no locks.
6147 self.needed_locks = {}
6149 def Exec(self, feedback_fn):
6153 result = self.rpc.call_node_powercycle(self.op.node_name,
6154 self.cfg.GetHypervisorType())
6155 result.Raise("Failed to schedule the reboot")
6156 return result.payload
6159 class LUClusterQuery(NoHooksLU):
6160 """Query cluster configuration.
6165 def ExpandNames(self):
6166 self.needed_locks = {}
6168 def Exec(self, feedback_fn):
6169 """Return cluster config.
6172 cluster = self.cfg.GetClusterInfo()
6175 # Filter just for enabled hypervisors
6176 for os_name, hv_dict in cluster.os_hvp.items():
6177 os_hvp[os_name] = {}
6178 for hv_name, hv_params in hv_dict.items():
6179 if hv_name in cluster.enabled_hypervisors:
6180 os_hvp[os_name][hv_name] = hv_params
6182 # Convert ip_family to ip_version
6183 primary_ip_version = constants.IP4_VERSION
6184 if cluster.primary_ip_family == netutils.IP6Address.family:
6185 primary_ip_version = constants.IP6_VERSION
6188 "software_version": constants.RELEASE_VERSION,
6189 "protocol_version": constants.PROTOCOL_VERSION,
6190 "config_version": constants.CONFIG_VERSION,
6191 "os_api_version": max(constants.OS_API_VERSIONS),
6192 "export_version": constants.EXPORT_VERSION,
6193 "architecture": runtime.GetArchInfo(),
6194 "name": cluster.cluster_name,
6195 "master": cluster.master_node,
6196 "default_hypervisor": cluster.primary_hypervisor,
6197 "enabled_hypervisors": cluster.enabled_hypervisors,
6198 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
6199 for hypervisor_name in cluster.enabled_hypervisors]),
6201 "beparams": cluster.beparams,
6202 "osparams": cluster.osparams,
6203 "ipolicy": cluster.ipolicy,
6204 "nicparams": cluster.nicparams,
6205 "ndparams": cluster.ndparams,
6206 "diskparams": cluster.diskparams,
6207 "candidate_pool_size": cluster.candidate_pool_size,
6208 "master_netdev": cluster.master_netdev,
6209 "master_netmask": cluster.master_netmask,
6210 "use_external_mip_script": cluster.use_external_mip_script,
6211 "volume_group_name": cluster.volume_group_name,
6212 "drbd_usermode_helper": cluster.drbd_usermode_helper,
6213 "file_storage_dir": cluster.file_storage_dir,
6214 "shared_file_storage_dir": cluster.shared_file_storage_dir,
6215 "maintain_node_health": cluster.maintain_node_health,
6216 "ctime": cluster.ctime,
6217 "mtime": cluster.mtime,
6218 "uuid": cluster.uuid,
6219 "tags": list(cluster.GetTags()),
6220 "uid_pool": cluster.uid_pool,
6221 "default_iallocator": cluster.default_iallocator,
6222 "reserved_lvs": cluster.reserved_lvs,
6223 "primary_ip_version": primary_ip_version,
6224 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
6225 "hidden_os": cluster.hidden_os,
6226 "blacklisted_os": cluster.blacklisted_os,
6232 class LUClusterConfigQuery(NoHooksLU):
6233 """Return configuration values.
6238 def CheckArguments(self):
6239 self.cq = _ClusterQuery(None, self.op.output_fields, False)
6241 def ExpandNames(self):
6242 self.cq.ExpandNames(self)
6244 def DeclareLocks(self, level):
6245 self.cq.DeclareLocks(self, level)
6247 def Exec(self, feedback_fn):
6248 result = self.cq.OldStyleQuery(self)
6250 assert len(result) == 1
6255 class _ClusterQuery(_QueryBase):
6256 FIELDS = query.CLUSTER_FIELDS
6258 #: Do not sort (there is only one item)
6261 def ExpandNames(self, lu):
6262 lu.needed_locks = {}
6264 # The following variables interact with _QueryBase._GetNames
6265 self.wanted = locking.ALL_SET
6266 self.do_locking = self.use_locking
6269 raise errors.OpPrereqError("Can not use locking for cluster queries",
6272 def DeclareLocks(self, lu, level):
6275 def _GetQueryData(self, lu):
6276 """Computes the list of nodes and their attributes.
6279 # Locking is not used
6280 assert not (compat.any(lu.glm.is_owned(level)
6281 for level in locking.LEVELS
6282 if level != locking.LEVEL_CLUSTER) or
6283 self.do_locking or self.use_locking)
6285 if query.CQ_CONFIG in self.requested_data:
6286 cluster = lu.cfg.GetClusterInfo()
6288 cluster = NotImplemented
6290 if query.CQ_QUEUE_DRAINED in self.requested_data:
6291 drain_flag = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
6293 drain_flag = NotImplemented
6295 if query.CQ_WATCHER_PAUSE in self.requested_data:
6296 watcher_pause = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
6298 watcher_pause = NotImplemented
6300 return query.ClusterQueryData(cluster, drain_flag, watcher_pause)
6303 class LUInstanceActivateDisks(NoHooksLU):
6304 """Bring up an instance's disks.
6309 def ExpandNames(self):
6310 self._ExpandAndLockInstance()
6311 self.needed_locks[locking.LEVEL_NODE] = []
6312 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6314 def DeclareLocks(self, level):
6315 if level == locking.LEVEL_NODE:
6316 self._LockInstancesNodes()
6318 def CheckPrereq(self):
6319 """Check prerequisites.
6321 This checks that the instance is in the cluster.
6324 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6325 assert self.instance is not None, \
6326 "Cannot retrieve locked instance %s" % self.op.instance_name
6327 _CheckNodeOnline(self, self.instance.primary_node)
6329 def Exec(self, feedback_fn):
6330 """Activate the disks.
6333 disks_ok, disks_info = \
6334 _AssembleInstanceDisks(self, self.instance,
6335 ignore_size=self.op.ignore_size)
6337 raise errors.OpExecError("Cannot activate block devices")
6339 if self.op.wait_for_sync:
6340 if not _WaitForSync(self, self.instance):
6341 raise errors.OpExecError("Some disks of the instance are degraded!")
6346 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
6348 """Prepare the block devices for an instance.
6350 This sets up the block devices on all nodes.
6352 @type lu: L{LogicalUnit}
6353 @param lu: the logical unit on whose behalf we execute
6354 @type instance: L{objects.Instance}
6355 @param instance: the instance for whose disks we assemble
6356 @type disks: list of L{objects.Disk} or None
6357 @param disks: which disks to assemble (or all, if None)
6358 @type ignore_secondaries: boolean
6359 @param ignore_secondaries: if true, errors on secondary nodes
6360 won't result in an error return from the function
6361 @type ignore_size: boolean
6362 @param ignore_size: if true, the current known size of the disk
6363 will not be used during the disk activation, useful for cases
6364 when the size is wrong
6365 @return: False if the operation failed, otherwise a list of
6366 (host, instance_visible_name, node_visible_name)
6367 with the mapping from node devices to instance devices
6372 iname = instance.name
6373 disks = _ExpandCheckDisks(instance, disks)
6375 # With the two passes mechanism we try to reduce the window of
6376 # opportunity for the race condition of switching DRBD to primary
6377 # before handshaking occured, but we do not eliminate it
6379 # The proper fix would be to wait (with some limits) until the
6380 # connection has been made and drbd transitions from WFConnection
6381 # into any other network-connected state (Connected, SyncTarget,
6384 # 1st pass, assemble on all nodes in secondary mode
6385 for idx, inst_disk in enumerate(disks):
6386 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6388 node_disk = node_disk.Copy()
6389 node_disk.UnsetSize()
6390 lu.cfg.SetDiskID(node_disk, node)
6391 result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6393 msg = result.fail_msg
6395 lu.proc.LogWarning("Could not prepare block device %s on node %s"
6396 " (is_primary=False, pass=1): %s",
6397 inst_disk.iv_name, node, msg)
6398 if not ignore_secondaries:
6401 # FIXME: race condition on drbd migration to primary
6403 # 2nd pass, do only the primary node
6404 for idx, inst_disk in enumerate(disks):
6407 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6408 if node != instance.primary_node:
6411 node_disk = node_disk.Copy()
6412 node_disk.UnsetSize()
6413 lu.cfg.SetDiskID(node_disk, node)
6414 result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6416 msg = result.fail_msg
6418 lu.proc.LogWarning("Could not prepare block device %s on node %s"
6419 " (is_primary=True, pass=2): %s",
6420 inst_disk.iv_name, node, msg)
6423 dev_path = result.payload
6425 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
6427 # leave the disks configured for the primary node
6428 # this is a workaround that would be fixed better by
6429 # improving the logical/physical id handling
6431 lu.cfg.SetDiskID(disk, instance.primary_node)
6433 return disks_ok, device_info
6436 def _StartInstanceDisks(lu, instance, force):
6437 """Start the disks of an instance.
6440 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
6441 ignore_secondaries=force)
6443 _ShutdownInstanceDisks(lu, instance)
6444 if force is not None and not force:
6445 lu.proc.LogWarning("", hint="If the message above refers to a"
6447 " you can retry the operation using '--force'.")
6448 raise errors.OpExecError("Disk consistency error")
6451 class LUInstanceDeactivateDisks(NoHooksLU):
6452 """Shutdown an instance's disks.
6457 def ExpandNames(self):
6458 self._ExpandAndLockInstance()
6459 self.needed_locks[locking.LEVEL_NODE] = []
6460 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6462 def DeclareLocks(self, level):
6463 if level == locking.LEVEL_NODE:
6464 self._LockInstancesNodes()
6466 def CheckPrereq(self):
6467 """Check prerequisites.
6469 This checks that the instance is in the cluster.
6472 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6473 assert self.instance is not None, \
6474 "Cannot retrieve locked instance %s" % self.op.instance_name
6476 def Exec(self, feedback_fn):
6477 """Deactivate the disks
6480 instance = self.instance
6482 _ShutdownInstanceDisks(self, instance)
6484 _SafeShutdownInstanceDisks(self, instance)
6487 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
6488 """Shutdown block devices of an instance.
6490 This function checks if an instance is running, before calling
6491 _ShutdownInstanceDisks.
6494 _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
6495 _ShutdownInstanceDisks(lu, instance, disks=disks)
6498 def _ExpandCheckDisks(instance, disks):
6499 """Return the instance disks selected by the disks list
6501 @type disks: list of L{objects.Disk} or None
6502 @param disks: selected disks
6503 @rtype: list of L{objects.Disk}
6504 @return: selected instance disks to act on
6508 return instance.disks
6510 if not set(disks).issubset(instance.disks):
6511 raise errors.ProgrammerError("Can only act on disks belonging to the"
6516 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
6517 """Shutdown block devices of an instance.
6519 This does the shutdown on all nodes of the instance.
6521 If the ignore_primary is false, errors on the primary node are
6526 disks = _ExpandCheckDisks(instance, disks)
6529 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
6530 lu.cfg.SetDiskID(top_disk, node)
6531 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
6532 msg = result.fail_msg
6534 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
6535 disk.iv_name, node, msg)
6536 if ((node == instance.primary_node and not ignore_primary) or
6537 (node != instance.primary_node and not result.offline)):
6542 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
6543 """Checks if a node has enough free memory.
6545 This function check if a given node has the needed amount of free
6546 memory. In case the node has less memory or we cannot get the
6547 information from the node, this function raise an OpPrereqError
6550 @type lu: C{LogicalUnit}
6551 @param lu: a logical unit from which we get configuration data
6553 @param node: the node to check
6554 @type reason: C{str}
6555 @param reason: string to use in the error message
6556 @type requested: C{int}
6557 @param requested: the amount of memory in MiB to check for
6558 @type hypervisor_name: C{str}
6559 @param hypervisor_name: the hypervisor to ask for memory stats
6561 @return: node current free memory
6562 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
6563 we cannot check the node
6566 nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
6567 nodeinfo[node].Raise("Can't get data from node %s" % node,
6568 prereq=True, ecode=errors.ECODE_ENVIRON)
6569 (_, _, (hv_info, )) = nodeinfo[node].payload
6571 free_mem = hv_info.get("memory_free", None)
6572 if not isinstance(free_mem, int):
6573 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
6574 " was '%s'" % (node, free_mem),
6575 errors.ECODE_ENVIRON)
6576 if requested > free_mem:
6577 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
6578 " needed %s MiB, available %s MiB" %
6579 (node, reason, requested, free_mem),
6584 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
6585 """Checks if nodes have enough free disk space in the all VGs.
6587 This function check if all given nodes have the needed amount of
6588 free disk. In case any node has less disk or we cannot get the
6589 information from the node, this function raise an OpPrereqError
6592 @type lu: C{LogicalUnit}
6593 @param lu: a logical unit from which we get configuration data
6594 @type nodenames: C{list}
6595 @param nodenames: the list of node names to check
6596 @type req_sizes: C{dict}
6597 @param req_sizes: the hash of vg and corresponding amount of disk in
6599 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6600 or we cannot check the node
6603 for vg, req_size in req_sizes.items():
6604 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
6607 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
6608 """Checks if nodes have enough free disk space in the specified VG.
6610 This function check if all given nodes have the needed amount of
6611 free disk. In case any node has less disk or we cannot get the
6612 information from the node, this function raise an OpPrereqError
6615 @type lu: C{LogicalUnit}
6616 @param lu: a logical unit from which we get configuration data
6617 @type nodenames: C{list}
6618 @param nodenames: the list of node names to check
6620 @param vg: the volume group to check
6621 @type requested: C{int}
6622 @param requested: the amount of disk in MiB to check for
6623 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6624 or we cannot check the node
6627 nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
6628 for node in nodenames:
6629 info = nodeinfo[node]
6630 info.Raise("Cannot get current information from node %s" % node,
6631 prereq=True, ecode=errors.ECODE_ENVIRON)
6632 (_, (vg_info, ), _) = info.payload
6633 vg_free = vg_info.get("vg_free", None)
6634 if not isinstance(vg_free, int):
6635 raise errors.OpPrereqError("Can't compute free disk space on node"
6636 " %s for vg %s, result was '%s'" %
6637 (node, vg, vg_free), errors.ECODE_ENVIRON)
6638 if requested > vg_free:
6639 raise errors.OpPrereqError("Not enough disk space on target node %s"
6640 " vg %s: required %d MiB, available %d MiB" %
6641 (node, vg, requested, vg_free),
6645 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
6646 """Checks if nodes have enough physical CPUs
6648 This function checks if all given nodes have the needed number of
6649 physical CPUs. In case any node has less CPUs or we cannot get the
6650 information from the node, this function raises an OpPrereqError
6653 @type lu: C{LogicalUnit}
6654 @param lu: a logical unit from which we get configuration data
6655 @type nodenames: C{list}
6656 @param nodenames: the list of node names to check
6657 @type requested: C{int}
6658 @param requested: the minimum acceptable number of physical CPUs
6659 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
6660 or we cannot check the node
6663 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
6664 for node in nodenames:
6665 info = nodeinfo[node]
6666 info.Raise("Cannot get current information from node %s" % node,
6667 prereq=True, ecode=errors.ECODE_ENVIRON)
6668 (_, _, (hv_info, )) = info.payload
6669 num_cpus = hv_info.get("cpu_total", None)
6670 if not isinstance(num_cpus, int):
6671 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
6672 " on node %s, result was '%s'" %
6673 (node, num_cpus), errors.ECODE_ENVIRON)
6674 if requested > num_cpus:
6675 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
6676 "required" % (node, num_cpus, requested),
6680 class LUInstanceStartup(LogicalUnit):
6681 """Starts an instance.
6684 HPATH = "instance-start"
6685 HTYPE = constants.HTYPE_INSTANCE
6688 def CheckArguments(self):
6690 if self.op.beparams:
6691 # fill the beparams dict
6692 objects.UpgradeBeParams(self.op.beparams)
6693 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6695 def ExpandNames(self):
6696 self._ExpandAndLockInstance()
6697 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
6699 def DeclareLocks(self, level):
6700 if level == locking.LEVEL_NODE_RES:
6701 self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
6703 def BuildHooksEnv(self):
6706 This runs on master, primary and secondary nodes of the instance.
6710 "FORCE": self.op.force,
6713 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6717 def BuildHooksNodes(self):
6718 """Build hooks nodes.
6721 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6724 def CheckPrereq(self):
6725 """Check prerequisites.
6727 This checks that the instance is in the cluster.
6730 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6731 assert self.instance is not None, \
6732 "Cannot retrieve locked instance %s" % self.op.instance_name
6735 if self.op.hvparams:
6736 # check hypervisor parameter syntax (locally)
6737 cluster = self.cfg.GetClusterInfo()
6738 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6739 filled_hvp = cluster.FillHV(instance)
6740 filled_hvp.update(self.op.hvparams)
6741 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
6742 hv_type.CheckParameterSyntax(filled_hvp)
6743 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
6745 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6747 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
6749 if self.primary_offline and self.op.ignore_offline_nodes:
6750 self.proc.LogWarning("Ignoring offline primary node")
6752 if self.op.hvparams or self.op.beparams:
6753 self.proc.LogWarning("Overridden parameters are ignored")
6755 _CheckNodeOnline(self, instance.primary_node)
6757 bep = self.cfg.GetClusterInfo().FillBE(instance)
6758 bep.update(self.op.beparams)
6760 # check bridges existence
6761 _CheckInstanceBridgesExist(self, instance)
6763 remote_info = self.rpc.call_instance_info(instance.primary_node,
6765 instance.hypervisor)
6766 remote_info.Raise("Error checking node %s" % instance.primary_node,
6767 prereq=True, ecode=errors.ECODE_ENVIRON)
6768 if not remote_info.payload: # not running already
6769 _CheckNodeFreeMemory(self, instance.primary_node,
6770 "starting instance %s" % instance.name,
6771 bep[constants.BE_MINMEM], instance.hypervisor)
6773 def Exec(self, feedback_fn):
6774 """Start the instance.
6777 instance = self.instance
6778 force = self.op.force
6780 if not self.op.no_remember:
6781 self.cfg.MarkInstanceUp(instance.name)
6783 if self.primary_offline:
6784 assert self.op.ignore_offline_nodes
6785 self.proc.LogInfo("Primary node offline, marked instance as started")
6787 node_current = instance.primary_node
6789 _StartInstanceDisks(self, instance, force)
6792 self.rpc.call_instance_start(node_current,
6793 (instance, self.op.hvparams,
6795 self.op.startup_paused)
6796 msg = result.fail_msg
6798 _ShutdownInstanceDisks(self, instance)
6799 raise errors.OpExecError("Could not start instance: %s" % msg)
6802 class LUInstanceReboot(LogicalUnit):
6803 """Reboot an instance.
6806 HPATH = "instance-reboot"
6807 HTYPE = constants.HTYPE_INSTANCE
6810 def ExpandNames(self):
6811 self._ExpandAndLockInstance()
6813 def BuildHooksEnv(self):
6816 This runs on master, primary and secondary nodes of the instance.
6820 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
6821 "REBOOT_TYPE": self.op.reboot_type,
6822 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6825 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6829 def BuildHooksNodes(self):
6830 """Build hooks nodes.
6833 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6836 def CheckPrereq(self):
6837 """Check prerequisites.
6839 This checks that the instance is in the cluster.
6842 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6843 assert self.instance is not None, \
6844 "Cannot retrieve locked instance %s" % self.op.instance_name
6845 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6846 _CheckNodeOnline(self, instance.primary_node)
6848 # check bridges existence
6849 _CheckInstanceBridgesExist(self, instance)
6851 def Exec(self, feedback_fn):
6852 """Reboot the instance.
6855 instance = self.instance
6856 ignore_secondaries = self.op.ignore_secondaries
6857 reboot_type = self.op.reboot_type
6859 remote_info = self.rpc.call_instance_info(instance.primary_node,
6861 instance.hypervisor)
6862 remote_info.Raise("Error checking node %s" % instance.primary_node)
6863 instance_running = bool(remote_info.payload)
6865 node_current = instance.primary_node
6867 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
6868 constants.INSTANCE_REBOOT_HARD]:
6869 for disk in instance.disks:
6870 self.cfg.SetDiskID(disk, node_current)
6871 result = self.rpc.call_instance_reboot(node_current, instance,
6873 self.op.shutdown_timeout)
6874 result.Raise("Could not reboot instance")
6876 if instance_running:
6877 result = self.rpc.call_instance_shutdown(node_current, instance,
6878 self.op.shutdown_timeout)
6879 result.Raise("Could not shutdown instance for full reboot")
6880 _ShutdownInstanceDisks(self, instance)
6882 self.LogInfo("Instance %s was already stopped, starting now",
6884 _StartInstanceDisks(self, instance, ignore_secondaries)
6885 result = self.rpc.call_instance_start(node_current,
6886 (instance, None, None), False)
6887 msg = result.fail_msg
6889 _ShutdownInstanceDisks(self, instance)
6890 raise errors.OpExecError("Could not start instance for"
6891 " full reboot: %s" % msg)
6893 self.cfg.MarkInstanceUp(instance.name)
6896 class LUInstanceShutdown(LogicalUnit):
6897 """Shutdown an instance.
6900 HPATH = "instance-stop"
6901 HTYPE = constants.HTYPE_INSTANCE
6904 def ExpandNames(self):
6905 self._ExpandAndLockInstance()
6907 def BuildHooksEnv(self):
6910 This runs on master, primary and secondary nodes of the instance.
6913 env = _BuildInstanceHookEnvByObject(self, self.instance)
6914 env["TIMEOUT"] = self.op.timeout
6917 def BuildHooksNodes(self):
6918 """Build hooks nodes.
6921 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6924 def CheckPrereq(self):
6925 """Check prerequisites.
6927 This checks that the instance is in the cluster.
6930 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6931 assert self.instance is not None, \
6932 "Cannot retrieve locked instance %s" % self.op.instance_name
6934 _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
6936 self.primary_offline = \
6937 self.cfg.GetNodeInfo(self.instance.primary_node).offline
6939 if self.primary_offline and self.op.ignore_offline_nodes:
6940 self.proc.LogWarning("Ignoring offline primary node")
6942 _CheckNodeOnline(self, self.instance.primary_node)
6944 def Exec(self, feedback_fn):
6945 """Shutdown the instance.
6948 instance = self.instance
6949 node_current = instance.primary_node
6950 timeout = self.op.timeout
6952 if not self.op.no_remember:
6953 self.cfg.MarkInstanceDown(instance.name)
6955 if self.primary_offline:
6956 assert self.op.ignore_offline_nodes
6957 self.proc.LogInfo("Primary node offline, marked instance as stopped")
6959 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
6960 msg = result.fail_msg
6962 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
6964 _ShutdownInstanceDisks(self, instance)
6967 class LUInstanceReinstall(LogicalUnit):
6968 """Reinstall an instance.
6971 HPATH = "instance-reinstall"
6972 HTYPE = constants.HTYPE_INSTANCE
6975 def ExpandNames(self):
6976 self._ExpandAndLockInstance()
6978 def BuildHooksEnv(self):
6981 This runs on master, primary and secondary nodes of the instance.
6984 return _BuildInstanceHookEnvByObject(self, self.instance)
6986 def BuildHooksNodes(self):
6987 """Build hooks nodes.
6990 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6993 def CheckPrereq(self):
6994 """Check prerequisites.
6996 This checks that the instance is in the cluster and is not running.
6999 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7000 assert instance is not None, \
7001 "Cannot retrieve locked instance %s" % self.op.instance_name
7002 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
7003 " offline, cannot reinstall")
7004 for node in instance.secondary_nodes:
7005 _CheckNodeOnline(self, node, "Instance secondary node offline,"
7006 " cannot reinstall")
7008 if instance.disk_template == constants.DT_DISKLESS:
7009 raise errors.OpPrereqError("Instance '%s' has no disks" %
7010 self.op.instance_name,
7012 _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
7014 if self.op.os_type is not None:
7016 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
7017 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
7018 instance_os = self.op.os_type
7020 instance_os = instance.os
7022 nodelist = list(instance.all_nodes)
7024 if self.op.osparams:
7025 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
7026 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
7027 self.os_inst = i_osdict # the new dict (without defaults)
7031 self.instance = instance
7033 def Exec(self, feedback_fn):
7034 """Reinstall the instance.
7037 inst = self.instance
7039 if self.op.os_type is not None:
7040 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
7041 inst.os = self.op.os_type
7042 # Write to configuration
7043 self.cfg.Update(inst, feedback_fn)
7045 _StartInstanceDisks(self, inst, None)
7047 feedback_fn("Running the instance OS create scripts...")
7048 # FIXME: pass debug option from opcode to backend
7049 result = self.rpc.call_instance_os_add(inst.primary_node,
7050 (inst, self.os_inst), True,
7051 self.op.debug_level)
7052 result.Raise("Could not install OS for instance %s on node %s" %
7053 (inst.name, inst.primary_node))
7055 _ShutdownInstanceDisks(self, inst)
7058 class LUInstanceRecreateDisks(LogicalUnit):
7059 """Recreate an instance's missing disks.
7062 HPATH = "instance-recreate-disks"
7063 HTYPE = constants.HTYPE_INSTANCE
7066 _MODIFYABLE = frozenset([
7067 constants.IDISK_SIZE,
7068 constants.IDISK_MODE,
7071 # New or changed disk parameters may have different semantics
7072 assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
7073 constants.IDISK_ADOPT,
7075 # TODO: Implement support changing VG while recreating
7077 constants.IDISK_METAVG,
7080 def CheckArguments(self):
7081 if self.op.disks and ht.TPositiveInt(self.op.disks[0]):
7082 # Normalize and convert deprecated list of disk indices
7083 self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
7085 duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
7087 raise errors.OpPrereqError("Some disks have been specified more than"
7088 " once: %s" % utils.CommaJoin(duplicates),
7091 for (idx, params) in self.op.disks:
7092 utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
7093 unsupported = frozenset(params.keys()) - self._MODIFYABLE
7095 raise errors.OpPrereqError("Parameters for disk %s try to change"
7096 " unmodifyable parameter(s): %s" %
7097 (idx, utils.CommaJoin(unsupported)),
7100 def ExpandNames(self):
7101 self._ExpandAndLockInstance()
7102 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7104 self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
7105 self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
7107 self.needed_locks[locking.LEVEL_NODE] = []
7108 self.needed_locks[locking.LEVEL_NODE_RES] = []
7110 def DeclareLocks(self, level):
7111 if level == locking.LEVEL_NODE:
7112 # if we replace the nodes, we only need to lock the old primary,
7113 # otherwise we need to lock all nodes for disk re-creation
7114 primary_only = bool(self.op.nodes)
7115 self._LockInstancesNodes(primary_only=primary_only)
7116 elif level == locking.LEVEL_NODE_RES:
7118 self.needed_locks[locking.LEVEL_NODE_RES] = \
7119 self.needed_locks[locking.LEVEL_NODE][:]
7121 def BuildHooksEnv(self):
7124 This runs on master, primary and secondary nodes of the instance.
7127 return _BuildInstanceHookEnvByObject(self, self.instance)
7129 def BuildHooksNodes(self):
7130 """Build hooks nodes.
7133 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7136 def CheckPrereq(self):
7137 """Check prerequisites.
7139 This checks that the instance is in the cluster and is not running.
7142 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7143 assert instance is not None, \
7144 "Cannot retrieve locked instance %s" % self.op.instance_name
7146 if len(self.op.nodes) != len(instance.all_nodes):
7147 raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
7148 " %d replacement nodes were specified" %
7149 (instance.name, len(instance.all_nodes),
7150 len(self.op.nodes)),
7152 assert instance.disk_template != constants.DT_DRBD8 or \
7153 len(self.op.nodes) == 2
7154 assert instance.disk_template != constants.DT_PLAIN or \
7155 len(self.op.nodes) == 1
7156 primary_node = self.op.nodes[0]
7158 primary_node = instance.primary_node
7159 _CheckNodeOnline(self, primary_node)
7161 if instance.disk_template == constants.DT_DISKLESS:
7162 raise errors.OpPrereqError("Instance '%s' has no disks" %
7163 self.op.instance_name, errors.ECODE_INVAL)
7165 # if we replace nodes *and* the old primary is offline, we don't
7167 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
7168 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE_RES)
7169 old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
7170 if not (self.op.nodes and old_pnode.offline):
7171 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7172 msg="cannot recreate disks")
7175 self.disks = dict(self.op.disks)
7177 self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
7179 maxidx = max(self.disks.keys())
7180 if maxidx >= len(instance.disks):
7181 raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
7184 if (self.op.nodes and
7185 sorted(self.disks.keys()) != range(len(instance.disks))):
7186 raise errors.OpPrereqError("Can't recreate disks partially and"
7187 " change the nodes at the same time",
7190 self.instance = instance
7192 def Exec(self, feedback_fn):
7193 """Recreate the disks.
7196 instance = self.instance
7198 assert (self.owned_locks(locking.LEVEL_NODE) ==
7199 self.owned_locks(locking.LEVEL_NODE_RES))
7202 mods = [] # keeps track of needed changes
7204 for idx, disk in enumerate(instance.disks):
7206 changes = self.disks[idx]
7208 # Disk should not be recreated
7212 # update secondaries for disks, if needed
7213 if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
7214 # need to update the nodes and minors
7215 assert len(self.op.nodes) == 2
7216 assert len(disk.logical_id) == 6 # otherwise disk internals
7218 (_, _, old_port, _, _, old_secret) = disk.logical_id
7219 new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
7220 new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
7221 new_minors[0], new_minors[1], old_secret)
7222 assert len(disk.logical_id) == len(new_id)
7226 mods.append((idx, new_id, changes))
7228 # now that we have passed all asserts above, we can apply the mods
7229 # in a single run (to avoid partial changes)
7230 for idx, new_id, changes in mods:
7231 disk = instance.disks[idx]
7232 if new_id is not None:
7233 assert disk.dev_type == constants.LD_DRBD8
7234 disk.logical_id = new_id
7236 disk.Update(size=changes.get(constants.IDISK_SIZE, None),
7237 mode=changes.get(constants.IDISK_MODE, None))
7239 # change primary node, if needed
7241 instance.primary_node = self.op.nodes[0]
7242 self.LogWarning("Changing the instance's nodes, you will have to"
7243 " remove any disks left on the older nodes manually")
7246 self.cfg.Update(instance, feedback_fn)
7248 _CreateDisks(self, instance, to_skip=to_skip)
7251 class LUInstanceRename(LogicalUnit):
7252 """Rename an instance.
7255 HPATH = "instance-rename"
7256 HTYPE = constants.HTYPE_INSTANCE
7258 def CheckArguments(self):
7262 if self.op.ip_check and not self.op.name_check:
7263 # TODO: make the ip check more flexible and not depend on the name check
7264 raise errors.OpPrereqError("IP address check requires a name check",
7267 def BuildHooksEnv(self):
7270 This runs on master, primary and secondary nodes of the instance.
7273 env = _BuildInstanceHookEnvByObject(self, self.instance)
7274 env["INSTANCE_NEW_NAME"] = self.op.new_name
7277 def BuildHooksNodes(self):
7278 """Build hooks nodes.
7281 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7284 def CheckPrereq(self):
7285 """Check prerequisites.
7287 This checks that the instance is in the cluster and is not running.
7290 self.op.instance_name = _ExpandInstanceName(self.cfg,
7291 self.op.instance_name)
7292 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7293 assert instance is not None
7294 _CheckNodeOnline(self, instance.primary_node)
7295 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7296 msg="cannot rename")
7297 self.instance = instance
7299 new_name = self.op.new_name
7300 if self.op.name_check:
7301 hostname = netutils.GetHostname(name=new_name)
7302 if hostname.name != new_name:
7303 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
7305 if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
7306 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
7307 " same as given hostname '%s'") %
7308 (hostname.name, self.op.new_name),
7310 new_name = self.op.new_name = hostname.name
7311 if (self.op.ip_check and
7312 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
7313 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7314 (hostname.ip, new_name),
7315 errors.ECODE_NOTUNIQUE)
7317 instance_list = self.cfg.GetInstanceList()
7318 if new_name in instance_list and new_name != instance.name:
7319 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7320 new_name, errors.ECODE_EXISTS)
7322 def Exec(self, feedback_fn):
7323 """Rename the instance.
7326 inst = self.instance
7327 old_name = inst.name
7329 rename_file_storage = False
7330 if (inst.disk_template in constants.DTS_FILEBASED and
7331 self.op.new_name != inst.name):
7332 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7333 rename_file_storage = True
7335 self.cfg.RenameInstance(inst.name, self.op.new_name)
7336 # Change the instance lock. This is definitely safe while we hold the BGL.
7337 # Otherwise the new lock would have to be added in acquired mode.
7339 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
7340 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
7342 # re-read the instance from the configuration after rename
7343 inst = self.cfg.GetInstanceInfo(self.op.new_name)
7345 if rename_file_storage:
7346 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7347 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
7348 old_file_storage_dir,
7349 new_file_storage_dir)
7350 result.Raise("Could not rename on node %s directory '%s' to '%s'"
7351 " (but the instance has been renamed in Ganeti)" %
7352 (inst.primary_node, old_file_storage_dir,
7353 new_file_storage_dir))
7355 _StartInstanceDisks(self, inst, None)
7357 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
7358 old_name, self.op.debug_level)
7359 msg = result.fail_msg
7361 msg = ("Could not run OS rename script for instance %s on node %s"
7362 " (but the instance has been renamed in Ganeti): %s" %
7363 (inst.name, inst.primary_node, msg))
7364 self.proc.LogWarning(msg)
7366 _ShutdownInstanceDisks(self, inst)
7371 class LUInstanceRemove(LogicalUnit):
7372 """Remove an instance.
7375 HPATH = "instance-remove"
7376 HTYPE = constants.HTYPE_INSTANCE
7379 def ExpandNames(self):
7380 self._ExpandAndLockInstance()
7381 self.needed_locks[locking.LEVEL_NODE] = []
7382 self.needed_locks[locking.LEVEL_NODE_RES] = []
7383 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7385 def DeclareLocks(self, level):
7386 if level == locking.LEVEL_NODE:
7387 self._LockInstancesNodes()
7388 elif level == locking.LEVEL_NODE_RES:
7390 self.needed_locks[locking.LEVEL_NODE_RES] = \
7391 self.needed_locks[locking.LEVEL_NODE][:]
7393 def BuildHooksEnv(self):
7396 This runs on master, primary and secondary nodes of the instance.
7399 env = _BuildInstanceHookEnvByObject(self, self.instance)
7400 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
7403 def BuildHooksNodes(self):
7404 """Build hooks nodes.
7407 nl = [self.cfg.GetMasterNode()]
7408 nl_post = list(self.instance.all_nodes) + nl
7409 return (nl, nl_post)
7411 def CheckPrereq(self):
7412 """Check prerequisites.
7414 This checks that the instance is in the cluster.
7417 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7418 assert self.instance is not None, \
7419 "Cannot retrieve locked instance %s" % self.op.instance_name
7421 def Exec(self, feedback_fn):
7422 """Remove the instance.
7425 instance = self.instance
7426 logging.info("Shutting down instance %s on node %s",
7427 instance.name, instance.primary_node)
7429 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
7430 self.op.shutdown_timeout)
7431 msg = result.fail_msg
7433 if self.op.ignore_failures:
7434 feedback_fn("Warning: can't shutdown instance: %s" % msg)
7436 raise errors.OpExecError("Could not shutdown instance %s on"
7438 (instance.name, instance.primary_node, msg))
7440 assert (self.owned_locks(locking.LEVEL_NODE) ==
7441 self.owned_locks(locking.LEVEL_NODE_RES))
7442 assert not (set(instance.all_nodes) -
7443 self.owned_locks(locking.LEVEL_NODE)), \
7444 "Not owning correct locks"
7446 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
7449 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
7450 """Utility function to remove an instance.
7453 logging.info("Removing block devices for instance %s", instance.name)
7455 if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
7456 if not ignore_failures:
7457 raise errors.OpExecError("Can't remove instance's disks")
7458 feedback_fn("Warning: can't remove instance's disks")
7460 logging.info("Removing instance %s out of cluster config", instance.name)
7462 lu.cfg.RemoveInstance(instance.name)
7464 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
7465 "Instance lock removal conflict"
7467 # Remove lock for the instance
7468 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
7471 class LUInstanceQuery(NoHooksLU):
7472 """Logical unit for querying instances.
7475 # pylint: disable=W0142
7478 def CheckArguments(self):
7479 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
7480 self.op.output_fields, self.op.use_locking)
7482 def ExpandNames(self):
7483 self.iq.ExpandNames(self)
7485 def DeclareLocks(self, level):
7486 self.iq.DeclareLocks(self, level)
7488 def Exec(self, feedback_fn):
7489 return self.iq.OldStyleQuery(self)
7492 class LUInstanceFailover(LogicalUnit):
7493 """Failover an instance.
7496 HPATH = "instance-failover"
7497 HTYPE = constants.HTYPE_INSTANCE
7500 def CheckArguments(self):
7501 """Check the arguments.
7504 self.iallocator = getattr(self.op, "iallocator", None)
7505 self.target_node = getattr(self.op, "target_node", None)
7507 def ExpandNames(self):
7508 self._ExpandAndLockInstance()
7510 if self.op.target_node is not None:
7511 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7513 self.needed_locks[locking.LEVEL_NODE] = []
7514 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7516 self.needed_locks[locking.LEVEL_NODE_RES] = []
7517 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
7519 ignore_consistency = self.op.ignore_consistency
7520 shutdown_timeout = self.op.shutdown_timeout
7521 self._migrater = TLMigrateInstance(self, self.op.instance_name,
7524 ignore_consistency=ignore_consistency,
7525 shutdown_timeout=shutdown_timeout,
7526 ignore_ipolicy=self.op.ignore_ipolicy)
7527 self.tasklets = [self._migrater]
7529 def DeclareLocks(self, level):
7530 if level == locking.LEVEL_NODE:
7531 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
7532 if instance.disk_template in constants.DTS_EXT_MIRROR:
7533 if self.op.target_node is None:
7534 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7536 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7537 self.op.target_node]
7538 del self.recalculate_locks[locking.LEVEL_NODE]
7540 self._LockInstancesNodes()
7541 elif level == locking.LEVEL_NODE_RES:
7543 self.needed_locks[locking.LEVEL_NODE_RES] = \
7544 self.needed_locks[locking.LEVEL_NODE][:]
7546 def BuildHooksEnv(self):
7549 This runs on master, primary and secondary nodes of the instance.
7552 instance = self._migrater.instance
7553 source_node = instance.primary_node
7554 target_node = self.op.target_node
7556 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
7557 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7558 "OLD_PRIMARY": source_node,
7559 "NEW_PRIMARY": target_node,
7562 if instance.disk_template in constants.DTS_INT_MIRROR:
7563 env["OLD_SECONDARY"] = instance.secondary_nodes[0]
7564 env["NEW_SECONDARY"] = source_node
7566 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
7568 env.update(_BuildInstanceHookEnvByObject(self, instance))
7572 def BuildHooksNodes(self):
7573 """Build hooks nodes.
7576 instance = self._migrater.instance
7577 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
7578 return (nl, nl + [instance.primary_node])
7581 class LUInstanceMigrate(LogicalUnit):
7582 """Migrate an instance.
7584 This is migration without shutting down, compared to the failover,
7585 which is done with shutdown.
7588 HPATH = "instance-migrate"
7589 HTYPE = constants.HTYPE_INSTANCE
7592 def ExpandNames(self):
7593 self._ExpandAndLockInstance()
7595 if self.op.target_node is not None:
7596 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7598 self.needed_locks[locking.LEVEL_NODE] = []
7599 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7601 self.needed_locks[locking.LEVEL_NODE] = []
7602 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7605 TLMigrateInstance(self, self.op.instance_name,
7606 cleanup=self.op.cleanup,
7608 fallback=self.op.allow_failover,
7609 allow_runtime_changes=self.op.allow_runtime_changes,
7610 ignore_ipolicy=self.op.ignore_ipolicy)
7611 self.tasklets = [self._migrater]
7613 def DeclareLocks(self, level):
7614 if level == locking.LEVEL_NODE:
7615 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
7616 if instance.disk_template in constants.DTS_EXT_MIRROR:
7617 if self.op.target_node is None:
7618 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7620 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7621 self.op.target_node]
7622 del self.recalculate_locks[locking.LEVEL_NODE]
7624 self._LockInstancesNodes()
7625 elif level == locking.LEVEL_NODE_RES:
7627 self.needed_locks[locking.LEVEL_NODE_RES] = \
7628 self.needed_locks[locking.LEVEL_NODE][:]
7630 def BuildHooksEnv(self):
7633 This runs on master, primary and secondary nodes of the instance.
7636 instance = self._migrater.instance
7637 source_node = instance.primary_node
7638 target_node = self.op.target_node
7639 env = _BuildInstanceHookEnvByObject(self, instance)
7641 "MIGRATE_LIVE": self._migrater.live,
7642 "MIGRATE_CLEANUP": self.op.cleanup,
7643 "OLD_PRIMARY": source_node,
7644 "NEW_PRIMARY": target_node,
7645 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7648 if instance.disk_template in constants.DTS_INT_MIRROR:
7649 env["OLD_SECONDARY"] = target_node
7650 env["NEW_SECONDARY"] = source_node
7652 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
7656 def BuildHooksNodes(self):
7657 """Build hooks nodes.
7660 instance = self._migrater.instance
7661 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
7662 return (nl, nl + [instance.primary_node])
7665 class LUInstanceMove(LogicalUnit):
7666 """Move an instance by data-copying.
7669 HPATH = "instance-move"
7670 HTYPE = constants.HTYPE_INSTANCE
7673 def ExpandNames(self):
7674 self._ExpandAndLockInstance()
7675 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7676 self.op.target_node = target_node
7677 self.needed_locks[locking.LEVEL_NODE] = [target_node]
7678 self.needed_locks[locking.LEVEL_NODE_RES] = []
7679 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7681 def DeclareLocks(self, level):
7682 if level == locking.LEVEL_NODE:
7683 self._LockInstancesNodes(primary_only=True)
7684 elif level == locking.LEVEL_NODE_RES:
7686 self.needed_locks[locking.LEVEL_NODE_RES] = \
7687 self.needed_locks[locking.LEVEL_NODE][:]
7689 def BuildHooksEnv(self):
7692 This runs on master, primary and secondary nodes of the instance.
7696 "TARGET_NODE": self.op.target_node,
7697 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7699 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7702 def BuildHooksNodes(self):
7703 """Build hooks nodes.
7707 self.cfg.GetMasterNode(),
7708 self.instance.primary_node,
7709 self.op.target_node,
7713 def CheckPrereq(self):
7714 """Check prerequisites.
7716 This checks that the instance is in the cluster.
7719 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7720 assert self.instance is not None, \
7721 "Cannot retrieve locked instance %s" % self.op.instance_name
7723 node = self.cfg.GetNodeInfo(self.op.target_node)
7724 assert node is not None, \
7725 "Cannot retrieve locked node %s" % self.op.target_node
7727 self.target_node = target_node = node.name
7729 if target_node == instance.primary_node:
7730 raise errors.OpPrereqError("Instance %s is already on the node %s" %
7731 (instance.name, target_node),
7734 bep = self.cfg.GetClusterInfo().FillBE(instance)
7736 for idx, dsk in enumerate(instance.disks):
7737 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
7738 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
7739 " cannot copy" % idx, errors.ECODE_STATE)
7741 _CheckNodeOnline(self, target_node)
7742 _CheckNodeNotDrained(self, target_node)
7743 _CheckNodeVmCapable(self, target_node)
7744 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
7745 self.cfg.GetNodeGroup(node.group))
7746 _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
7747 ignore=self.op.ignore_ipolicy)
7749 if instance.admin_state == constants.ADMINST_UP:
7750 # check memory requirements on the secondary node
7751 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
7752 instance.name, bep[constants.BE_MAXMEM],
7753 instance.hypervisor)
7755 self.LogInfo("Not checking memory on the secondary node as"
7756 " instance will not be started")
7758 # check bridge existance
7759 _CheckInstanceBridgesExist(self, instance, node=target_node)
7761 def Exec(self, feedback_fn):
7762 """Move an instance.
7764 The move is done by shutting it down on its present node, copying
7765 the data over (slow) and starting it on the new node.
7768 instance = self.instance
7770 source_node = instance.primary_node
7771 target_node = self.target_node
7773 self.LogInfo("Shutting down instance %s on source node %s",
7774 instance.name, source_node)
7776 assert (self.owned_locks(locking.LEVEL_NODE) ==
7777 self.owned_locks(locking.LEVEL_NODE_RES))
7779 result = self.rpc.call_instance_shutdown(source_node, instance,
7780 self.op.shutdown_timeout)
7781 msg = result.fail_msg
7783 if self.op.ignore_consistency:
7784 self.proc.LogWarning("Could not shutdown instance %s on node %s."
7785 " Proceeding anyway. Please make sure node"
7786 " %s is down. Error details: %s",
7787 instance.name, source_node, source_node, msg)
7789 raise errors.OpExecError("Could not shutdown instance %s on"
7791 (instance.name, source_node, msg))
7793 # create the target disks
7795 _CreateDisks(self, instance, target_node=target_node)
7796 except errors.OpExecError:
7797 self.LogWarning("Device creation failed, reverting...")
7799 _RemoveDisks(self, instance, target_node=target_node)
7801 self.cfg.ReleaseDRBDMinors(instance.name)
7804 cluster_name = self.cfg.GetClusterInfo().cluster_name
7807 # activate, get path, copy the data over
7808 for idx, disk in enumerate(instance.disks):
7809 self.LogInfo("Copying data for disk %d", idx)
7810 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
7811 instance.name, True, idx)
7813 self.LogWarning("Can't assemble newly created disk %d: %s",
7814 idx, result.fail_msg)
7815 errs.append(result.fail_msg)
7817 dev_path = result.payload
7818 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
7819 target_node, dev_path,
7822 self.LogWarning("Can't copy data over for disk %d: %s",
7823 idx, result.fail_msg)
7824 errs.append(result.fail_msg)
7828 self.LogWarning("Some disks failed to copy, aborting")
7830 _RemoveDisks(self, instance, target_node=target_node)
7832 self.cfg.ReleaseDRBDMinors(instance.name)
7833 raise errors.OpExecError("Errors during disk copy: %s" %
7836 instance.primary_node = target_node
7837 self.cfg.Update(instance, feedback_fn)
7839 self.LogInfo("Removing the disks on the original node")
7840 _RemoveDisks(self, instance, target_node=source_node)
7842 # Only start the instance if it's marked as up
7843 if instance.admin_state == constants.ADMINST_UP:
7844 self.LogInfo("Starting instance %s on node %s",
7845 instance.name, target_node)
7847 disks_ok, _ = _AssembleInstanceDisks(self, instance,
7848 ignore_secondaries=True)
7850 _ShutdownInstanceDisks(self, instance)
7851 raise errors.OpExecError("Can't activate the instance's disks")
7853 result = self.rpc.call_instance_start(target_node,
7854 (instance, None, None), False)
7855 msg = result.fail_msg
7857 _ShutdownInstanceDisks(self, instance)
7858 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
7859 (instance.name, target_node, msg))
7862 class LUNodeMigrate(LogicalUnit):
7863 """Migrate all instances from a node.
7866 HPATH = "node-migrate"
7867 HTYPE = constants.HTYPE_NODE
7870 def CheckArguments(self):
7873 def ExpandNames(self):
7874 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7876 self.share_locks = _ShareAll()
7877 self.needed_locks = {
7878 locking.LEVEL_NODE: [self.op.node_name],
7881 def BuildHooksEnv(self):
7884 This runs on the master, the primary and all the secondaries.
7888 "NODE_NAME": self.op.node_name,
7889 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7892 def BuildHooksNodes(self):
7893 """Build hooks nodes.
7896 nl = [self.cfg.GetMasterNode()]
7899 def CheckPrereq(self):
7902 def Exec(self, feedback_fn):
7903 # Prepare jobs for migration instances
7904 allow_runtime_changes = self.op.allow_runtime_changes
7906 [opcodes.OpInstanceMigrate(instance_name=inst.name,
7909 iallocator=self.op.iallocator,
7910 target_node=self.op.target_node,
7911 allow_runtime_changes=allow_runtime_changes,
7912 ignore_ipolicy=self.op.ignore_ipolicy)]
7913 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
7916 # TODO: Run iallocator in this opcode and pass correct placement options to
7917 # OpInstanceMigrate. Since other jobs can modify the cluster between
7918 # running the iallocator and the actual migration, a good consistency model
7919 # will have to be found.
7921 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
7922 frozenset([self.op.node_name]))
7924 return ResultWithJobs(jobs)
7927 class TLMigrateInstance(Tasklet):
7928 """Tasklet class for instance migration.
7931 @ivar live: whether the migration will be done live or non-live;
7932 this variable is initalized only after CheckPrereq has run
7933 @type cleanup: boolean
7934 @ivar cleanup: Wheater we cleanup from a failed migration
7935 @type iallocator: string
7936 @ivar iallocator: The iallocator used to determine target_node
7937 @type target_node: string
7938 @ivar target_node: If given, the target_node to reallocate the instance to
7939 @type failover: boolean
7940 @ivar failover: Whether operation results in failover or migration
7941 @type fallback: boolean
7942 @ivar fallback: Whether fallback to failover is allowed if migration not
7944 @type ignore_consistency: boolean
7945 @ivar ignore_consistency: Wheter we should ignore consistency between source
7947 @type shutdown_timeout: int
7948 @ivar shutdown_timeout: In case of failover timeout of the shutdown
7949 @type ignore_ipolicy: bool
7950 @ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
7955 _MIGRATION_POLL_INTERVAL = 1 # seconds
7956 _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
7958 def __init__(self, lu, instance_name, cleanup=False,
7959 failover=False, fallback=False,
7960 ignore_consistency=False,
7961 allow_runtime_changes=True,
7962 shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
7963 ignore_ipolicy=False):
7964 """Initializes this class.
7967 Tasklet.__init__(self, lu)
7970 self.instance_name = instance_name
7971 self.cleanup = cleanup
7972 self.live = False # will be overridden later
7973 self.failover = failover
7974 self.fallback = fallback
7975 self.ignore_consistency = ignore_consistency
7976 self.shutdown_timeout = shutdown_timeout
7977 self.ignore_ipolicy = ignore_ipolicy
7978 self.allow_runtime_changes = allow_runtime_changes
7980 def CheckPrereq(self):
7981 """Check prerequisites.
7983 This checks that the instance is in the cluster.
7986 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
7987 instance = self.cfg.GetInstanceInfo(instance_name)
7988 assert instance is not None
7989 self.instance = instance
7990 cluster = self.cfg.GetClusterInfo()
7992 if (not self.cleanup and
7993 not instance.admin_state == constants.ADMINST_UP and
7994 not self.failover and self.fallback):
7995 self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
7996 " switching to failover")
7997 self.failover = True
7999 if instance.disk_template not in constants.DTS_MIRRORED:
8004 raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
8005 " %s" % (instance.disk_template, text),
8008 if instance.disk_template in constants.DTS_EXT_MIRROR:
8009 _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
8011 if self.lu.op.iallocator:
8012 self._RunAllocator()
8014 # We set set self.target_node as it is required by
8016 self.target_node = self.lu.op.target_node
8018 # Check that the target node is correct in terms of instance policy
8019 nodeinfo = self.cfg.GetNodeInfo(self.target_node)
8020 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8021 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
8022 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8023 ignore=self.ignore_ipolicy)
8025 # self.target_node is already populated, either directly or by the
8027 target_node = self.target_node
8028 if self.target_node == instance.primary_node:
8029 raise errors.OpPrereqError("Cannot migrate instance %s"
8030 " to its primary (%s)" %
8031 (instance.name, instance.primary_node))
8033 if len(self.lu.tasklets) == 1:
8034 # It is safe to release locks only when we're the only tasklet
8036 _ReleaseLocks(self.lu, locking.LEVEL_NODE,
8037 keep=[instance.primary_node, self.target_node])
8040 secondary_nodes = instance.secondary_nodes
8041 if not secondary_nodes:
8042 raise errors.ConfigurationError("No secondary node but using"
8043 " %s disk template" %
8044 instance.disk_template)
8045 target_node = secondary_nodes[0]
8046 if self.lu.op.iallocator or (self.lu.op.target_node and
8047 self.lu.op.target_node != target_node):
8049 text = "failed over"
8052 raise errors.OpPrereqError("Instances with disk template %s cannot"
8053 " be %s to arbitrary nodes"
8054 " (neither an iallocator nor a target"
8055 " node can be passed)" %
8056 (instance.disk_template, text),
8058 nodeinfo = self.cfg.GetNodeInfo(target_node)
8059 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8060 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
8061 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8062 ignore=self.ignore_ipolicy)
8064 i_be = cluster.FillBE(instance)
8066 # check memory requirements on the secondary node
8067 if (not self.cleanup and
8068 (not self.failover or instance.admin_state == constants.ADMINST_UP)):
8069 self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
8070 "migrating instance %s" %
8072 i_be[constants.BE_MINMEM],
8073 instance.hypervisor)
8075 self.lu.LogInfo("Not checking memory on the secondary node as"
8076 " instance will not be started")
8078 # check if failover must be forced instead of migration
8079 if (not self.cleanup and not self.failover and
8080 i_be[constants.BE_ALWAYS_FAILOVER]):
8082 self.lu.LogInfo("Instance configured to always failover; fallback"
8084 self.failover = True
8086 raise errors.OpPrereqError("This instance has been configured to"
8087 " always failover, please allow failover",
8090 # check bridge existance
8091 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
8093 if not self.cleanup:
8094 _CheckNodeNotDrained(self.lu, target_node)
8095 if not self.failover:
8096 result = self.rpc.call_instance_migratable(instance.primary_node,
8098 if result.fail_msg and self.fallback:
8099 self.lu.LogInfo("Can't migrate, instance offline, fallback to"
8101 self.failover = True
8103 result.Raise("Can't migrate, please use failover",
8104 prereq=True, ecode=errors.ECODE_STATE)
8106 assert not (self.failover and self.cleanup)
8108 if not self.failover:
8109 if self.lu.op.live is not None and self.lu.op.mode is not None:
8110 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
8111 " parameters are accepted",
8113 if self.lu.op.live is not None:
8115 self.lu.op.mode = constants.HT_MIGRATION_LIVE
8117 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
8118 # reset the 'live' parameter to None so that repeated
8119 # invocations of CheckPrereq do not raise an exception
8120 self.lu.op.live = None
8121 elif self.lu.op.mode is None:
8122 # read the default value from the hypervisor
8123 i_hv = cluster.FillHV(self.instance, skip_globals=False)
8124 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
8126 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
8128 # Failover is never live
8131 if not (self.failover or self.cleanup):
8132 remote_info = self.rpc.call_instance_info(instance.primary_node,
8134 instance.hypervisor)
8135 remote_info.Raise("Error checking instance on node %s" %
8136 instance.primary_node)
8137 instance_running = bool(remote_info.payload)
8138 if instance_running:
8139 self.current_mem = int(remote_info.payload["memory"])
8141 def _RunAllocator(self):
8142 """Run the allocator based on input opcode.
8145 # FIXME: add a self.ignore_ipolicy option
8146 ial = IAllocator(self.cfg, self.rpc,
8147 mode=constants.IALLOCATOR_MODE_RELOC,
8148 name=self.instance_name,
8149 relocate_from=[self.instance.primary_node],
8152 ial.Run(self.lu.op.iallocator)
8155 raise errors.OpPrereqError("Can't compute nodes using"
8156 " iallocator '%s': %s" %
8157 (self.lu.op.iallocator, ial.info),
8159 if len(ial.result) != ial.required_nodes:
8160 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8161 " of nodes (%s), required %s" %
8162 (self.lu.op.iallocator, len(ial.result),
8163 ial.required_nodes), errors.ECODE_FAULT)
8164 self.target_node = ial.result[0]
8165 self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8166 self.instance_name, self.lu.op.iallocator,
8167 utils.CommaJoin(ial.result))
8169 def _WaitUntilSync(self):
8170 """Poll with custom rpc for disk sync.
8172 This uses our own step-based rpc call.
8175 self.feedback_fn("* wait until resync is done")
8179 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
8181 (self.instance.disks,
8184 for node, nres in result.items():
8185 nres.Raise("Cannot resync disks on node %s" % node)
8186 node_done, node_percent = nres.payload
8187 all_done = all_done and node_done
8188 if node_percent is not None:
8189 min_percent = min(min_percent, node_percent)
8191 if min_percent < 100:
8192 self.feedback_fn(" - progress: %.1f%%" % min_percent)
8195 def _EnsureSecondary(self, node):
8196 """Demote a node to secondary.
8199 self.feedback_fn("* switching node %s to secondary mode" % node)
8201 for dev in self.instance.disks:
8202 self.cfg.SetDiskID(dev, node)
8204 result = self.rpc.call_blockdev_close(node, self.instance.name,
8205 self.instance.disks)
8206 result.Raise("Cannot change disk to secondary on node %s" % node)
8208 def _GoStandalone(self):
8209 """Disconnect from the network.
8212 self.feedback_fn("* changing into standalone mode")
8213 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
8214 self.instance.disks)
8215 for node, nres in result.items():
8216 nres.Raise("Cannot disconnect disks node %s" % node)
8218 def _GoReconnect(self, multimaster):
8219 """Reconnect to the network.
8225 msg = "single-master"
8226 self.feedback_fn("* changing disks into %s mode" % msg)
8227 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
8228 (self.instance.disks, self.instance),
8229 self.instance.name, multimaster)
8230 for node, nres in result.items():
8231 nres.Raise("Cannot change disks config on node %s" % node)
8233 def _ExecCleanup(self):
8234 """Try to cleanup after a failed migration.
8236 The cleanup is done by:
8237 - check that the instance is running only on one node
8238 (and update the config if needed)
8239 - change disks on its secondary node to secondary
8240 - wait until disks are fully synchronized
8241 - disconnect from the network
8242 - change disks into single-master mode
8243 - wait again until disks are fully synchronized
8246 instance = self.instance
8247 target_node = self.target_node
8248 source_node = self.source_node
8250 # check running on only one node
8251 self.feedback_fn("* checking where the instance actually runs"
8252 " (if this hangs, the hypervisor might be in"
8254 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
8255 for node, result in ins_l.items():
8256 result.Raise("Can't contact node %s" % node)
8258 runningon_source = instance.name in ins_l[source_node].payload
8259 runningon_target = instance.name in ins_l[target_node].payload
8261 if runningon_source and runningon_target:
8262 raise errors.OpExecError("Instance seems to be running on two nodes,"
8263 " or the hypervisor is confused; you will have"
8264 " to ensure manually that it runs only on one"
8265 " and restart this operation")
8267 if not (runningon_source or runningon_target):
8268 raise errors.OpExecError("Instance does not seem to be running at all;"
8269 " in this case it's safer to repair by"
8270 " running 'gnt-instance stop' to ensure disk"
8271 " shutdown, and then restarting it")
8273 if runningon_target:
8274 # the migration has actually succeeded, we need to update the config
8275 self.feedback_fn("* instance running on secondary node (%s),"
8276 " updating config" % target_node)
8277 instance.primary_node = target_node
8278 self.cfg.Update(instance, self.feedback_fn)
8279 demoted_node = source_node
8281 self.feedback_fn("* instance confirmed to be running on its"
8282 " primary node (%s)" % source_node)
8283 demoted_node = target_node
8285 if instance.disk_template in constants.DTS_INT_MIRROR:
8286 self._EnsureSecondary(demoted_node)
8288 self._WaitUntilSync()
8289 except errors.OpExecError:
8290 # we ignore here errors, since if the device is standalone, it
8291 # won't be able to sync
8293 self._GoStandalone()
8294 self._GoReconnect(False)
8295 self._WaitUntilSync()
8297 self.feedback_fn("* done")
8299 def _RevertDiskStatus(self):
8300 """Try to revert the disk status after a failed migration.
8303 target_node = self.target_node
8304 if self.instance.disk_template in constants.DTS_EXT_MIRROR:
8308 self._EnsureSecondary(target_node)
8309 self._GoStandalone()
8310 self._GoReconnect(False)
8311 self._WaitUntilSync()
8312 except errors.OpExecError, err:
8313 self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
8314 " please try to recover the instance manually;"
8315 " error '%s'" % str(err))
8317 def _AbortMigration(self):
8318 """Call the hypervisor code to abort a started migration.
8321 instance = self.instance
8322 target_node = self.target_node
8323 source_node = self.source_node
8324 migration_info = self.migration_info
8326 abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
8330 abort_msg = abort_result.fail_msg
8332 logging.error("Aborting migration failed on target node %s: %s",
8333 target_node, abort_msg)
8334 # Don't raise an exception here, as we stil have to try to revert the
8335 # disk status, even if this step failed.
8337 abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
8338 instance, False, self.live)
8339 abort_msg = abort_result.fail_msg
8341 logging.error("Aborting migration failed on source node %s: %s",
8342 source_node, abort_msg)
8344 def _ExecMigration(self):
8345 """Migrate an instance.
8347 The migrate is done by:
8348 - change the disks into dual-master mode
8349 - wait until disks are fully synchronized again
8350 - migrate the instance
8351 - change disks on the new secondary node (the old primary) to secondary
8352 - wait until disks are fully synchronized
8353 - change disks into single-master mode
8356 instance = self.instance
8357 target_node = self.target_node
8358 source_node = self.source_node
8360 # Check for hypervisor version mismatch and warn the user.
8361 nodeinfo = self.rpc.call_node_info([source_node, target_node],
8362 None, [self.instance.hypervisor])
8363 for ninfo in nodeinfo.values():
8364 ninfo.Raise("Unable to retrieve node information from node '%s'" %
8366 (_, _, (src_info, )) = nodeinfo[source_node].payload
8367 (_, _, (dst_info, )) = nodeinfo[target_node].payload
8369 if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
8370 (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
8371 src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
8372 dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
8373 if src_version != dst_version:
8374 self.feedback_fn("* warning: hypervisor version mismatch between"
8375 " source (%s) and target (%s) node" %
8376 (src_version, dst_version))
8378 self.feedback_fn("* checking disk consistency between source and target")
8379 for (idx, dev) in enumerate(instance.disks):
8380 if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
8381 raise errors.OpExecError("Disk %s is degraded or not fully"
8382 " synchronized on target node,"
8383 " aborting migration" % idx)
8385 if self.current_mem > self.tgt_free_mem:
8386 if not self.allow_runtime_changes:
8387 raise errors.OpExecError("Memory ballooning not allowed and not enough"
8388 " free memory to fit instance %s on target"
8389 " node %s (have %dMB, need %dMB)" %
8390 (instance.name, target_node,
8391 self.tgt_free_mem, self.current_mem))
8392 self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
8393 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
8396 rpcres.Raise("Cannot modify instance runtime memory")
8398 # First get the migration information from the remote node
8399 result = self.rpc.call_migration_info(source_node, instance)
8400 msg = result.fail_msg
8402 log_err = ("Failed fetching source migration information from %s: %s" %
8404 logging.error(log_err)
8405 raise errors.OpExecError(log_err)
8407 self.migration_info = migration_info = result.payload
8409 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8410 # Then switch the disks to master/master mode
8411 self._EnsureSecondary(target_node)
8412 self._GoStandalone()
8413 self._GoReconnect(True)
8414 self._WaitUntilSync()
8416 self.feedback_fn("* preparing %s to accept the instance" % target_node)
8417 result = self.rpc.call_accept_instance(target_node,
8420 self.nodes_ip[target_node])
8422 msg = result.fail_msg
8424 logging.error("Instance pre-migration failed, trying to revert"
8425 " disk status: %s", msg)
8426 self.feedback_fn("Pre-migration failed, aborting")
8427 self._AbortMigration()
8428 self._RevertDiskStatus()
8429 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
8430 (instance.name, msg))
8432 self.feedback_fn("* migrating instance to %s" % target_node)
8433 result = self.rpc.call_instance_migrate(source_node, instance,
8434 self.nodes_ip[target_node],
8436 msg = result.fail_msg
8438 logging.error("Instance migration failed, trying to revert"
8439 " disk status: %s", msg)
8440 self.feedback_fn("Migration failed, aborting")
8441 self._AbortMigration()
8442 self._RevertDiskStatus()
8443 raise errors.OpExecError("Could not migrate instance %s: %s" %
8444 (instance.name, msg))
8446 self.feedback_fn("* starting memory transfer")
8447 last_feedback = time.time()
8449 result = self.rpc.call_instance_get_migration_status(source_node,
8451 msg = result.fail_msg
8452 ms = result.payload # MigrationStatus instance
8453 if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
8454 logging.error("Instance migration failed, trying to revert"
8455 " disk status: %s", msg)
8456 self.feedback_fn("Migration failed, aborting")
8457 self._AbortMigration()
8458 self._RevertDiskStatus()
8459 raise errors.OpExecError("Could not migrate instance %s: %s" %
8460 (instance.name, msg))
8462 if result.payload.status != constants.HV_MIGRATION_ACTIVE:
8463 self.feedback_fn("* memory transfer complete")
8466 if (utils.TimeoutExpired(last_feedback,
8467 self._MIGRATION_FEEDBACK_INTERVAL) and
8468 ms.transferred_ram is not None):
8469 mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
8470 self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
8471 last_feedback = time.time()
8473 time.sleep(self._MIGRATION_POLL_INTERVAL)
8475 result = self.rpc.call_instance_finalize_migration_src(source_node,
8479 msg = result.fail_msg
8481 logging.error("Instance migration succeeded, but finalization failed"
8482 " on the source node: %s", msg)
8483 raise errors.OpExecError("Could not finalize instance migration: %s" %
8486 instance.primary_node = target_node
8488 # distribute new instance config to the other nodes
8489 self.cfg.Update(instance, self.feedback_fn)
8491 result = self.rpc.call_instance_finalize_migration_dst(target_node,
8495 msg = result.fail_msg
8497 logging.error("Instance migration succeeded, but finalization failed"
8498 " on the target node: %s", msg)
8499 raise errors.OpExecError("Could not finalize instance migration: %s" %
8502 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8503 self._EnsureSecondary(source_node)
8504 self._WaitUntilSync()
8505 self._GoStandalone()
8506 self._GoReconnect(False)
8507 self._WaitUntilSync()
8509 # If the instance's disk template is `rbd' and there was a successful
8510 # migration, unmap the device from the source node.
8511 if self.instance.disk_template == constants.DT_RBD:
8512 disks = _ExpandCheckDisks(instance, instance.disks)
8513 self.feedback_fn("* unmapping instance's disks from %s" % source_node)
8515 result = self.rpc.call_blockdev_shutdown(source_node, disk)
8516 msg = result.fail_msg
8518 logging.error("Migration was successful, but couldn't unmap the"
8519 " block device %s on source node %s: %s",
8520 disk.iv_name, source_node, msg)
8521 logging.error("You need to unmap the device %s manually on %s",
8522 disk.iv_name, source_node)
8524 self.feedback_fn("* done")
8526 def _ExecFailover(self):
8527 """Failover an instance.
8529 The failover is done by shutting it down on its present node and
8530 starting it on the secondary.
8533 instance = self.instance
8534 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
8536 source_node = instance.primary_node
8537 target_node = self.target_node
8539 if instance.admin_state == constants.ADMINST_UP:
8540 self.feedback_fn("* checking disk consistency between source and target")
8541 for (idx, dev) in enumerate(instance.disks):
8542 # for drbd, these are drbd over lvm
8543 if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
8545 if primary_node.offline:
8546 self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
8548 (primary_node.name, idx, target_node))
8549 elif not self.ignore_consistency:
8550 raise errors.OpExecError("Disk %s is degraded on target node,"
8551 " aborting failover" % idx)
8553 self.feedback_fn("* not checking disk consistency as instance is not"
8556 self.feedback_fn("* shutting down instance on source node")
8557 logging.info("Shutting down instance %s on node %s",
8558 instance.name, source_node)
8560 result = self.rpc.call_instance_shutdown(source_node, instance,
8561 self.shutdown_timeout)
8562 msg = result.fail_msg
8564 if self.ignore_consistency or primary_node.offline:
8565 self.lu.LogWarning("Could not shutdown instance %s on node %s,"
8566 " proceeding anyway; please make sure node"
8567 " %s is down; error details: %s",
8568 instance.name, source_node, source_node, msg)
8570 raise errors.OpExecError("Could not shutdown instance %s on"
8572 (instance.name, source_node, msg))
8574 self.feedback_fn("* deactivating the instance's disks on source node")
8575 if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
8576 raise errors.OpExecError("Can't shut down the instance's disks")
8578 instance.primary_node = target_node
8579 # distribute new instance config to the other nodes
8580 self.cfg.Update(instance, self.feedback_fn)
8582 # Only start the instance if it's marked as up
8583 if instance.admin_state == constants.ADMINST_UP:
8584 self.feedback_fn("* activating the instance's disks on target node %s" %
8586 logging.info("Starting instance %s on node %s",
8587 instance.name, target_node)
8589 disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
8590 ignore_secondaries=True)
8592 _ShutdownInstanceDisks(self.lu, instance)
8593 raise errors.OpExecError("Can't activate the instance's disks")
8595 self.feedback_fn("* starting the instance on the target node %s" %
8597 result = self.rpc.call_instance_start(target_node, (instance, None, None),
8599 msg = result.fail_msg
8601 _ShutdownInstanceDisks(self.lu, instance)
8602 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
8603 (instance.name, target_node, msg))
8605 def Exec(self, feedback_fn):
8606 """Perform the migration.
8609 self.feedback_fn = feedback_fn
8610 self.source_node = self.instance.primary_node
8612 # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
8613 if self.instance.disk_template in constants.DTS_INT_MIRROR:
8614 self.target_node = self.instance.secondary_nodes[0]
8615 # Otherwise self.target_node has been populated either
8616 # directly, or through an iallocator.
8618 self.all_nodes = [self.source_node, self.target_node]
8619 self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
8620 in self.cfg.GetMultiNodeInfo(self.all_nodes))
8623 feedback_fn("Failover instance %s" % self.instance.name)
8624 self._ExecFailover()
8626 feedback_fn("Migrating instance %s" % self.instance.name)
8629 return self._ExecCleanup()
8631 return self._ExecMigration()
8634 def _CreateBlockDev(lu, node, instance, device, force_create, info,
8636 """Wrapper around L{_CreateBlockDevInner}.
8638 This method annotates the root device first.
8641 (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
8642 return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
8646 def _CreateBlockDevInner(lu, node, instance, device, force_create,
8648 """Create a tree of block devices on a given node.
8650 If this device type has to be created on secondaries, create it and
8653 If not, just recurse to children keeping the same 'force' value.
8655 @attention: The device has to be annotated already.
8657 @param lu: the lu on whose behalf we execute
8658 @param node: the node on which to create the device
8659 @type instance: L{objects.Instance}
8660 @param instance: the instance which owns the device
8661 @type device: L{objects.Disk}
8662 @param device: the device to create
8663 @type force_create: boolean
8664 @param force_create: whether to force creation of this device; this
8665 will be change to True whenever we find a device which has
8666 CreateOnSecondary() attribute
8667 @param info: the extra 'metadata' we should attach to the device
8668 (this will be represented as a LVM tag)
8669 @type force_open: boolean
8670 @param force_open: this parameter will be passes to the
8671 L{backend.BlockdevCreate} function where it specifies
8672 whether we run on primary or not, and it affects both
8673 the child assembly and the device own Open() execution
8676 if device.CreateOnSecondary():
8680 for child in device.children:
8681 _CreateBlockDevInner(lu, node, instance, child, force_create,
8684 if not force_create:
8687 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
8690 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
8691 """Create a single block device on a given node.
8693 This will not recurse over children of the device, so they must be
8696 @param lu: the lu on whose behalf we execute
8697 @param node: the node on which to create the device
8698 @type instance: L{objects.Instance}
8699 @param instance: the instance which owns the device
8700 @type device: L{objects.Disk}
8701 @param device: the device to create
8702 @param info: the extra 'metadata' we should attach to the device
8703 (this will be represented as a LVM tag)
8704 @type force_open: boolean
8705 @param force_open: this parameter will be passes to the
8706 L{backend.BlockdevCreate} function where it specifies
8707 whether we run on primary or not, and it affects both
8708 the child assembly and the device own Open() execution
8711 lu.cfg.SetDiskID(device, node)
8712 result = lu.rpc.call_blockdev_create(node, device, device.size,
8713 instance.name, force_open, info)
8714 result.Raise("Can't create block device %s on"
8715 " node %s for instance %s" % (device, node, instance.name))
8716 if device.physical_id is None:
8717 device.physical_id = result.payload
8720 def _GenerateUniqueNames(lu, exts):
8721 """Generate a suitable LV name.
8723 This will generate a logical volume name for the given instance.
8728 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
8729 results.append("%s%s" % (new_id, val))
8733 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
8734 iv_name, p_minor, s_minor):
8735 """Generate a drbd8 device complete with its children.
8738 assert len(vgnames) == len(names) == 2
8739 port = lu.cfg.AllocatePort()
8740 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
8742 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
8743 logical_id=(vgnames[0], names[0]),
8745 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
8746 logical_id=(vgnames[1], names[1]),
8748 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
8749 logical_id=(primary, secondary, port,
8752 children=[dev_data, dev_meta],
8753 iv_name=iv_name, params={})
8757 _DISK_TEMPLATE_NAME_PREFIX = {
8758 constants.DT_PLAIN: "",
8759 constants.DT_RBD: ".rbd",
8763 _DISK_TEMPLATE_DEVICE_TYPE = {
8764 constants.DT_PLAIN: constants.LD_LV,
8765 constants.DT_FILE: constants.LD_FILE,
8766 constants.DT_SHARED_FILE: constants.LD_FILE,
8767 constants.DT_BLOCK: constants.LD_BLOCKDEV,
8768 constants.DT_RBD: constants.LD_RBD,
8772 def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
8773 secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
8774 feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
8775 _req_shr_file_storage=opcodes.RequireSharedFileStorage):
8776 """Generate the entire disk layout for a given template type.
8779 #TODO: compute space requirements
8781 vgname = lu.cfg.GetVGName()
8782 disk_count = len(disk_info)
8785 if template_name == constants.DT_DISKLESS:
8787 elif template_name == constants.DT_DRBD8:
8788 if len(secondary_nodes) != 1:
8789 raise errors.ProgrammerError("Wrong template configuration")
8790 remote_node = secondary_nodes[0]
8791 minors = lu.cfg.AllocateDRBDMinor(
8792 [primary_node, remote_node] * len(disk_info), instance_name)
8794 (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
8796 drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
8799 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
8800 for i in range(disk_count)]):
8801 names.append(lv_prefix + "_data")
8802 names.append(lv_prefix + "_meta")
8803 for idx, disk in enumerate(disk_info):
8804 disk_index = idx + base_index
8805 data_vg = disk.get(constants.IDISK_VG, vgname)
8806 meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
8807 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
8808 disk[constants.IDISK_SIZE],
8810 names[idx * 2:idx * 2 + 2],
8811 "disk/%d" % disk_index,
8812 minors[idx * 2], minors[idx * 2 + 1])
8813 disk_dev.mode = disk[constants.IDISK_MODE]
8814 disks.append(disk_dev)
8817 raise errors.ProgrammerError("Wrong template configuration")
8819 if template_name == constants.DT_FILE:
8821 elif template_name == constants.DT_SHARED_FILE:
8822 _req_shr_file_storage()
8824 name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
8825 if name_prefix is None:
8828 names = _GenerateUniqueNames(lu, ["%s.disk%s" %
8829 (name_prefix, base_index + i)
8830 for i in range(disk_count)])
8832 if template_name == constants.DT_PLAIN:
8833 def logical_id_fn(idx, _, disk):
8834 vg = disk.get(constants.IDISK_VG, vgname)
8835 return (vg, names[idx])
8836 elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
8838 lambda _, disk_index, disk: (file_driver,
8839 "%s/disk%d" % (file_storage_dir,
8841 elif template_name == constants.DT_BLOCK:
8843 lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
8844 disk[constants.IDISK_ADOPT])
8845 elif template_name == constants.DT_RBD:
8846 logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
8848 raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
8850 dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
8852 for idx, disk in enumerate(disk_info):
8853 disk_index = idx + base_index
8854 size = disk[constants.IDISK_SIZE]
8855 feedback_fn("* disk %s, size %s" %
8856 (disk_index, utils.FormatUnit(size, "h")))
8857 disks.append(objects.Disk(dev_type=dev_type, size=size,
8858 logical_id=logical_id_fn(idx, disk_index, disk),
8859 iv_name="disk/%d" % disk_index,
8860 mode=disk[constants.IDISK_MODE],
8866 def _GetInstanceInfoText(instance):
8867 """Compute that text that should be added to the disk's metadata.
8870 return "originstname+%s" % instance.name
8873 def _CalcEta(time_taken, written, total_size):
8874 """Calculates the ETA based on size written and total size.
8876 @param time_taken: The time taken so far
8877 @param written: amount written so far
8878 @param total_size: The total size of data to be written
8879 @return: The remaining time in seconds
8882 avg_time = time_taken / float(written)
8883 return (total_size - written) * avg_time
8886 def _WipeDisks(lu, instance):
8887 """Wipes instance disks.
8889 @type lu: L{LogicalUnit}
8890 @param lu: the logical unit on whose behalf we execute
8891 @type instance: L{objects.Instance}
8892 @param instance: the instance whose disks we should create
8893 @return: the success of the wipe
8896 node = instance.primary_node
8898 for device in instance.disks:
8899 lu.cfg.SetDiskID(device, node)
8901 logging.info("Pause sync of instance %s disks", instance.name)
8902 result = lu.rpc.call_blockdev_pause_resume_sync(node,
8903 (instance.disks, instance),
8906 for idx, success in enumerate(result.payload):
8908 logging.warn("pause-sync of instance %s for disks %d failed",
8912 for idx, device in enumerate(instance.disks):
8913 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
8914 # MAX_WIPE_CHUNK at max
8915 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
8916 constants.MIN_WIPE_CHUNK_PERCENT)
8917 # we _must_ make this an int, otherwise rounding errors will
8919 wipe_chunk_size = int(wipe_chunk_size)
8921 lu.LogInfo("* Wiping disk %d", idx)
8922 logging.info("Wiping disk %d for instance %s, node %s using"
8923 " chunk size %s", idx, instance.name, node, wipe_chunk_size)
8928 start_time = time.time()
8930 while offset < size:
8931 wipe_size = min(wipe_chunk_size, size - offset)
8932 logging.debug("Wiping disk %d, offset %s, chunk %s",
8933 idx, offset, wipe_size)
8934 result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
8936 result.Raise("Could not wipe disk %d at offset %d for size %d" %
8937 (idx, offset, wipe_size))
8940 if now - last_output >= 60:
8941 eta = _CalcEta(now - start_time, offset, size)
8942 lu.LogInfo(" - done: %.1f%% ETA: %s" %
8943 (offset / float(size) * 100, utils.FormatSeconds(eta)))
8946 logging.info("Resume sync of instance %s disks", instance.name)
8948 result = lu.rpc.call_blockdev_pause_resume_sync(node,
8949 (instance.disks, instance),
8952 for idx, success in enumerate(result.payload):
8954 lu.LogWarning("Resume sync of disk %d failed, please have a"
8955 " look at the status and troubleshoot the issue", idx)
8956 logging.warn("resume-sync of instance %s for disks %d failed",
8960 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
8961 """Create all disks for an instance.
8963 This abstracts away some work from AddInstance.
8965 @type lu: L{LogicalUnit}
8966 @param lu: the logical unit on whose behalf we execute
8967 @type instance: L{objects.Instance}
8968 @param instance: the instance whose disks we should create
8970 @param to_skip: list of indices to skip
8971 @type target_node: string
8972 @param target_node: if passed, overrides the target node for creation
8974 @return: the success of the creation
8977 info = _GetInstanceInfoText(instance)
8978 if target_node is None:
8979 pnode = instance.primary_node
8980 all_nodes = instance.all_nodes
8985 if instance.disk_template in constants.DTS_FILEBASED:
8986 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
8987 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
8989 result.Raise("Failed to create directory '%s' on"
8990 " node %s" % (file_storage_dir, pnode))
8992 # Note: this needs to be kept in sync with adding of disks in
8993 # LUInstanceSetParams
8994 for idx, device in enumerate(instance.disks):
8995 if to_skip and idx in to_skip:
8997 logging.info("Creating disk %s for instance '%s'", idx, instance.name)
8999 for node in all_nodes:
9000 f_create = node == pnode
9001 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
9004 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
9005 """Remove all disks for an instance.
9007 This abstracts away some work from `AddInstance()` and
9008 `RemoveInstance()`. Note that in case some of the devices couldn't
9009 be removed, the removal will continue with the other ones (compare
9010 with `_CreateDisks()`).
9012 @type lu: L{LogicalUnit}
9013 @param lu: the logical unit on whose behalf we execute
9014 @type instance: L{objects.Instance}
9015 @param instance: the instance whose disks we should remove
9016 @type target_node: string
9017 @param target_node: used to override the node on which to remove the disks
9019 @return: the success of the removal
9022 logging.info("Removing block devices for instance %s", instance.name)
9025 ports_to_release = set()
9026 for (idx, device) in enumerate(instance.disks):
9028 edata = [(target_node, device)]
9030 edata = device.ComputeNodeTree(instance.primary_node)
9031 for node, disk in edata:
9032 lu.cfg.SetDiskID(disk, node)
9033 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
9035 lu.LogWarning("Could not remove disk %s on node %s,"
9036 " continuing anyway: %s", idx, node, msg)
9039 # if this is a DRBD disk, return its port to the pool
9040 if device.dev_type in constants.LDS_DRBD:
9041 ports_to_release.add(device.logical_id[2])
9043 if all_result or ignore_failures:
9044 for port in ports_to_release:
9045 lu.cfg.AddTcpUdpPort(port)
9047 if instance.disk_template == constants.DT_FILE:
9048 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
9052 tgt = instance.primary_node
9053 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
9055 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
9056 file_storage_dir, instance.primary_node, result.fail_msg)
9062 def _ComputeDiskSizePerVG(disk_template, disks):
9063 """Compute disk size requirements in the volume group
9066 def _compute(disks, payload):
9067 """Universal algorithm.
9072 vgs[disk[constants.IDISK_VG]] = \
9073 vgs.get(constants.IDISK_VG, 0) + disk[constants.IDISK_SIZE] + payload
9077 # Required free disk space as a function of disk and swap space
9079 constants.DT_DISKLESS: {},
9080 constants.DT_PLAIN: _compute(disks, 0),
9081 # 128 MB are added for drbd metadata for each disk
9082 constants.DT_DRBD8: _compute(disks, DRBD_META_SIZE),
9083 constants.DT_FILE: {},
9084 constants.DT_SHARED_FILE: {},
9087 if disk_template not in req_size_dict:
9088 raise errors.ProgrammerError("Disk template '%s' size requirement"
9089 " is unknown" % disk_template)
9091 return req_size_dict[disk_template]
9094 def _ComputeDiskSize(disk_template, disks):
9095 """Compute disk size requirements in the volume group
9098 # Required free disk space as a function of disk and swap space
9100 constants.DT_DISKLESS: None,
9101 constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
9102 # 128 MB are added for drbd metadata for each disk
9104 sum(d[constants.IDISK_SIZE] + DRBD_META_SIZE for d in disks),
9105 constants.DT_FILE: None,
9106 constants.DT_SHARED_FILE: 0,
9107 constants.DT_BLOCK: 0,
9108 constants.DT_RBD: 0,
9111 if disk_template not in req_size_dict:
9112 raise errors.ProgrammerError("Disk template '%s' size requirement"
9113 " is unknown" % disk_template)
9115 return req_size_dict[disk_template]
9118 def _FilterVmNodes(lu, nodenames):
9119 """Filters out non-vm_capable nodes from a list.
9121 @type lu: L{LogicalUnit}
9122 @param lu: the logical unit for which we check
9123 @type nodenames: list
9124 @param nodenames: the list of nodes on which we should check
9126 @return: the list of vm-capable nodes
9129 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
9130 return [name for name in nodenames if name not in vm_nodes]
9133 def _CheckHVParams(lu, nodenames, hvname, hvparams):
9134 """Hypervisor parameter validation.
9136 This function abstract the hypervisor parameter validation to be
9137 used in both instance create and instance modify.
9139 @type lu: L{LogicalUnit}
9140 @param lu: the logical unit for which we check
9141 @type nodenames: list
9142 @param nodenames: the list of nodes on which we should check
9143 @type hvname: string
9144 @param hvname: the name of the hypervisor we should use
9145 @type hvparams: dict
9146 @param hvparams: the parameters which we need to check
9147 @raise errors.OpPrereqError: if the parameters are not valid
9150 nodenames = _FilterVmNodes(lu, nodenames)
9152 cluster = lu.cfg.GetClusterInfo()
9153 hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
9155 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvfull)
9156 for node in nodenames:
9160 info.Raise("Hypervisor parameter validation failed on node %s" % node)
9163 def _CheckOSParams(lu, required, nodenames, osname, osparams):
9164 """OS parameters validation.
9166 @type lu: L{LogicalUnit}
9167 @param lu: the logical unit for which we check
9168 @type required: boolean
9169 @param required: whether the validation should fail if the OS is not
9171 @type nodenames: list
9172 @param nodenames: the list of nodes on which we should check
9173 @type osname: string
9174 @param osname: the name of the hypervisor we should use
9175 @type osparams: dict
9176 @param osparams: the parameters which we need to check
9177 @raise errors.OpPrereqError: if the parameters are not valid
9180 nodenames = _FilterVmNodes(lu, nodenames)
9181 result = lu.rpc.call_os_validate(nodenames, required, osname,
9182 [constants.OS_VALIDATE_PARAMETERS],
9184 for node, nres in result.items():
9185 # we don't check for offline cases since this should be run only
9186 # against the master node and/or an instance's nodes
9187 nres.Raise("OS Parameters validation failed on node %s" % node)
9188 if not nres.payload:
9189 lu.LogInfo("OS %s not found on node %s, validation skipped",
9193 class LUInstanceCreate(LogicalUnit):
9194 """Create an instance.
9197 HPATH = "instance-add"
9198 HTYPE = constants.HTYPE_INSTANCE
9201 def CheckArguments(self):
9205 # do not require name_check to ease forward/backward compatibility
9207 if self.op.no_install and self.op.start:
9208 self.LogInfo("No-installation mode selected, disabling startup")
9209 self.op.start = False
9210 # validate/normalize the instance name
9211 self.op.instance_name = \
9212 netutils.Hostname.GetNormalizedName(self.op.instance_name)
9214 if self.op.ip_check and not self.op.name_check:
9215 # TODO: make the ip check more flexible and not depend on the name check
9216 raise errors.OpPrereqError("Cannot do IP address check without a name"
9217 " check", errors.ECODE_INVAL)
9219 # check nics' parameter names
9220 for nic in self.op.nics:
9221 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
9223 # check disks. parameter names and consistent adopt/no-adopt strategy
9224 has_adopt = has_no_adopt = False
9225 for disk in self.op.disks:
9226 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
9227 if constants.IDISK_ADOPT in disk:
9231 if has_adopt and has_no_adopt:
9232 raise errors.OpPrereqError("Either all disks are adopted or none is",
9235 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
9236 raise errors.OpPrereqError("Disk adoption is not supported for the"
9237 " '%s' disk template" %
9238 self.op.disk_template,
9240 if self.op.iallocator is not None:
9241 raise errors.OpPrereqError("Disk adoption not allowed with an"
9242 " iallocator script", errors.ECODE_INVAL)
9243 if self.op.mode == constants.INSTANCE_IMPORT:
9244 raise errors.OpPrereqError("Disk adoption not allowed for"
9245 " instance import", errors.ECODE_INVAL)
9247 if self.op.disk_template in constants.DTS_MUST_ADOPT:
9248 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
9249 " but no 'adopt' parameter given" %
9250 self.op.disk_template,
9253 self.adopt_disks = has_adopt
9255 # instance name verification
9256 if self.op.name_check:
9257 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
9258 self.op.instance_name = self.hostname1.name
9259 # used in CheckPrereq for ip ping check
9260 self.check_ip = self.hostname1.ip
9262 self.check_ip = None
9264 # file storage checks
9265 if (self.op.file_driver and
9266 not self.op.file_driver in constants.FILE_DRIVER):
9267 raise errors.OpPrereqError("Invalid file driver name '%s'" %
9268 self.op.file_driver, errors.ECODE_INVAL)
9270 if self.op.disk_template == constants.DT_FILE:
9271 opcodes.RequireFileStorage()
9272 elif self.op.disk_template == constants.DT_SHARED_FILE:
9273 opcodes.RequireSharedFileStorage()
9275 ### Node/iallocator related checks
9276 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
9278 if self.op.pnode is not None:
9279 if self.op.disk_template in constants.DTS_INT_MIRROR:
9280 if self.op.snode is None:
9281 raise errors.OpPrereqError("The networked disk templates need"
9282 " a mirror node", errors.ECODE_INVAL)
9284 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
9286 self.op.snode = None
9288 self._cds = _GetClusterDomainSecret()
9290 if self.op.mode == constants.INSTANCE_IMPORT:
9291 # On import force_variant must be True, because if we forced it at
9292 # initial install, our only chance when importing it back is that it
9294 self.op.force_variant = True
9296 if self.op.no_install:
9297 self.LogInfo("No-installation mode has no effect during import")
9299 elif self.op.mode == constants.INSTANCE_CREATE:
9300 if self.op.os_type is None:
9301 raise errors.OpPrereqError("No guest OS specified",
9303 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
9304 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
9305 " installation" % self.op.os_type,
9307 if self.op.disk_template is None:
9308 raise errors.OpPrereqError("No disk template specified",
9311 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9312 # Check handshake to ensure both clusters have the same domain secret
9313 src_handshake = self.op.source_handshake
9314 if not src_handshake:
9315 raise errors.OpPrereqError("Missing source handshake",
9318 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
9321 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
9324 # Load and check source CA
9325 self.source_x509_ca_pem = self.op.source_x509_ca
9326 if not self.source_x509_ca_pem:
9327 raise errors.OpPrereqError("Missing source X509 CA",
9331 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
9333 except OpenSSL.crypto.Error, err:
9334 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
9335 (err, ), errors.ECODE_INVAL)
9337 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9338 if errcode is not None:
9339 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
9342 self.source_x509_ca = cert
9344 src_instance_name = self.op.source_instance_name
9345 if not src_instance_name:
9346 raise errors.OpPrereqError("Missing source instance name",
9349 self.source_instance_name = \
9350 netutils.GetHostname(name=src_instance_name).name
9353 raise errors.OpPrereqError("Invalid instance creation mode %r" %
9354 self.op.mode, errors.ECODE_INVAL)
9356 def ExpandNames(self):
9357 """ExpandNames for CreateInstance.
9359 Figure out the right locks for instance creation.
9362 self.needed_locks = {}
9364 instance_name = self.op.instance_name
9365 # this is just a preventive check, but someone might still add this
9366 # instance in the meantime, and creation will fail at lock-add time
9367 if instance_name in self.cfg.GetInstanceList():
9368 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
9369 instance_name, errors.ECODE_EXISTS)
9371 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
9373 if self.op.iallocator:
9374 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
9375 # specifying a group on instance creation and then selecting nodes from
9377 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9378 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
9380 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
9381 nodelist = [self.op.pnode]
9382 if self.op.snode is not None:
9383 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
9384 nodelist.append(self.op.snode)
9385 self.needed_locks[locking.LEVEL_NODE] = nodelist
9386 # Lock resources of instance's primary and secondary nodes (copy to
9387 # prevent accidential modification)
9388 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodelist)
9390 # in case of import lock the source node too
9391 if self.op.mode == constants.INSTANCE_IMPORT:
9392 src_node = self.op.src_node
9393 src_path = self.op.src_path
9395 if src_path is None:
9396 self.op.src_path = src_path = self.op.instance_name
9398 if src_node is None:
9399 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9400 self.op.src_node = None
9401 if os.path.isabs(src_path):
9402 raise errors.OpPrereqError("Importing an instance from a path"
9403 " requires a source node option",
9406 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
9407 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
9408 self.needed_locks[locking.LEVEL_NODE].append(src_node)
9409 if not os.path.isabs(src_path):
9410 self.op.src_path = src_path = \
9411 utils.PathJoin(constants.EXPORT_DIR, src_path)
9413 def _RunAllocator(self):
9414 """Run the allocator based on input opcode.
9417 nics = [n.ToDict() for n in self.nics]
9418 ial = IAllocator(self.cfg, self.rpc,
9419 mode=constants.IALLOCATOR_MODE_ALLOC,
9420 name=self.op.instance_name,
9421 disk_template=self.op.disk_template,
9424 vcpus=self.be_full[constants.BE_VCPUS],
9425 memory=self.be_full[constants.BE_MAXMEM],
9426 spindle_use=self.be_full[constants.BE_SPINDLE_USE],
9429 hypervisor=self.op.hypervisor,
9432 ial.Run(self.op.iallocator)
9435 raise errors.OpPrereqError("Can't compute nodes using"
9436 " iallocator '%s': %s" %
9437 (self.op.iallocator, ial.info),
9439 if len(ial.result) != ial.required_nodes:
9440 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
9441 " of nodes (%s), required %s" %
9442 (self.op.iallocator, len(ial.result),
9443 ial.required_nodes), errors.ECODE_FAULT)
9444 self.op.pnode = ial.result[0]
9445 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
9446 self.op.instance_name, self.op.iallocator,
9447 utils.CommaJoin(ial.result))
9448 if ial.required_nodes == 2:
9449 self.op.snode = ial.result[1]
9451 def BuildHooksEnv(self):
9454 This runs on master, primary and secondary nodes of the instance.
9458 "ADD_MODE": self.op.mode,
9460 if self.op.mode == constants.INSTANCE_IMPORT:
9461 env["SRC_NODE"] = self.op.src_node
9462 env["SRC_PATH"] = self.op.src_path
9463 env["SRC_IMAGES"] = self.src_images
9465 env.update(_BuildInstanceHookEnv(
9466 name=self.op.instance_name,
9467 primary_node=self.op.pnode,
9468 secondary_nodes=self.secondaries,
9469 status=self.op.start,
9470 os_type=self.op.os_type,
9471 minmem=self.be_full[constants.BE_MINMEM],
9472 maxmem=self.be_full[constants.BE_MAXMEM],
9473 vcpus=self.be_full[constants.BE_VCPUS],
9474 nics=_NICListToTuple(self, self.nics),
9475 disk_template=self.op.disk_template,
9476 disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
9477 for d in self.disks],
9480 hypervisor_name=self.op.hypervisor,
9486 def BuildHooksNodes(self):
9487 """Build hooks nodes.
9490 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
9493 def _ReadExportInfo(self):
9494 """Reads the export information from disk.
9496 It will override the opcode source node and path with the actual
9497 information, if these two were not specified before.
9499 @return: the export information
9502 assert self.op.mode == constants.INSTANCE_IMPORT
9504 src_node = self.op.src_node
9505 src_path = self.op.src_path
9507 if src_node is None:
9508 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
9509 exp_list = self.rpc.call_export_list(locked_nodes)
9511 for node in exp_list:
9512 if exp_list[node].fail_msg:
9514 if src_path in exp_list[node].payload:
9516 self.op.src_node = src_node = node
9517 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
9521 raise errors.OpPrereqError("No export found for relative path %s" %
9522 src_path, errors.ECODE_INVAL)
9524 _CheckNodeOnline(self, src_node)
9525 result = self.rpc.call_export_info(src_node, src_path)
9526 result.Raise("No export or invalid export found in dir %s" % src_path)
9528 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
9529 if not export_info.has_section(constants.INISECT_EXP):
9530 raise errors.ProgrammerError("Corrupted export config",
9531 errors.ECODE_ENVIRON)
9533 ei_version = export_info.get(constants.INISECT_EXP, "version")
9534 if (int(ei_version) != constants.EXPORT_VERSION):
9535 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
9536 (ei_version, constants.EXPORT_VERSION),
9537 errors.ECODE_ENVIRON)
9540 def _ReadExportParams(self, einfo):
9541 """Use export parameters as defaults.
9543 In case the opcode doesn't specify (as in override) some instance
9544 parameters, then try to use them from the export information, if
9548 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
9550 if self.op.disk_template is None:
9551 if einfo.has_option(constants.INISECT_INS, "disk_template"):
9552 self.op.disk_template = einfo.get(constants.INISECT_INS,
9554 if self.op.disk_template not in constants.DISK_TEMPLATES:
9555 raise errors.OpPrereqError("Disk template specified in configuration"
9556 " file is not one of the allowed values:"
9557 " %s" % " ".join(constants.DISK_TEMPLATES))
9559 raise errors.OpPrereqError("No disk template specified and the export"
9560 " is missing the disk_template information",
9563 if not self.op.disks:
9565 # TODO: import the disk iv_name too
9566 for idx in range(constants.MAX_DISKS):
9567 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
9568 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
9569 disks.append({constants.IDISK_SIZE: disk_sz})
9570 self.op.disks = disks
9571 if not disks and self.op.disk_template != constants.DT_DISKLESS:
9572 raise errors.OpPrereqError("No disk info specified and the export"
9573 " is missing the disk information",
9576 if not self.op.nics:
9578 for idx in range(constants.MAX_NICS):
9579 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
9581 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
9582 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
9589 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
9590 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
9592 if (self.op.hypervisor is None and
9593 einfo.has_option(constants.INISECT_INS, "hypervisor")):
9594 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
9596 if einfo.has_section(constants.INISECT_HYP):
9597 # use the export parameters but do not override the ones
9598 # specified by the user
9599 for name, value in einfo.items(constants.INISECT_HYP):
9600 if name not in self.op.hvparams:
9601 self.op.hvparams[name] = value
9603 if einfo.has_section(constants.INISECT_BEP):
9604 # use the parameters, without overriding
9605 for name, value in einfo.items(constants.INISECT_BEP):
9606 if name not in self.op.beparams:
9607 self.op.beparams[name] = value
9608 # Compatibility for the old "memory" be param
9609 if name == constants.BE_MEMORY:
9610 if constants.BE_MAXMEM not in self.op.beparams:
9611 self.op.beparams[constants.BE_MAXMEM] = value
9612 if constants.BE_MINMEM not in self.op.beparams:
9613 self.op.beparams[constants.BE_MINMEM] = value
9615 # try to read the parameters old style, from the main section
9616 for name in constants.BES_PARAMETERS:
9617 if (name not in self.op.beparams and
9618 einfo.has_option(constants.INISECT_INS, name)):
9619 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
9621 if einfo.has_section(constants.INISECT_OSP):
9622 # use the parameters, without overriding
9623 for name, value in einfo.items(constants.INISECT_OSP):
9624 if name not in self.op.osparams:
9625 self.op.osparams[name] = value
9627 def _RevertToDefaults(self, cluster):
9628 """Revert the instance parameters to the default values.
9632 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
9633 for name in self.op.hvparams.keys():
9634 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
9635 del self.op.hvparams[name]
9637 be_defs = cluster.SimpleFillBE({})
9638 for name in self.op.beparams.keys():
9639 if name in be_defs and be_defs[name] == self.op.beparams[name]:
9640 del self.op.beparams[name]
9642 nic_defs = cluster.SimpleFillNIC({})
9643 for nic in self.op.nics:
9644 for name in constants.NICS_PARAMETERS:
9645 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
9648 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
9649 for name in self.op.osparams.keys():
9650 if name in os_defs and os_defs[name] == self.op.osparams[name]:
9651 del self.op.osparams[name]
9653 def _CalculateFileStorageDir(self):
9654 """Calculate final instance file storage dir.
9657 # file storage dir calculation/check
9658 self.instance_file_storage_dir = None
9659 if self.op.disk_template in constants.DTS_FILEBASED:
9660 # build the full file storage dir path
9663 if self.op.disk_template == constants.DT_SHARED_FILE:
9664 get_fsd_fn = self.cfg.GetSharedFileStorageDir
9666 get_fsd_fn = self.cfg.GetFileStorageDir
9668 cfg_storagedir = get_fsd_fn()
9669 if not cfg_storagedir:
9670 raise errors.OpPrereqError("Cluster file storage dir not defined")
9671 joinargs.append(cfg_storagedir)
9673 if self.op.file_storage_dir is not None:
9674 joinargs.append(self.op.file_storage_dir)
9676 joinargs.append(self.op.instance_name)
9678 # pylint: disable=W0142
9679 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
9681 def CheckPrereq(self): # pylint: disable=R0914
9682 """Check prerequisites.
9685 self._CalculateFileStorageDir()
9687 if self.op.mode == constants.INSTANCE_IMPORT:
9688 export_info = self._ReadExportInfo()
9689 self._ReadExportParams(export_info)
9690 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
9692 self._old_instance_name = None
9694 if (not self.cfg.GetVGName() and
9695 self.op.disk_template not in constants.DTS_NOT_LVM):
9696 raise errors.OpPrereqError("Cluster does not support lvm-based"
9697 " instances", errors.ECODE_STATE)
9699 if (self.op.hypervisor is None or
9700 self.op.hypervisor == constants.VALUE_AUTO):
9701 self.op.hypervisor = self.cfg.GetHypervisorType()
9703 cluster = self.cfg.GetClusterInfo()
9704 enabled_hvs = cluster.enabled_hypervisors
9705 if self.op.hypervisor not in enabled_hvs:
9706 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
9707 " cluster (%s)" % (self.op.hypervisor,
9708 ",".join(enabled_hvs)),
9711 # Check tag validity
9712 for tag in self.op.tags:
9713 objects.TaggableObject.ValidateTag(tag)
9715 # check hypervisor parameter syntax (locally)
9716 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
9717 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
9719 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
9720 hv_type.CheckParameterSyntax(filled_hvp)
9721 self.hv_full = filled_hvp
9722 # check that we don't specify global parameters on an instance
9723 _CheckGlobalHvParams(self.op.hvparams)
9725 # fill and remember the beparams dict
9726 default_beparams = cluster.beparams[constants.PP_DEFAULT]
9727 for param, value in self.op.beparams.iteritems():
9728 if value == constants.VALUE_AUTO:
9729 self.op.beparams[param] = default_beparams[param]
9730 objects.UpgradeBeParams(self.op.beparams)
9731 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
9732 self.be_full = cluster.SimpleFillBE(self.op.beparams)
9734 # build os parameters
9735 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
9737 # now that hvp/bep are in final format, let's reset to defaults,
9739 if self.op.identify_defaults:
9740 self._RevertToDefaults(cluster)
9744 for idx, nic in enumerate(self.op.nics):
9745 nic_mode_req = nic.get(constants.INIC_MODE, None)
9746 nic_mode = nic_mode_req
9747 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
9748 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
9750 # in routed mode, for the first nic, the default ip is 'auto'
9751 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
9752 default_ip_mode = constants.VALUE_AUTO
9754 default_ip_mode = constants.VALUE_NONE
9756 # ip validity checks
9757 ip = nic.get(constants.INIC_IP, default_ip_mode)
9758 if ip is None or ip.lower() == constants.VALUE_NONE:
9760 elif ip.lower() == constants.VALUE_AUTO:
9761 if not self.op.name_check:
9762 raise errors.OpPrereqError("IP address set to auto but name checks"
9763 " have been skipped",
9765 nic_ip = self.hostname1.ip
9767 if not netutils.IPAddress.IsValid(ip):
9768 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
9772 # TODO: check the ip address for uniqueness
9773 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
9774 raise errors.OpPrereqError("Routed nic mode requires an ip address",
9777 # MAC address verification
9778 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
9779 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9780 mac = utils.NormalizeAndValidateMac(mac)
9783 self.cfg.ReserveMAC(mac, self.proc.GetECId())
9784 except errors.ReservationError:
9785 raise errors.OpPrereqError("MAC address %s already in use"
9786 " in cluster" % mac,
9787 errors.ECODE_NOTUNIQUE)
9789 # Build nic parameters
9790 link = nic.get(constants.INIC_LINK, None)
9791 if link == constants.VALUE_AUTO:
9792 link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
9795 nicparams[constants.NIC_MODE] = nic_mode
9797 nicparams[constants.NIC_LINK] = link
9799 check_params = cluster.SimpleFillNIC(nicparams)
9800 objects.NIC.CheckParameterSyntax(check_params)
9801 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
9803 # disk checks/pre-build
9804 default_vg = self.cfg.GetVGName()
9806 for disk in self.op.disks:
9807 mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
9808 if mode not in constants.DISK_ACCESS_SET:
9809 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
9810 mode, errors.ECODE_INVAL)
9811 size = disk.get(constants.IDISK_SIZE, None)
9813 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
9816 except (TypeError, ValueError):
9817 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
9820 data_vg = disk.get(constants.IDISK_VG, default_vg)
9822 constants.IDISK_SIZE: size,
9823 constants.IDISK_MODE: mode,
9824 constants.IDISK_VG: data_vg,
9826 if constants.IDISK_METAVG in disk:
9827 new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
9828 if constants.IDISK_ADOPT in disk:
9829 new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
9830 self.disks.append(new_disk)
9832 if self.op.mode == constants.INSTANCE_IMPORT:
9834 for idx in range(len(self.disks)):
9835 option = "disk%d_dump" % idx
9836 if export_info.has_option(constants.INISECT_INS, option):
9837 # FIXME: are the old os-es, disk sizes, etc. useful?
9838 export_name = export_info.get(constants.INISECT_INS, option)
9839 image = utils.PathJoin(self.op.src_path, export_name)
9840 disk_images.append(image)
9842 disk_images.append(False)
9844 self.src_images = disk_images
9846 if self.op.instance_name == self._old_instance_name:
9847 for idx, nic in enumerate(self.nics):
9848 if nic.mac == constants.VALUE_AUTO:
9849 nic_mac_ini = "nic%d_mac" % idx
9850 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
9852 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
9854 # ip ping checks (we use the same ip that was resolved in ExpandNames)
9855 if self.op.ip_check:
9856 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
9857 raise errors.OpPrereqError("IP %s of instance %s already in use" %
9858 (self.check_ip, self.op.instance_name),
9859 errors.ECODE_NOTUNIQUE)
9861 #### mac address generation
9862 # By generating here the mac address both the allocator and the hooks get
9863 # the real final mac address rather than the 'auto' or 'generate' value.
9864 # There is a race condition between the generation and the instance object
9865 # creation, which means that we know the mac is valid now, but we're not
9866 # sure it will be when we actually add the instance. If things go bad
9867 # adding the instance will abort because of a duplicate mac, and the
9868 # creation job will fail.
9869 for nic in self.nics:
9870 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9871 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
9875 if self.op.iallocator is not None:
9876 self._RunAllocator()
9878 # Release all unneeded node locks
9879 _ReleaseLocks(self, locking.LEVEL_NODE,
9880 keep=filter(None, [self.op.pnode, self.op.snode,
9882 _ReleaseLocks(self, locking.LEVEL_NODE_RES,
9883 keep=filter(None, [self.op.pnode, self.op.snode,
9886 #### node related checks
9888 # check primary node
9889 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
9890 assert self.pnode is not None, \
9891 "Cannot retrieve locked node %s" % self.op.pnode
9893 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
9894 pnode.name, errors.ECODE_STATE)
9896 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
9897 pnode.name, errors.ECODE_STATE)
9898 if not pnode.vm_capable:
9899 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
9900 " '%s'" % pnode.name, errors.ECODE_STATE)
9902 self.secondaries = []
9904 # mirror node verification
9905 if self.op.disk_template in constants.DTS_INT_MIRROR:
9906 if self.op.snode == pnode.name:
9907 raise errors.OpPrereqError("The secondary node cannot be the"
9908 " primary node", errors.ECODE_INVAL)
9909 _CheckNodeOnline(self, self.op.snode)
9910 _CheckNodeNotDrained(self, self.op.snode)
9911 _CheckNodeVmCapable(self, self.op.snode)
9912 self.secondaries.append(self.op.snode)
9914 snode = self.cfg.GetNodeInfo(self.op.snode)
9915 if pnode.group != snode.group:
9916 self.LogWarning("The primary and secondary nodes are in two"
9917 " different node groups; the disk parameters"
9918 " from the first disk's node group will be"
9921 nodenames = [pnode.name] + self.secondaries
9923 # Verify instance specs
9924 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
9926 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
9927 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
9928 constants.ISPEC_DISK_COUNT: len(self.disks),
9929 constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
9930 constants.ISPEC_NIC_COUNT: len(self.nics),
9931 constants.ISPEC_SPINDLE_USE: spindle_use,
9934 group_info = self.cfg.GetNodeGroup(pnode.group)
9935 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
9936 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
9937 if not self.op.ignore_ipolicy and res:
9938 raise errors.OpPrereqError(("Instance allocation to group %s violates"
9939 " policy: %s") % (pnode.group,
9940 utils.CommaJoin(res)),
9943 if not self.adopt_disks:
9944 if self.op.disk_template == constants.DT_RBD:
9945 # _CheckRADOSFreeSpace() is just a placeholder.
9946 # Any function that checks prerequisites can be placed here.
9947 # Check if there is enough space on the RADOS cluster.
9948 _CheckRADOSFreeSpace()
9950 # Check lv size requirements, if not adopting
9951 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
9952 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
9954 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
9955 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
9956 disk[constants.IDISK_ADOPT])
9957 for disk in self.disks])
9958 if len(all_lvs) != len(self.disks):
9959 raise errors.OpPrereqError("Duplicate volume names given for adoption",
9961 for lv_name in all_lvs:
9963 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
9964 # to ReserveLV uses the same syntax
9965 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
9966 except errors.ReservationError:
9967 raise errors.OpPrereqError("LV named %s used by another instance" %
9968 lv_name, errors.ECODE_NOTUNIQUE)
9970 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
9971 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
9973 node_lvs = self.rpc.call_lv_list([pnode.name],
9974 vg_names.payload.keys())[pnode.name]
9975 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
9976 node_lvs = node_lvs.payload
9978 delta = all_lvs.difference(node_lvs.keys())
9980 raise errors.OpPrereqError("Missing logical volume(s): %s" %
9981 utils.CommaJoin(delta),
9983 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
9985 raise errors.OpPrereqError("Online logical volumes found, cannot"
9986 " adopt: %s" % utils.CommaJoin(online_lvs),
9988 # update the size of disk based on what is found
9989 for dsk in self.disks:
9990 dsk[constants.IDISK_SIZE] = \
9991 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
9992 dsk[constants.IDISK_ADOPT])][0]))
9994 elif self.op.disk_template == constants.DT_BLOCK:
9995 # Normalize and de-duplicate device paths
9996 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
9997 for disk in self.disks])
9998 if len(all_disks) != len(self.disks):
9999 raise errors.OpPrereqError("Duplicate disk names given for adoption",
10000 errors.ECODE_INVAL)
10001 baddisks = [d for d in all_disks
10002 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
10004 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
10005 " cannot be adopted" %
10006 (", ".join(baddisks),
10007 constants.ADOPTABLE_BLOCKDEV_ROOT),
10008 errors.ECODE_INVAL)
10010 node_disks = self.rpc.call_bdev_sizes([pnode.name],
10011 list(all_disks))[pnode.name]
10012 node_disks.Raise("Cannot get block device information from node %s" %
10014 node_disks = node_disks.payload
10015 delta = all_disks.difference(node_disks.keys())
10017 raise errors.OpPrereqError("Missing block device(s): %s" %
10018 utils.CommaJoin(delta),
10019 errors.ECODE_INVAL)
10020 for dsk in self.disks:
10021 dsk[constants.IDISK_SIZE] = \
10022 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
10024 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
10026 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
10027 # check OS parameters (remotely)
10028 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
10030 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
10032 # memory check on primary node
10033 #TODO(dynmem): use MINMEM for checking
10035 _CheckNodeFreeMemory(self, self.pnode.name,
10036 "creating instance %s" % self.op.instance_name,
10037 self.be_full[constants.BE_MAXMEM],
10038 self.op.hypervisor)
10040 self.dry_run_result = list(nodenames)
10042 def Exec(self, feedback_fn):
10043 """Create and add the instance to the cluster.
10046 instance = self.op.instance_name
10047 pnode_name = self.pnode.name
10049 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
10050 self.owned_locks(locking.LEVEL_NODE)), \
10051 "Node locks differ from node resource locks"
10053 ht_kind = self.op.hypervisor
10054 if ht_kind in constants.HTS_REQ_PORT:
10055 network_port = self.cfg.AllocatePort()
10057 network_port = None
10059 # This is ugly but we got a chicken-egg problem here
10060 # We can only take the group disk parameters, as the instance
10061 # has no disks yet (we are generating them right here).
10062 node = self.cfg.GetNodeInfo(pnode_name)
10063 nodegroup = self.cfg.GetNodeGroup(node.group)
10064 disks = _GenerateDiskTemplate(self,
10065 self.op.disk_template,
10066 instance, pnode_name,
10069 self.instance_file_storage_dir,
10070 self.op.file_driver,
10073 self.cfg.GetGroupDiskParams(nodegroup))
10075 iobj = objects.Instance(name=instance, os=self.op.os_type,
10076 primary_node=pnode_name,
10077 nics=self.nics, disks=disks,
10078 disk_template=self.op.disk_template,
10079 admin_state=constants.ADMINST_DOWN,
10080 network_port=network_port,
10081 beparams=self.op.beparams,
10082 hvparams=self.op.hvparams,
10083 hypervisor=self.op.hypervisor,
10084 osparams=self.op.osparams,
10088 for tag in self.op.tags:
10091 if self.adopt_disks:
10092 if self.op.disk_template == constants.DT_PLAIN:
10093 # rename LVs to the newly-generated names; we need to construct
10094 # 'fake' LV disks with the old data, plus the new unique_id
10095 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
10097 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
10098 rename_to.append(t_dsk.logical_id)
10099 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
10100 self.cfg.SetDiskID(t_dsk, pnode_name)
10101 result = self.rpc.call_blockdev_rename(pnode_name,
10102 zip(tmp_disks, rename_to))
10103 result.Raise("Failed to rename adoped LVs")
10105 feedback_fn("* creating instance disks...")
10107 _CreateDisks(self, iobj)
10108 except errors.OpExecError:
10109 self.LogWarning("Device creation failed, reverting...")
10111 _RemoveDisks(self, iobj)
10113 self.cfg.ReleaseDRBDMinors(instance)
10116 feedback_fn("adding instance %s to cluster config" % instance)
10118 self.cfg.AddInstance(iobj, self.proc.GetECId())
10120 # Declare that we don't want to remove the instance lock anymore, as we've
10121 # added the instance to the config
10122 del self.remove_locks[locking.LEVEL_INSTANCE]
10124 if self.op.mode == constants.INSTANCE_IMPORT:
10125 # Release unused nodes
10126 _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
10128 # Release all nodes
10129 _ReleaseLocks(self, locking.LEVEL_NODE)
10132 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
10133 feedback_fn("* wiping instance disks...")
10135 _WipeDisks(self, iobj)
10136 except errors.OpExecError, err:
10137 logging.exception("Wiping disks failed")
10138 self.LogWarning("Wiping instance disks failed (%s)", err)
10142 # Something is already wrong with the disks, don't do anything else
10144 elif self.op.wait_for_sync:
10145 disk_abort = not _WaitForSync(self, iobj)
10146 elif iobj.disk_template in constants.DTS_INT_MIRROR:
10147 # make sure the disks are not degraded (still sync-ing is ok)
10148 feedback_fn("* checking mirrors status")
10149 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
10154 _RemoveDisks(self, iobj)
10155 self.cfg.RemoveInstance(iobj.name)
10156 # Make sure the instance lock gets removed
10157 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
10158 raise errors.OpExecError("There are some degraded disks for"
10161 # Release all node resource locks
10162 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
10164 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
10165 # we need to set the disks ID to the primary node, since the
10166 # preceding code might or might have not done it, depending on
10167 # disk template and other options
10168 for disk in iobj.disks:
10169 self.cfg.SetDiskID(disk, pnode_name)
10170 if self.op.mode == constants.INSTANCE_CREATE:
10171 if not self.op.no_install:
10172 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
10173 not self.op.wait_for_sync)
10175 feedback_fn("* pausing disk sync to install instance OS")
10176 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10179 for idx, success in enumerate(result.payload):
10181 logging.warn("pause-sync of instance %s for disk %d failed",
10184 feedback_fn("* running the instance OS create scripts...")
10185 # FIXME: pass debug option from opcode to backend
10187 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
10188 self.op.debug_level)
10190 feedback_fn("* resuming disk sync")
10191 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10194 for idx, success in enumerate(result.payload):
10196 logging.warn("resume-sync of instance %s for disk %d failed",
10199 os_add_result.Raise("Could not add os for instance %s"
10200 " on node %s" % (instance, pnode_name))
10203 if self.op.mode == constants.INSTANCE_IMPORT:
10204 feedback_fn("* running the instance OS import scripts...")
10208 for idx, image in enumerate(self.src_images):
10212 # FIXME: pass debug option from opcode to backend
10213 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
10214 constants.IEIO_FILE, (image, ),
10215 constants.IEIO_SCRIPT,
10216 (iobj.disks[idx], idx),
10218 transfers.append(dt)
10221 masterd.instance.TransferInstanceData(self, feedback_fn,
10222 self.op.src_node, pnode_name,
10223 self.pnode.secondary_ip,
10225 if not compat.all(import_result):
10226 self.LogWarning("Some disks for instance %s on node %s were not"
10227 " imported successfully" % (instance, pnode_name))
10229 rename_from = self._old_instance_name
10231 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
10232 feedback_fn("* preparing remote import...")
10233 # The source cluster will stop the instance before attempting to make
10234 # a connection. In some cases stopping an instance can take a long
10235 # time, hence the shutdown timeout is added to the connection
10237 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
10238 self.op.source_shutdown_timeout)
10239 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10241 assert iobj.primary_node == self.pnode.name
10243 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
10244 self.source_x509_ca,
10245 self._cds, timeouts)
10246 if not compat.all(disk_results):
10247 # TODO: Should the instance still be started, even if some disks
10248 # failed to import (valid for local imports, too)?
10249 self.LogWarning("Some disks for instance %s on node %s were not"
10250 " imported successfully" % (instance, pnode_name))
10252 rename_from = self.source_instance_name
10255 # also checked in the prereq part
10256 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
10259 # Run rename script on newly imported instance
10260 assert iobj.name == instance
10261 feedback_fn("Running rename script for %s" % instance)
10262 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
10264 self.op.debug_level)
10265 if result.fail_msg:
10266 self.LogWarning("Failed to run rename script for %s on node"
10267 " %s: %s" % (instance, pnode_name, result.fail_msg))
10269 assert not self.owned_locks(locking.LEVEL_NODE_RES)
10272 iobj.admin_state = constants.ADMINST_UP
10273 self.cfg.Update(iobj, feedback_fn)
10274 logging.info("Starting instance %s on node %s", instance, pnode_name)
10275 feedback_fn("* starting instance...")
10276 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
10278 result.Raise("Could not start instance")
10280 return list(iobj.all_nodes)
10283 def _CheckRADOSFreeSpace():
10284 """Compute disk size requirements inside the RADOS cluster.
10287 # For the RADOS cluster we assume there is always enough space.
10291 class LUInstanceConsole(NoHooksLU):
10292 """Connect to an instance's console.
10294 This is somewhat special in that it returns the command line that
10295 you need to run on the master node in order to connect to the
10301 def ExpandNames(self):
10302 self.share_locks = _ShareAll()
10303 self._ExpandAndLockInstance()
10305 def CheckPrereq(self):
10306 """Check prerequisites.
10308 This checks that the instance is in the cluster.
10311 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10312 assert self.instance is not None, \
10313 "Cannot retrieve locked instance %s" % self.op.instance_name
10314 _CheckNodeOnline(self, self.instance.primary_node)
10316 def Exec(self, feedback_fn):
10317 """Connect to the console of an instance
10320 instance = self.instance
10321 node = instance.primary_node
10323 node_insts = self.rpc.call_instance_list([node],
10324 [instance.hypervisor])[node]
10325 node_insts.Raise("Can't get node information from %s" % node)
10327 if instance.name not in node_insts.payload:
10328 if instance.admin_state == constants.ADMINST_UP:
10329 state = constants.INSTST_ERRORDOWN
10330 elif instance.admin_state == constants.ADMINST_DOWN:
10331 state = constants.INSTST_ADMINDOWN
10333 state = constants.INSTST_ADMINOFFLINE
10334 raise errors.OpExecError("Instance %s is not running (state %s)" %
10335 (instance.name, state))
10337 logging.debug("Connecting to console of %s on %s", instance.name, node)
10339 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
10342 def _GetInstanceConsole(cluster, instance):
10343 """Returns console information for an instance.
10345 @type cluster: L{objects.Cluster}
10346 @type instance: L{objects.Instance}
10350 hyper = hypervisor.GetHypervisor(instance.hypervisor)
10351 # beparams and hvparams are passed separately, to avoid editing the
10352 # instance and then saving the defaults in the instance itself.
10353 hvparams = cluster.FillHV(instance)
10354 beparams = cluster.FillBE(instance)
10355 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
10357 assert console.instance == instance.name
10358 assert console.Validate()
10360 return console.ToDict()
10363 class LUInstanceReplaceDisks(LogicalUnit):
10364 """Replace the disks of an instance.
10367 HPATH = "mirrors-replace"
10368 HTYPE = constants.HTYPE_INSTANCE
10371 def CheckArguments(self):
10372 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
10373 self.op.iallocator)
10375 def ExpandNames(self):
10376 self._ExpandAndLockInstance()
10378 assert locking.LEVEL_NODE not in self.needed_locks
10379 assert locking.LEVEL_NODE_RES not in self.needed_locks
10380 assert locking.LEVEL_NODEGROUP not in self.needed_locks
10382 assert self.op.iallocator is None or self.op.remote_node is None, \
10383 "Conflicting options"
10385 if self.op.remote_node is not None:
10386 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10388 # Warning: do not remove the locking of the new secondary here
10389 # unless DRBD8.AddChildren is changed to work in parallel;
10390 # currently it doesn't since parallel invocations of
10391 # FindUnusedMinor will conflict
10392 self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
10393 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
10395 self.needed_locks[locking.LEVEL_NODE] = []
10396 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10398 if self.op.iallocator is not None:
10399 # iallocator will select a new node in the same group
10400 self.needed_locks[locking.LEVEL_NODEGROUP] = []
10402 self.needed_locks[locking.LEVEL_NODE_RES] = []
10404 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
10405 self.op.iallocator, self.op.remote_node,
10406 self.op.disks, False, self.op.early_release,
10407 self.op.ignore_ipolicy)
10409 self.tasklets = [self.replacer]
10411 def DeclareLocks(self, level):
10412 if level == locking.LEVEL_NODEGROUP:
10413 assert self.op.remote_node is None
10414 assert self.op.iallocator is not None
10415 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
10417 self.share_locks[locking.LEVEL_NODEGROUP] = 1
10418 # Lock all groups used by instance optimistically; this requires going
10419 # via the node before it's locked, requiring verification later on
10420 self.needed_locks[locking.LEVEL_NODEGROUP] = \
10421 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
10423 elif level == locking.LEVEL_NODE:
10424 if self.op.iallocator is not None:
10425 assert self.op.remote_node is None
10426 assert not self.needed_locks[locking.LEVEL_NODE]
10428 # Lock member nodes of all locked groups
10429 self.needed_locks[locking.LEVEL_NODE] = [node_name
10430 for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
10431 for node_name in self.cfg.GetNodeGroup(group_uuid).members]
10433 self._LockInstancesNodes()
10434 elif level == locking.LEVEL_NODE_RES:
10436 self.needed_locks[locking.LEVEL_NODE_RES] = \
10437 self.needed_locks[locking.LEVEL_NODE]
10439 def BuildHooksEnv(self):
10440 """Build hooks env.
10442 This runs on the master, the primary and all the secondaries.
10445 instance = self.replacer.instance
10447 "MODE": self.op.mode,
10448 "NEW_SECONDARY": self.op.remote_node,
10449 "OLD_SECONDARY": instance.secondary_nodes[0],
10451 env.update(_BuildInstanceHookEnvByObject(self, instance))
10454 def BuildHooksNodes(self):
10455 """Build hooks nodes.
10458 instance = self.replacer.instance
10460 self.cfg.GetMasterNode(),
10461 instance.primary_node,
10463 if self.op.remote_node is not None:
10464 nl.append(self.op.remote_node)
10467 def CheckPrereq(self):
10468 """Check prerequisites.
10471 assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
10472 self.op.iallocator is None)
10474 # Verify if node group locks are still correct
10475 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
10477 _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
10479 return LogicalUnit.CheckPrereq(self)
10482 class TLReplaceDisks(Tasklet):
10483 """Replaces disks for an instance.
10485 Note: Locking is not within the scope of this class.
10488 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
10489 disks, delay_iallocator, early_release, ignore_ipolicy):
10490 """Initializes this class.
10493 Tasklet.__init__(self, lu)
10496 self.instance_name = instance_name
10498 self.iallocator_name = iallocator_name
10499 self.remote_node = remote_node
10501 self.delay_iallocator = delay_iallocator
10502 self.early_release = early_release
10503 self.ignore_ipolicy = ignore_ipolicy
10506 self.instance = None
10507 self.new_node = None
10508 self.target_node = None
10509 self.other_node = None
10510 self.remote_node_info = None
10511 self.node_secondary_ip = None
10514 def CheckArguments(mode, remote_node, iallocator):
10515 """Helper function for users of this class.
10518 # check for valid parameter combination
10519 if mode == constants.REPLACE_DISK_CHG:
10520 if remote_node is None and iallocator is None:
10521 raise errors.OpPrereqError("When changing the secondary either an"
10522 " iallocator script must be used or the"
10523 " new node given", errors.ECODE_INVAL)
10525 if remote_node is not None and iallocator is not None:
10526 raise errors.OpPrereqError("Give either the iallocator or the new"
10527 " secondary, not both", errors.ECODE_INVAL)
10529 elif remote_node is not None or iallocator is not None:
10530 # Not replacing the secondary
10531 raise errors.OpPrereqError("The iallocator and new node options can"
10532 " only be used when changing the"
10533 " secondary node", errors.ECODE_INVAL)
10536 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
10537 """Compute a new secondary node using an IAllocator.
10540 ial = IAllocator(lu.cfg, lu.rpc,
10541 mode=constants.IALLOCATOR_MODE_RELOC,
10542 name=instance_name,
10543 relocate_from=list(relocate_from))
10545 ial.Run(iallocator_name)
10547 if not ial.success:
10548 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
10549 " %s" % (iallocator_name, ial.info),
10550 errors.ECODE_NORES)
10552 if len(ial.result) != ial.required_nodes:
10553 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
10554 " of nodes (%s), required %s" %
10556 len(ial.result), ial.required_nodes),
10557 errors.ECODE_FAULT)
10559 remote_node_name = ial.result[0]
10561 lu.LogInfo("Selected new secondary for instance '%s': %s",
10562 instance_name, remote_node_name)
10564 return remote_node_name
10566 def _FindFaultyDisks(self, node_name):
10567 """Wrapper for L{_FindFaultyInstanceDisks}.
10570 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
10573 def _CheckDisksActivated(self, instance):
10574 """Checks if the instance disks are activated.
10576 @param instance: The instance to check disks
10577 @return: True if they are activated, False otherwise
10580 nodes = instance.all_nodes
10582 for idx, dev in enumerate(instance.disks):
10584 self.lu.LogInfo("Checking disk/%d on %s", idx, node)
10585 self.cfg.SetDiskID(dev, node)
10587 result = _BlockdevFind(self, node, dev, instance)
10591 elif result.fail_msg or not result.payload:
10596 def CheckPrereq(self):
10597 """Check prerequisites.
10599 This checks that the instance is in the cluster.
10602 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
10603 assert instance is not None, \
10604 "Cannot retrieve locked instance %s" % self.instance_name
10606 if instance.disk_template != constants.DT_DRBD8:
10607 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
10608 " instances", errors.ECODE_INVAL)
10610 if len(instance.secondary_nodes) != 1:
10611 raise errors.OpPrereqError("The instance has a strange layout,"
10612 " expected one secondary but found %d" %
10613 len(instance.secondary_nodes),
10614 errors.ECODE_FAULT)
10616 if not self.delay_iallocator:
10617 self._CheckPrereq2()
10619 def _CheckPrereq2(self):
10620 """Check prerequisites, second part.
10622 This function should always be part of CheckPrereq. It was separated and is
10623 now called from Exec because during node evacuation iallocator was only
10624 called with an unmodified cluster model, not taking planned changes into
10628 instance = self.instance
10629 secondary_node = instance.secondary_nodes[0]
10631 if self.iallocator_name is None:
10632 remote_node = self.remote_node
10634 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
10635 instance.name, instance.secondary_nodes)
10637 if remote_node is None:
10638 self.remote_node_info = None
10640 assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
10641 "Remote node '%s' is not locked" % remote_node
10643 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
10644 assert self.remote_node_info is not None, \
10645 "Cannot retrieve locked node %s" % remote_node
10647 if remote_node == self.instance.primary_node:
10648 raise errors.OpPrereqError("The specified node is the primary node of"
10649 " the instance", errors.ECODE_INVAL)
10651 if remote_node == secondary_node:
10652 raise errors.OpPrereqError("The specified node is already the"
10653 " secondary node of the instance",
10654 errors.ECODE_INVAL)
10656 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
10657 constants.REPLACE_DISK_CHG):
10658 raise errors.OpPrereqError("Cannot specify disks to be replaced",
10659 errors.ECODE_INVAL)
10661 if self.mode == constants.REPLACE_DISK_AUTO:
10662 if not self._CheckDisksActivated(instance):
10663 raise errors.OpPrereqError("Please run activate-disks on instance %s"
10664 " first" % self.instance_name,
10665 errors.ECODE_STATE)
10666 faulty_primary = self._FindFaultyDisks(instance.primary_node)
10667 faulty_secondary = self._FindFaultyDisks(secondary_node)
10669 if faulty_primary and faulty_secondary:
10670 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
10671 " one node and can not be repaired"
10672 " automatically" % self.instance_name,
10673 errors.ECODE_STATE)
10676 self.disks = faulty_primary
10677 self.target_node = instance.primary_node
10678 self.other_node = secondary_node
10679 check_nodes = [self.target_node, self.other_node]
10680 elif faulty_secondary:
10681 self.disks = faulty_secondary
10682 self.target_node = secondary_node
10683 self.other_node = instance.primary_node
10684 check_nodes = [self.target_node, self.other_node]
10690 # Non-automatic modes
10691 if self.mode == constants.REPLACE_DISK_PRI:
10692 self.target_node = instance.primary_node
10693 self.other_node = secondary_node
10694 check_nodes = [self.target_node, self.other_node]
10696 elif self.mode == constants.REPLACE_DISK_SEC:
10697 self.target_node = secondary_node
10698 self.other_node = instance.primary_node
10699 check_nodes = [self.target_node, self.other_node]
10701 elif self.mode == constants.REPLACE_DISK_CHG:
10702 self.new_node = remote_node
10703 self.other_node = instance.primary_node
10704 self.target_node = secondary_node
10705 check_nodes = [self.new_node, self.other_node]
10707 _CheckNodeNotDrained(self.lu, remote_node)
10708 _CheckNodeVmCapable(self.lu, remote_node)
10710 old_node_info = self.cfg.GetNodeInfo(secondary_node)
10711 assert old_node_info is not None
10712 if old_node_info.offline and not self.early_release:
10713 # doesn't make sense to delay the release
10714 self.early_release = True
10715 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
10716 " early-release mode", secondary_node)
10719 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
10722 # If not specified all disks should be replaced
10724 self.disks = range(len(self.instance.disks))
10726 # TODO: This is ugly, but right now we can't distinguish between internal
10727 # submitted opcode and external one. We should fix that.
10728 if self.remote_node_info:
10729 # We change the node, lets verify it still meets instance policy
10730 new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
10731 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
10733 _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
10734 ignore=self.ignore_ipolicy)
10736 for node in check_nodes:
10737 _CheckNodeOnline(self.lu, node)
10739 touched_nodes = frozenset(node_name for node_name in [self.new_node,
10742 if node_name is not None)
10744 # Release unneeded node and node resource locks
10745 _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
10746 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
10748 # Release any owned node group
10749 if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
10750 _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
10752 # Check whether disks are valid
10753 for disk_idx in self.disks:
10754 instance.FindDisk(disk_idx)
10756 # Get secondary node IP addresses
10757 self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
10758 in self.cfg.GetMultiNodeInfo(touched_nodes))
10760 def Exec(self, feedback_fn):
10761 """Execute disk replacement.
10763 This dispatches the disk replacement to the appropriate handler.
10766 if self.delay_iallocator:
10767 self._CheckPrereq2()
10770 # Verify owned locks before starting operation
10771 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
10772 assert set(owned_nodes) == set(self.node_secondary_ip), \
10773 ("Incorrect node locks, owning %s, expected %s" %
10774 (owned_nodes, self.node_secondary_ip.keys()))
10775 assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
10776 self.lu.owned_locks(locking.LEVEL_NODE_RES))
10778 owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
10779 assert list(owned_instances) == [self.instance_name], \
10780 "Instance '%s' not locked" % self.instance_name
10782 assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
10783 "Should not own any node group lock at this point"
10786 feedback_fn("No disks need replacement")
10789 feedback_fn("Replacing disk(s) %s for %s" %
10790 (utils.CommaJoin(self.disks), self.instance.name))
10792 activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
10794 # Activate the instance disks if we're replacing them on a down instance
10796 _StartInstanceDisks(self.lu, self.instance, True)
10799 # Should we replace the secondary node?
10800 if self.new_node is not None:
10801 fn = self._ExecDrbd8Secondary
10803 fn = self._ExecDrbd8DiskOnly
10805 result = fn(feedback_fn)
10807 # Deactivate the instance disks if we're replacing them on a
10810 _SafeShutdownInstanceDisks(self.lu, self.instance)
10812 assert not self.lu.owned_locks(locking.LEVEL_NODE)
10815 # Verify owned locks
10816 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
10817 nodes = frozenset(self.node_secondary_ip)
10818 assert ((self.early_release and not owned_nodes) or
10819 (not self.early_release and not (set(owned_nodes) - nodes))), \
10820 ("Not owning the correct locks, early_release=%s, owned=%r,"
10821 " nodes=%r" % (self.early_release, owned_nodes, nodes))
10825 def _CheckVolumeGroup(self, nodes):
10826 self.lu.LogInfo("Checking volume groups")
10828 vgname = self.cfg.GetVGName()
10830 # Make sure volume group exists on all involved nodes
10831 results = self.rpc.call_vg_list(nodes)
10833 raise errors.OpExecError("Can't list volume groups on the nodes")
10836 res = results[node]
10837 res.Raise("Error checking node %s" % node)
10838 if vgname not in res.payload:
10839 raise errors.OpExecError("Volume group '%s' not found on node %s" %
10842 def _CheckDisksExistence(self, nodes):
10843 # Check disk existence
10844 for idx, dev in enumerate(self.instance.disks):
10845 if idx not in self.disks:
10849 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
10850 self.cfg.SetDiskID(dev, node)
10852 result = _BlockdevFind(self, node, dev, self.instance)
10854 msg = result.fail_msg
10855 if msg or not result.payload:
10857 msg = "disk not found"
10858 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
10861 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
10862 for idx, dev in enumerate(self.instance.disks):
10863 if idx not in self.disks:
10866 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
10869 if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
10870 on_primary, ldisk=ldisk):
10871 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
10872 " replace disks for instance %s" %
10873 (node_name, self.instance.name))
10875 def _CreateNewStorage(self, node_name):
10876 """Create new storage on the primary or secondary node.
10878 This is only used for same-node replaces, not for changing the
10879 secondary node, hence we don't want to modify the existing disk.
10884 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
10885 for idx, dev in enumerate(disks):
10886 if idx not in self.disks:
10889 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
10891 self.cfg.SetDiskID(dev, node_name)
10893 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
10894 names = _GenerateUniqueNames(self.lu, lv_names)
10896 (data_disk, meta_disk) = dev.children
10897 vg_data = data_disk.logical_id[0]
10898 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
10899 logical_id=(vg_data, names[0]),
10900 params=data_disk.params)
10901 vg_meta = meta_disk.logical_id[0]
10902 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
10903 logical_id=(vg_meta, names[1]),
10904 params=meta_disk.params)
10906 new_lvs = [lv_data, lv_meta]
10907 old_lvs = [child.Copy() for child in dev.children]
10908 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
10910 # we pass force_create=True to force the LVM creation
10911 for new_lv in new_lvs:
10912 _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
10913 _GetInstanceInfoText(self.instance), False)
10917 def _CheckDevices(self, node_name, iv_names):
10918 for name, (dev, _, _) in iv_names.iteritems():
10919 self.cfg.SetDiskID(dev, node_name)
10921 result = _BlockdevFind(self, node_name, dev, self.instance)
10923 msg = result.fail_msg
10924 if msg or not result.payload:
10926 msg = "disk not found"
10927 raise errors.OpExecError("Can't find DRBD device %s: %s" %
10930 if result.payload.is_degraded:
10931 raise errors.OpExecError("DRBD device %s is degraded!" % name)
10933 def _RemoveOldStorage(self, node_name, iv_names):
10934 for name, (_, old_lvs, _) in iv_names.iteritems():
10935 self.lu.LogInfo("Remove logical volumes for %s" % name)
10938 self.cfg.SetDiskID(lv, node_name)
10940 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
10942 self.lu.LogWarning("Can't remove old LV: %s" % msg,
10943 hint="remove unused LVs manually")
10945 def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
10946 """Replace a disk on the primary or secondary for DRBD 8.
10948 The algorithm for replace is quite complicated:
10950 1. for each disk to be replaced:
10952 1. create new LVs on the target node with unique names
10953 1. detach old LVs from the drbd device
10954 1. rename old LVs to name_replaced.<time_t>
10955 1. rename new LVs to old LVs
10956 1. attach the new LVs (with the old names now) to the drbd device
10958 1. wait for sync across all devices
10960 1. for each modified disk:
10962 1. remove old LVs (which have the name name_replaces.<time_t>)
10964 Failures are not very well handled.
10969 # Step: check device activation
10970 self.lu.LogStep(1, steps_total, "Check device existence")
10971 self._CheckDisksExistence([self.other_node, self.target_node])
10972 self._CheckVolumeGroup([self.target_node, self.other_node])
10974 # Step: check other node consistency
10975 self.lu.LogStep(2, steps_total, "Check peer consistency")
10976 self._CheckDisksConsistency(self.other_node,
10977 self.other_node == self.instance.primary_node,
10980 # Step: create new storage
10981 self.lu.LogStep(3, steps_total, "Allocate new storage")
10982 iv_names = self._CreateNewStorage(self.target_node)
10984 # Step: for each lv, detach+rename*2+attach
10985 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
10986 for dev, old_lvs, new_lvs in iv_names.itervalues():
10987 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
10989 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
10991 result.Raise("Can't detach drbd from local storage on node"
10992 " %s for device %s" % (self.target_node, dev.iv_name))
10994 #cfg.Update(instance)
10996 # ok, we created the new LVs, so now we know we have the needed
10997 # storage; as such, we proceed on the target node to rename
10998 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
10999 # using the assumption that logical_id == physical_id (which in
11000 # turn is the unique_id on that node)
11002 # FIXME(iustin): use a better name for the replaced LVs
11003 temp_suffix = int(time.time())
11004 ren_fn = lambda d, suff: (d.physical_id[0],
11005 d.physical_id[1] + "_replaced-%s" % suff)
11007 # Build the rename list based on what LVs exist on the node
11008 rename_old_to_new = []
11009 for to_ren in old_lvs:
11010 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
11011 if not result.fail_msg and result.payload:
11013 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
11015 self.lu.LogInfo("Renaming the old LVs on the target node")
11016 result = self.rpc.call_blockdev_rename(self.target_node,
11018 result.Raise("Can't rename old LVs on node %s" % self.target_node)
11020 # Now we rename the new LVs to the old LVs
11021 self.lu.LogInfo("Renaming the new LVs on the target node")
11022 rename_new_to_old = [(new, old.physical_id)
11023 for old, new in zip(old_lvs, new_lvs)]
11024 result = self.rpc.call_blockdev_rename(self.target_node,
11026 result.Raise("Can't rename new LVs on node %s" % self.target_node)
11028 # Intermediate steps of in memory modifications
11029 for old, new in zip(old_lvs, new_lvs):
11030 new.logical_id = old.logical_id
11031 self.cfg.SetDiskID(new, self.target_node)
11033 # We need to modify old_lvs so that removal later removes the
11034 # right LVs, not the newly added ones; note that old_lvs is a
11036 for disk in old_lvs:
11037 disk.logical_id = ren_fn(disk, temp_suffix)
11038 self.cfg.SetDiskID(disk, self.target_node)
11040 # Now that the new lvs have the old name, we can add them to the device
11041 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
11042 result = self.rpc.call_blockdev_addchildren(self.target_node,
11043 (dev, self.instance), new_lvs)
11044 msg = result.fail_msg
11046 for new_lv in new_lvs:
11047 msg2 = self.rpc.call_blockdev_remove(self.target_node,
11050 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
11051 hint=("cleanup manually the unused logical"
11053 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
11055 cstep = itertools.count(5)
11057 if self.early_release:
11058 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11059 self._RemoveOldStorage(self.target_node, iv_names)
11060 # TODO: Check if releasing locks early still makes sense
11061 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11063 # Release all resource locks except those used by the instance
11064 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11065 keep=self.node_secondary_ip.keys())
11067 # Release all node locks while waiting for sync
11068 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11070 # TODO: Can the instance lock be downgraded here? Take the optional disk
11071 # shutdown in the caller into consideration.
11074 # This can fail as the old devices are degraded and _WaitForSync
11075 # does a combined result over all disks, so we don't check its return value
11076 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11077 _WaitForSync(self.lu, self.instance)
11079 # Check all devices manually
11080 self._CheckDevices(self.instance.primary_node, iv_names)
11082 # Step: remove old storage
11083 if not self.early_release:
11084 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11085 self._RemoveOldStorage(self.target_node, iv_names)
11087 def _ExecDrbd8Secondary(self, feedback_fn):
11088 """Replace the secondary node for DRBD 8.
11090 The algorithm for replace is quite complicated:
11091 - for all disks of the instance:
11092 - create new LVs on the new node with same names
11093 - shutdown the drbd device on the old secondary
11094 - disconnect the drbd network on the primary
11095 - create the drbd device on the new secondary
11096 - network attach the drbd on the primary, using an artifice:
11097 the drbd code for Attach() will connect to the network if it
11098 finds a device which is connected to the good local disks but
11099 not network enabled
11100 - wait for sync across all devices
11101 - remove all disks from the old secondary
11103 Failures are not very well handled.
11108 pnode = self.instance.primary_node
11110 # Step: check device activation
11111 self.lu.LogStep(1, steps_total, "Check device existence")
11112 self._CheckDisksExistence([self.instance.primary_node])
11113 self._CheckVolumeGroup([self.instance.primary_node])
11115 # Step: check other node consistency
11116 self.lu.LogStep(2, steps_total, "Check peer consistency")
11117 self._CheckDisksConsistency(self.instance.primary_node, True, True)
11119 # Step: create new storage
11120 self.lu.LogStep(3, steps_total, "Allocate new storage")
11121 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
11122 for idx, dev in enumerate(disks):
11123 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
11124 (self.new_node, idx))
11125 # we pass force_create=True to force LVM creation
11126 for new_lv in dev.children:
11127 _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
11128 True, _GetInstanceInfoText(self.instance), False)
11130 # Step 4: dbrd minors and drbd setups changes
11131 # after this, we must manually remove the drbd minors on both the
11132 # error and the success paths
11133 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
11134 minors = self.cfg.AllocateDRBDMinor([self.new_node
11135 for dev in self.instance.disks],
11136 self.instance.name)
11137 logging.debug("Allocated minors %r", minors)
11140 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
11141 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
11142 (self.new_node, idx))
11143 # create new devices on new_node; note that we create two IDs:
11144 # one without port, so the drbd will be activated without
11145 # networking information on the new node at this stage, and one
11146 # with network, for the latter activation in step 4
11147 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
11148 if self.instance.primary_node == o_node1:
11151 assert self.instance.primary_node == o_node2, "Three-node instance?"
11154 new_alone_id = (self.instance.primary_node, self.new_node, None,
11155 p_minor, new_minor, o_secret)
11156 new_net_id = (self.instance.primary_node, self.new_node, o_port,
11157 p_minor, new_minor, o_secret)
11159 iv_names[idx] = (dev, dev.children, new_net_id)
11160 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
11162 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
11163 logical_id=new_alone_id,
11164 children=dev.children,
11167 (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
11170 _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
11172 _GetInstanceInfoText(self.instance), False)
11173 except errors.GenericError:
11174 self.cfg.ReleaseDRBDMinors(self.instance.name)
11177 # We have new devices, shutdown the drbd on the old secondary
11178 for idx, dev in enumerate(self.instance.disks):
11179 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
11180 self.cfg.SetDiskID(dev, self.target_node)
11181 msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
11183 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
11184 "node: %s" % (idx, msg),
11185 hint=("Please cleanup this device manually as"
11186 " soon as possible"))
11188 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
11189 result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
11190 self.instance.disks)[pnode]
11192 msg = result.fail_msg
11194 # detaches didn't succeed (unlikely)
11195 self.cfg.ReleaseDRBDMinors(self.instance.name)
11196 raise errors.OpExecError("Can't detach the disks from the network on"
11197 " old node: %s" % (msg,))
11199 # if we managed to detach at least one, we update all the disks of
11200 # the instance to point to the new secondary
11201 self.lu.LogInfo("Updating instance configuration")
11202 for dev, _, new_logical_id in iv_names.itervalues():
11203 dev.logical_id = new_logical_id
11204 self.cfg.SetDiskID(dev, self.instance.primary_node)
11206 self.cfg.Update(self.instance, feedback_fn)
11208 # Release all node locks (the configuration has been updated)
11209 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11211 # and now perform the drbd attach
11212 self.lu.LogInfo("Attaching primary drbds to new secondary"
11213 " (standalone => connected)")
11214 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
11216 self.node_secondary_ip,
11217 (self.instance.disks, self.instance),
11218 self.instance.name,
11220 for to_node, to_result in result.items():
11221 msg = to_result.fail_msg
11223 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
11225 hint=("please do a gnt-instance info to see the"
11226 " status of disks"))
11228 cstep = itertools.count(5)
11230 if self.early_release:
11231 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11232 self._RemoveOldStorage(self.target_node, iv_names)
11233 # TODO: Check if releasing locks early still makes sense
11234 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11236 # Release all resource locks except those used by the instance
11237 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11238 keep=self.node_secondary_ip.keys())
11240 # TODO: Can the instance lock be downgraded here? Take the optional disk
11241 # shutdown in the caller into consideration.
11244 # This can fail as the old devices are degraded and _WaitForSync
11245 # does a combined result over all disks, so we don't check its return value
11246 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11247 _WaitForSync(self.lu, self.instance)
11249 # Check all devices manually
11250 self._CheckDevices(self.instance.primary_node, iv_names)
11252 # Step: remove old storage
11253 if not self.early_release:
11254 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11255 self._RemoveOldStorage(self.target_node, iv_names)
11258 class LURepairNodeStorage(NoHooksLU):
11259 """Repairs the volume group on a node.
11264 def CheckArguments(self):
11265 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11267 storage_type = self.op.storage_type
11269 if (constants.SO_FIX_CONSISTENCY not in
11270 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
11271 raise errors.OpPrereqError("Storage units of type '%s' can not be"
11272 " repaired" % storage_type,
11273 errors.ECODE_INVAL)
11275 def ExpandNames(self):
11276 self.needed_locks = {
11277 locking.LEVEL_NODE: [self.op.node_name],
11280 def _CheckFaultyDisks(self, instance, node_name):
11281 """Ensure faulty disks abort the opcode or at least warn."""
11283 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
11285 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
11286 " node '%s'" % (instance.name, node_name),
11287 errors.ECODE_STATE)
11288 except errors.OpPrereqError, err:
11289 if self.op.ignore_consistency:
11290 self.proc.LogWarning(str(err.args[0]))
11294 def CheckPrereq(self):
11295 """Check prerequisites.
11298 # Check whether any instance on this node has faulty disks
11299 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
11300 if inst.admin_state != constants.ADMINST_UP:
11302 check_nodes = set(inst.all_nodes)
11303 check_nodes.discard(self.op.node_name)
11304 for inst_node_name in check_nodes:
11305 self._CheckFaultyDisks(inst, inst_node_name)
11307 def Exec(self, feedback_fn):
11308 feedback_fn("Repairing storage unit '%s' on %s ..." %
11309 (self.op.name, self.op.node_name))
11311 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
11312 result = self.rpc.call_storage_execute(self.op.node_name,
11313 self.op.storage_type, st_args,
11315 constants.SO_FIX_CONSISTENCY)
11316 result.Raise("Failed to repair storage unit '%s' on %s" %
11317 (self.op.name, self.op.node_name))
11320 class LUNodeEvacuate(NoHooksLU):
11321 """Evacuates instances off a list of nodes.
11326 _MODE2IALLOCATOR = {
11327 constants.NODE_EVAC_PRI: constants.IALLOCATOR_NEVAC_PRI,
11328 constants.NODE_EVAC_SEC: constants.IALLOCATOR_NEVAC_SEC,
11329 constants.NODE_EVAC_ALL: constants.IALLOCATOR_NEVAC_ALL,
11331 assert frozenset(_MODE2IALLOCATOR.keys()) == constants.NODE_EVAC_MODES
11332 assert (frozenset(_MODE2IALLOCATOR.values()) ==
11333 constants.IALLOCATOR_NEVAC_MODES)
11335 def CheckArguments(self):
11336 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
11338 def ExpandNames(self):
11339 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11341 if self.op.remote_node is not None:
11342 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
11343 assert self.op.remote_node
11345 if self.op.remote_node == self.op.node_name:
11346 raise errors.OpPrereqError("Can not use evacuated node as a new"
11347 " secondary node", errors.ECODE_INVAL)
11349 if self.op.mode != constants.NODE_EVAC_SEC:
11350 raise errors.OpPrereqError("Without the use of an iallocator only"
11351 " secondary instances can be evacuated",
11352 errors.ECODE_INVAL)
11355 self.share_locks = _ShareAll()
11356 self.needed_locks = {
11357 locking.LEVEL_INSTANCE: [],
11358 locking.LEVEL_NODEGROUP: [],
11359 locking.LEVEL_NODE: [],
11362 # Determine nodes (via group) optimistically, needs verification once locks
11363 # have been acquired
11364 self.lock_nodes = self._DetermineNodes()
11366 def _DetermineNodes(self):
11367 """Gets the list of nodes to operate on.
11370 if self.op.remote_node is None:
11371 # Iallocator will choose any node(s) in the same group
11372 group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
11374 group_nodes = frozenset([self.op.remote_node])
11376 # Determine nodes to be locked
11377 return set([self.op.node_name]) | group_nodes
11379 def _DetermineInstances(self):
11380 """Builds list of instances to operate on.
11383 assert self.op.mode in constants.NODE_EVAC_MODES
11385 if self.op.mode == constants.NODE_EVAC_PRI:
11386 # Primary instances only
11387 inst_fn = _GetNodePrimaryInstances
11388 assert self.op.remote_node is None, \
11389 "Evacuating primary instances requires iallocator"
11390 elif self.op.mode == constants.NODE_EVAC_SEC:
11391 # Secondary instances only
11392 inst_fn = _GetNodeSecondaryInstances
11395 assert self.op.mode == constants.NODE_EVAC_ALL
11396 inst_fn = _GetNodeInstances
11397 # TODO: In 2.6, change the iallocator interface to take an evacuation mode
11399 raise errors.OpPrereqError("Due to an issue with the iallocator"
11400 " interface it is not possible to evacuate"
11401 " all instances at once; specify explicitly"
11402 " whether to evacuate primary or secondary"
11404 errors.ECODE_INVAL)
11406 return inst_fn(self.cfg, self.op.node_name)
11408 def DeclareLocks(self, level):
11409 if level == locking.LEVEL_INSTANCE:
11410 # Lock instances optimistically, needs verification once node and group
11411 # locks have been acquired
11412 self.needed_locks[locking.LEVEL_INSTANCE] = \
11413 set(i.name for i in self._DetermineInstances())
11415 elif level == locking.LEVEL_NODEGROUP:
11416 # Lock node groups for all potential target nodes optimistically, needs
11417 # verification once nodes have been acquired
11418 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11419 self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
11421 elif level == locking.LEVEL_NODE:
11422 self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
11424 def CheckPrereq(self):
11426 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11427 owned_nodes = self.owned_locks(locking.LEVEL_NODE)
11428 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
11430 need_nodes = self._DetermineNodes()
11432 if not owned_nodes.issuperset(need_nodes):
11433 raise errors.OpPrereqError("Nodes in same group as '%s' changed since"
11434 " locks were acquired, current nodes are"
11435 " are '%s', used to be '%s'; retry the"
11437 (self.op.node_name,
11438 utils.CommaJoin(need_nodes),
11439 utils.CommaJoin(owned_nodes)),
11440 errors.ECODE_STATE)
11442 wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
11443 if owned_groups != wanted_groups:
11444 raise errors.OpExecError("Node groups changed since locks were acquired,"
11445 " current groups are '%s', used to be '%s';"
11446 " retry the operation" %
11447 (utils.CommaJoin(wanted_groups),
11448 utils.CommaJoin(owned_groups)))
11450 # Determine affected instances
11451 self.instances = self._DetermineInstances()
11452 self.instance_names = [i.name for i in self.instances]
11454 if set(self.instance_names) != owned_instances:
11455 raise errors.OpExecError("Instances on node '%s' changed since locks"
11456 " were acquired, current instances are '%s',"
11457 " used to be '%s'; retry the operation" %
11458 (self.op.node_name,
11459 utils.CommaJoin(self.instance_names),
11460 utils.CommaJoin(owned_instances)))
11462 if self.instance_names:
11463 self.LogInfo("Evacuating instances from node '%s': %s",
11465 utils.CommaJoin(utils.NiceSort(self.instance_names)))
11467 self.LogInfo("No instances to evacuate from node '%s'",
11470 if self.op.remote_node is not None:
11471 for i in self.instances:
11472 if i.primary_node == self.op.remote_node:
11473 raise errors.OpPrereqError("Node %s is the primary node of"
11474 " instance %s, cannot use it as"
11476 (self.op.remote_node, i.name),
11477 errors.ECODE_INVAL)
11479 def Exec(self, feedback_fn):
11480 assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
11482 if not self.instance_names:
11483 # No instances to evacuate
11486 elif self.op.iallocator is not None:
11487 # TODO: Implement relocation to other group
11488 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
11489 evac_mode=self._MODE2IALLOCATOR[self.op.mode],
11490 instances=list(self.instance_names))
11492 ial.Run(self.op.iallocator)
11494 if not ial.success:
11495 raise errors.OpPrereqError("Can't compute node evacuation using"
11496 " iallocator '%s': %s" %
11497 (self.op.iallocator, ial.info),
11498 errors.ECODE_NORES)
11500 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
11502 elif self.op.remote_node is not None:
11503 assert self.op.mode == constants.NODE_EVAC_SEC
11505 [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
11506 remote_node=self.op.remote_node,
11508 mode=constants.REPLACE_DISK_CHG,
11509 early_release=self.op.early_release)]
11510 for instance_name in self.instance_names
11514 raise errors.ProgrammerError("No iallocator or remote node")
11516 return ResultWithJobs(jobs)
11519 def _SetOpEarlyRelease(early_release, op):
11520 """Sets C{early_release} flag on opcodes if available.
11524 op.early_release = early_release
11525 except AttributeError:
11526 assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
11531 def _NodeEvacDest(use_nodes, group, nodes):
11532 """Returns group or nodes depending on caller's choice.
11536 return utils.CommaJoin(nodes)
11541 def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
11542 """Unpacks the result of change-group and node-evacuate iallocator requests.
11544 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
11545 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
11547 @type lu: L{LogicalUnit}
11548 @param lu: Logical unit instance
11549 @type alloc_result: tuple/list
11550 @param alloc_result: Result from iallocator
11551 @type early_release: bool
11552 @param early_release: Whether to release locks early if possible
11553 @type use_nodes: bool
11554 @param use_nodes: Whether to display node names instead of groups
11557 (moved, failed, jobs) = alloc_result
11560 failreason = utils.CommaJoin("%s (%s)" % (name, reason)
11561 for (name, reason) in failed)
11562 lu.LogWarning("Unable to evacuate instances %s", failreason)
11563 raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
11566 lu.LogInfo("Instances to be moved: %s",
11567 utils.CommaJoin("%s (to %s)" %
11568 (name, _NodeEvacDest(use_nodes, group, nodes))
11569 for (name, group, nodes) in moved))
11571 return [map(compat.partial(_SetOpEarlyRelease, early_release),
11572 map(opcodes.OpCode.LoadOpCode, ops))
11576 class LUInstanceGrowDisk(LogicalUnit):
11577 """Grow a disk of an instance.
11580 HPATH = "disk-grow"
11581 HTYPE = constants.HTYPE_INSTANCE
11584 def ExpandNames(self):
11585 self._ExpandAndLockInstance()
11586 self.needed_locks[locking.LEVEL_NODE] = []
11587 self.needed_locks[locking.LEVEL_NODE_RES] = []
11588 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11589 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
11591 def DeclareLocks(self, level):
11592 if level == locking.LEVEL_NODE:
11593 self._LockInstancesNodes()
11594 elif level == locking.LEVEL_NODE_RES:
11596 self.needed_locks[locking.LEVEL_NODE_RES] = \
11597 self.needed_locks[locking.LEVEL_NODE][:]
11599 def BuildHooksEnv(self):
11600 """Build hooks env.
11602 This runs on the master, the primary and all the secondaries.
11606 "DISK": self.op.disk,
11607 "AMOUNT": self.op.amount,
11608 "ABSOLUTE": self.op.absolute,
11610 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11613 def BuildHooksNodes(self):
11614 """Build hooks nodes.
11617 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
11620 def CheckPrereq(self):
11621 """Check prerequisites.
11623 This checks that the instance is in the cluster.
11626 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11627 assert instance is not None, \
11628 "Cannot retrieve locked instance %s" % self.op.instance_name
11629 nodenames = list(instance.all_nodes)
11630 for node in nodenames:
11631 _CheckNodeOnline(self, node)
11633 self.instance = instance
11635 if instance.disk_template not in constants.DTS_GROWABLE:
11636 raise errors.OpPrereqError("Instance's disk layout does not support"
11637 " growing", errors.ECODE_INVAL)
11639 self.disk = instance.FindDisk(self.op.disk)
11641 if self.op.absolute:
11642 self.target = self.op.amount
11643 self.delta = self.target - self.disk.size
11645 raise errors.OpPrereqError("Requested size (%s) is smaller than "
11646 "current disk size (%s)" %
11647 (utils.FormatUnit(self.target, "h"),
11648 utils.FormatUnit(self.disk.size, "h")),
11649 errors.ECODE_STATE)
11651 self.delta = self.op.amount
11652 self.target = self.disk.size + self.delta
11654 raise errors.OpPrereqError("Requested increment (%s) is negative" %
11655 utils.FormatUnit(self.delta, "h"),
11656 errors.ECODE_INVAL)
11658 if instance.disk_template not in (constants.DT_FILE,
11659 constants.DT_SHARED_FILE,
11661 # TODO: check the free disk space for file, when that feature will be
11663 _CheckNodesFreeDiskPerVG(self, nodenames,
11664 self.disk.ComputeGrowth(self.delta))
11666 def Exec(self, feedback_fn):
11667 """Execute disk grow.
11670 instance = self.instance
11673 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11674 assert (self.owned_locks(locking.LEVEL_NODE) ==
11675 self.owned_locks(locking.LEVEL_NODE_RES))
11677 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
11679 raise errors.OpExecError("Cannot activate block device to grow")
11681 feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
11682 (self.op.disk, instance.name,
11683 utils.FormatUnit(self.delta, "h"),
11684 utils.FormatUnit(self.target, "h")))
11686 # First run all grow ops in dry-run mode
11687 for node in instance.all_nodes:
11688 self.cfg.SetDiskID(disk, node)
11689 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11691 result.Raise("Grow request failed to node %s" % node)
11693 # We know that (as far as we can test) operations across different
11694 # nodes will succeed, time to run it for real on the backing storage
11695 for node in instance.all_nodes:
11696 self.cfg.SetDiskID(disk, node)
11697 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11699 result.Raise("Grow request failed to node %s" % node)
11701 # And now execute it for logical storage, on the primary node
11702 node = instance.primary_node
11703 self.cfg.SetDiskID(disk, node)
11704 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11706 result.Raise("Grow request failed to node %s" % node)
11708 disk.RecordGrow(self.delta)
11709 self.cfg.Update(instance, feedback_fn)
11711 # Changes have been recorded, release node lock
11712 _ReleaseLocks(self, locking.LEVEL_NODE)
11714 # Downgrade lock while waiting for sync
11715 self.glm.downgrade(locking.LEVEL_INSTANCE)
11717 if self.op.wait_for_sync:
11718 disk_abort = not _WaitForSync(self, instance, disks=[disk])
11720 self.proc.LogWarning("Disk sync-ing has not returned a good"
11721 " status; please check the instance")
11722 if instance.admin_state != constants.ADMINST_UP:
11723 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
11724 elif instance.admin_state != constants.ADMINST_UP:
11725 self.proc.LogWarning("Not shutting down the disk even if the instance is"
11726 " not supposed to be running because no wait for"
11727 " sync mode was requested")
11729 assert self.owned_locks(locking.LEVEL_NODE_RES)
11730 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11733 class LUInstanceQueryData(NoHooksLU):
11734 """Query runtime instance data.
11739 def ExpandNames(self):
11740 self.needed_locks = {}
11742 # Use locking if requested or when non-static information is wanted
11743 if not (self.op.static or self.op.use_locking):
11744 self.LogWarning("Non-static data requested, locks need to be acquired")
11745 self.op.use_locking = True
11747 if self.op.instances or not self.op.use_locking:
11748 # Expand instance names right here
11749 self.wanted_names = _GetWantedInstances(self, self.op.instances)
11751 # Will use acquired locks
11752 self.wanted_names = None
11754 if self.op.use_locking:
11755 self.share_locks = _ShareAll()
11757 if self.wanted_names is None:
11758 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
11760 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
11762 self.needed_locks[locking.LEVEL_NODEGROUP] = []
11763 self.needed_locks[locking.LEVEL_NODE] = []
11764 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11766 def DeclareLocks(self, level):
11767 if self.op.use_locking:
11768 if level == locking.LEVEL_NODEGROUP:
11769 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11771 # Lock all groups used by instances optimistically; this requires going
11772 # via the node before it's locked, requiring verification later on
11773 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11774 frozenset(group_uuid
11775 for instance_name in owned_instances
11777 self.cfg.GetInstanceNodeGroups(instance_name))
11779 elif level == locking.LEVEL_NODE:
11780 self._LockInstancesNodes()
11782 def CheckPrereq(self):
11783 """Check prerequisites.
11785 This only checks the optional instance list against the existing names.
11788 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
11789 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
11790 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
11792 if self.wanted_names is None:
11793 assert self.op.use_locking, "Locking was not used"
11794 self.wanted_names = owned_instances
11796 instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
11798 if self.op.use_locking:
11799 _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
11802 assert not (owned_instances or owned_groups or owned_nodes)
11804 self.wanted_instances = instances.values()
11806 def _ComputeBlockdevStatus(self, node, instance, dev):
11807 """Returns the status of a block device
11810 if self.op.static or not node:
11813 self.cfg.SetDiskID(dev, node)
11815 result = self.rpc.call_blockdev_find(node, dev)
11819 result.Raise("Can't compute disk status for %s" % instance.name)
11821 status = result.payload
11825 return (status.dev_path, status.major, status.minor,
11826 status.sync_percent, status.estimated_time,
11827 status.is_degraded, status.ldisk_status)
11829 def _ComputeDiskStatus(self, instance, snode, dev):
11830 """Compute block device status.
11833 (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
11835 return self._ComputeDiskStatusInner(instance, snode, anno_dev)
11837 def _ComputeDiskStatusInner(self, instance, snode, dev):
11838 """Compute block device status.
11840 @attention: The device has to be annotated already.
11843 if dev.dev_type in constants.LDS_DRBD:
11844 # we change the snode then (otherwise we use the one passed in)
11845 if dev.logical_id[0] == instance.primary_node:
11846 snode = dev.logical_id[1]
11848 snode = dev.logical_id[0]
11850 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
11852 dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
11855 dev_children = map(compat.partial(self._ComputeDiskStatusInner,
11862 "iv_name": dev.iv_name,
11863 "dev_type": dev.dev_type,
11864 "logical_id": dev.logical_id,
11865 "physical_id": dev.physical_id,
11866 "pstatus": dev_pstatus,
11867 "sstatus": dev_sstatus,
11868 "children": dev_children,
11873 def Exec(self, feedback_fn):
11874 """Gather and return data"""
11877 cluster = self.cfg.GetClusterInfo()
11879 node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
11880 nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
11882 groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
11883 for node in nodes.values()))
11885 group2name_fn = lambda uuid: groups[uuid].name
11887 for instance in self.wanted_instances:
11888 pnode = nodes[instance.primary_node]
11890 if self.op.static or pnode.offline:
11891 remote_state = None
11893 self.LogWarning("Primary node %s is marked offline, returning static"
11894 " information only for instance %s" %
11895 (pnode.name, instance.name))
11897 remote_info = self.rpc.call_instance_info(instance.primary_node,
11899 instance.hypervisor)
11900 remote_info.Raise("Error checking node %s" % instance.primary_node)
11901 remote_info = remote_info.payload
11902 if remote_info and "state" in remote_info:
11903 remote_state = "up"
11905 if instance.admin_state == constants.ADMINST_UP:
11906 remote_state = "down"
11908 remote_state = instance.admin_state
11910 disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
11913 snodes_group_uuids = [nodes[snode_name].group
11914 for snode_name in instance.secondary_nodes]
11916 result[instance.name] = {
11917 "name": instance.name,
11918 "config_state": instance.admin_state,
11919 "run_state": remote_state,
11920 "pnode": instance.primary_node,
11921 "pnode_group_uuid": pnode.group,
11922 "pnode_group_name": group2name_fn(pnode.group),
11923 "snodes": instance.secondary_nodes,
11924 "snodes_group_uuids": snodes_group_uuids,
11925 "snodes_group_names": map(group2name_fn, snodes_group_uuids),
11927 # this happens to be the same format used for hooks
11928 "nics": _NICListToTuple(self, instance.nics),
11929 "disk_template": instance.disk_template,
11931 "hypervisor": instance.hypervisor,
11932 "network_port": instance.network_port,
11933 "hv_instance": instance.hvparams,
11934 "hv_actual": cluster.FillHV(instance, skip_globals=True),
11935 "be_instance": instance.beparams,
11936 "be_actual": cluster.FillBE(instance),
11937 "os_instance": instance.osparams,
11938 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
11939 "serial_no": instance.serial_no,
11940 "mtime": instance.mtime,
11941 "ctime": instance.ctime,
11942 "uuid": instance.uuid,
11948 def PrepareContainerMods(mods, private_fn):
11949 """Prepares a list of container modifications by adding a private data field.
11951 @type mods: list of tuples; (operation, index, parameters)
11952 @param mods: List of modifications
11953 @type private_fn: callable or None
11954 @param private_fn: Callable for constructing a private data field for a
11959 if private_fn is None:
11964 return [(op, idx, params, fn()) for (op, idx, params) in mods]
11967 #: Type description for changes as returned by L{ApplyContainerMods}'s
11969 _TApplyContModsCbChanges = \
11970 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
11971 ht.TNonEmptyString,
11976 def ApplyContainerMods(kind, container, chgdesc, mods,
11977 create_fn, modify_fn, remove_fn):
11978 """Applies descriptions in C{mods} to C{container}.
11981 @param kind: One-word item description
11982 @type container: list
11983 @param container: Container to modify
11984 @type chgdesc: None or list
11985 @param chgdesc: List of applied changes
11987 @param mods: Modifications as returned by L{PrepareContainerMods}
11988 @type create_fn: callable
11989 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
11990 receives absolute item index, parameters and private data object as added
11991 by L{PrepareContainerMods}, returns tuple containing new item and changes
11993 @type modify_fn: callable
11994 @param modify_fn: Callback for modifying an existing item
11995 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
11996 and private data object as added by L{PrepareContainerMods}, returns
11998 @type remove_fn: callable
11999 @param remove_fn: Callback on removing item; receives absolute item index,
12000 item and private data object as added by L{PrepareContainerMods}
12003 for (op, idx, params, private) in mods:
12006 absidx = len(container) - 1
12008 raise IndexError("Not accepting negative indices other than -1")
12009 elif idx > len(container):
12010 raise IndexError("Got %s index %s, but there are only %s" %
12011 (kind, idx, len(container)))
12017 if op == constants.DDM_ADD:
12018 # Calculate where item will be added
12020 addidx = len(container)
12024 if create_fn is None:
12027 (item, changes) = create_fn(addidx, params, private)
12030 container.append(item)
12033 assert idx <= len(container)
12034 # list.insert does so before the specified index
12035 container.insert(idx, item)
12037 # Retrieve existing item
12039 item = container[absidx]
12041 raise IndexError("Invalid %s index %s" % (kind, idx))
12043 if op == constants.DDM_REMOVE:
12046 if remove_fn is not None:
12047 remove_fn(absidx, item, private)
12049 changes = [("%s/%s" % (kind, absidx), "remove")]
12051 assert container[absidx] == item
12052 del container[absidx]
12053 elif op == constants.DDM_MODIFY:
12054 if modify_fn is not None:
12055 changes = modify_fn(absidx, item, params, private)
12057 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12059 assert _TApplyContModsCbChanges(changes)
12061 if not (chgdesc is None or changes is None):
12062 chgdesc.extend(changes)
12065 def _UpdateIvNames(base_index, disks):
12066 """Updates the C{iv_name} attribute of disks.
12068 @type disks: list of L{objects.Disk}
12071 for (idx, disk) in enumerate(disks):
12072 disk.iv_name = "disk/%s" % (base_index + idx, )
12075 class _InstNicModPrivate:
12076 """Data structure for network interface modifications.
12078 Used by L{LUInstanceSetParams}.
12081 def __init__(self):
12086 class LUInstanceSetParams(LogicalUnit):
12087 """Modifies an instances's parameters.
12090 HPATH = "instance-modify"
12091 HTYPE = constants.HTYPE_INSTANCE
12095 def _UpgradeDiskNicMods(kind, mods, verify_fn):
12096 assert ht.TList(mods)
12097 assert not mods or len(mods[0]) in (2, 3)
12099 if mods and len(mods[0]) == 2:
12103 for op, params in mods:
12104 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
12105 result.append((op, -1, params))
12109 raise errors.OpPrereqError("Only one %s add or remove operation is"
12110 " supported at a time" % kind,
12111 errors.ECODE_INVAL)
12113 result.append((constants.DDM_MODIFY, op, params))
12115 assert verify_fn(result)
12122 def _CheckMods(kind, mods, key_types, item_fn):
12123 """Ensures requested disk/NIC modifications are valid.
12126 for (op, _, params) in mods:
12127 assert ht.TDict(params)
12129 utils.ForceDictType(params, key_types)
12131 if op == constants.DDM_REMOVE:
12133 raise errors.OpPrereqError("No settings should be passed when"
12134 " removing a %s" % kind,
12135 errors.ECODE_INVAL)
12136 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
12137 item_fn(op, params)
12139 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12142 def _VerifyDiskModification(op, params):
12143 """Verifies a disk modification.
12146 if op == constants.DDM_ADD:
12147 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
12148 if mode not in constants.DISK_ACCESS_SET:
12149 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
12150 errors.ECODE_INVAL)
12152 size = params.get(constants.IDISK_SIZE, None)
12154 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
12155 constants.IDISK_SIZE, errors.ECODE_INVAL)
12159 except (TypeError, ValueError), err:
12160 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
12161 errors.ECODE_INVAL)
12163 params[constants.IDISK_SIZE] = size
12165 elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
12166 raise errors.OpPrereqError("Disk size change not possible, use"
12167 " grow-disk", errors.ECODE_INVAL)
12170 def _VerifyNicModification(op, params):
12171 """Verifies a network interface modification.
12174 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
12175 ip = params.get(constants.INIC_IP, None)
12178 elif ip.lower() == constants.VALUE_NONE:
12179 params[constants.INIC_IP] = None
12180 elif not netutils.IPAddress.IsValid(ip):
12181 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
12182 errors.ECODE_INVAL)
12184 bridge = params.get("bridge", None)
12185 link = params.get(constants.INIC_LINK, None)
12186 if bridge and link:
12187 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
12188 " at the same time", errors.ECODE_INVAL)
12189 elif bridge and bridge.lower() == constants.VALUE_NONE:
12190 params["bridge"] = None
12191 elif link and link.lower() == constants.VALUE_NONE:
12192 params[constants.INIC_LINK] = None
12194 if op == constants.DDM_ADD:
12195 macaddr = params.get(constants.INIC_MAC, None)
12196 if macaddr is None:
12197 params[constants.INIC_MAC] = constants.VALUE_AUTO
12199 if constants.INIC_MAC in params:
12200 macaddr = params[constants.INIC_MAC]
12201 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12202 macaddr = utils.NormalizeAndValidateMac(macaddr)
12204 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
12205 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
12206 " modifying an existing NIC",
12207 errors.ECODE_INVAL)
12209 def CheckArguments(self):
12210 if not (self.op.nics or self.op.disks or self.op.disk_template or
12211 self.op.hvparams or self.op.beparams or self.op.os_name or
12212 self.op.offline is not None or self.op.runtime_mem):
12213 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
12215 if self.op.hvparams:
12216 _CheckGlobalHvParams(self.op.hvparams)
12219 self._UpgradeDiskNicMods("disk", self.op.disks,
12220 opcodes.OpInstanceSetParams.TestDiskModifications)
12222 self._UpgradeDiskNicMods("NIC", self.op.nics,
12223 opcodes.OpInstanceSetParams.TestNicModifications)
12225 # Check disk modifications
12226 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
12227 self._VerifyDiskModification)
12229 if self.op.disks and self.op.disk_template is not None:
12230 raise errors.OpPrereqError("Disk template conversion and other disk"
12231 " changes not supported at the same time",
12232 errors.ECODE_INVAL)
12234 if (self.op.disk_template and
12235 self.op.disk_template in constants.DTS_INT_MIRROR and
12236 self.op.remote_node is None):
12237 raise errors.OpPrereqError("Changing the disk template to a mirrored"
12238 " one requires specifying a secondary node",
12239 errors.ECODE_INVAL)
12241 # Check NIC modifications
12242 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
12243 self._VerifyNicModification)
12245 def ExpandNames(self):
12246 self._ExpandAndLockInstance()
12247 # Can't even acquire node locks in shared mode as upcoming changes in
12248 # Ganeti 2.6 will start to modify the node object on disk conversion
12249 self.needed_locks[locking.LEVEL_NODE] = []
12250 self.needed_locks[locking.LEVEL_NODE_RES] = []
12251 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
12253 def DeclareLocks(self, level):
12254 # TODO: Acquire group lock in shared mode (disk parameters)
12255 if level == locking.LEVEL_NODE:
12256 self._LockInstancesNodes()
12257 if self.op.disk_template and self.op.remote_node:
12258 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
12259 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
12260 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
12262 self.needed_locks[locking.LEVEL_NODE_RES] = \
12263 self.needed_locks[locking.LEVEL_NODE][:]
12265 def BuildHooksEnv(self):
12266 """Build hooks env.
12268 This runs on the master, primary and secondaries.
12272 if constants.BE_MINMEM in self.be_new:
12273 args["minmem"] = self.be_new[constants.BE_MINMEM]
12274 if constants.BE_MAXMEM in self.be_new:
12275 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
12276 if constants.BE_VCPUS in self.be_new:
12277 args["vcpus"] = self.be_new[constants.BE_VCPUS]
12278 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
12279 # information at all.
12281 if self._new_nics is not None:
12284 for nic in self._new_nics:
12285 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
12286 mode = nicparams[constants.NIC_MODE]
12287 link = nicparams[constants.NIC_LINK]
12288 nics.append((nic.ip, nic.mac, mode, link))
12290 args["nics"] = nics
12292 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
12293 if self.op.disk_template:
12294 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
12295 if self.op.runtime_mem:
12296 env["RUNTIME_MEMORY"] = self.op.runtime_mem
12300 def BuildHooksNodes(self):
12301 """Build hooks nodes.
12304 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
12307 def _PrepareNicModification(self, params, private, old_ip, old_params,
12309 update_params_dict = dict([(key, params[key])
12310 for key in constants.NICS_PARAMETERS
12313 if "bridge" in params:
12314 update_params_dict[constants.NIC_LINK] = params["bridge"]
12316 new_params = _GetUpdatedParams(old_params, update_params_dict)
12317 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
12319 new_filled_params = cluster.SimpleFillNIC(new_params)
12320 objects.NIC.CheckParameterSyntax(new_filled_params)
12322 new_mode = new_filled_params[constants.NIC_MODE]
12323 if new_mode == constants.NIC_MODE_BRIDGED:
12324 bridge = new_filled_params[constants.NIC_LINK]
12325 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
12327 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
12329 self.warn.append(msg)
12331 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
12333 elif new_mode == constants.NIC_MODE_ROUTED:
12334 ip = params.get(constants.INIC_IP, old_ip)
12336 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
12337 " on a routed NIC", errors.ECODE_INVAL)
12339 if constants.INIC_MAC in params:
12340 mac = params[constants.INIC_MAC]
12342 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
12343 errors.ECODE_INVAL)
12344 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12345 # otherwise generate the MAC address
12346 params[constants.INIC_MAC] = \
12347 self.cfg.GenerateMAC(self.proc.GetECId())
12349 # or validate/reserve the current one
12351 self.cfg.ReserveMAC(mac, self.proc.GetECId())
12352 except errors.ReservationError:
12353 raise errors.OpPrereqError("MAC address '%s' already in use"
12354 " in cluster" % mac,
12355 errors.ECODE_NOTUNIQUE)
12357 private.params = new_params
12358 private.filled = new_filled_params
12360 return (None, None)
12362 def CheckPrereq(self):
12363 """Check prerequisites.
12365 This only checks the instance list against the existing names.
12368 # checking the new params on the primary/secondary nodes
12370 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
12371 cluster = self.cluster = self.cfg.GetClusterInfo()
12372 assert self.instance is not None, \
12373 "Cannot retrieve locked instance %s" % self.op.instance_name
12374 pnode = instance.primary_node
12375 nodelist = list(instance.all_nodes)
12376 pnode_info = self.cfg.GetNodeInfo(pnode)
12377 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
12379 # Prepare disk/NIC modifications
12380 self.diskmod = PrepareContainerMods(self.op.disks, None)
12381 self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
12384 if self.op.os_name and not self.op.force:
12385 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
12386 self.op.force_variant)
12387 instance_os = self.op.os_name
12389 instance_os = instance.os
12391 assert not (self.op.disk_template and self.op.disks), \
12392 "Can't modify disk template and apply disk changes at the same time"
12394 if self.op.disk_template:
12395 if instance.disk_template == self.op.disk_template:
12396 raise errors.OpPrereqError("Instance already has disk template %s" %
12397 instance.disk_template, errors.ECODE_INVAL)
12399 if (instance.disk_template,
12400 self.op.disk_template) not in self._DISK_CONVERSIONS:
12401 raise errors.OpPrereqError("Unsupported disk template conversion from"
12402 " %s to %s" % (instance.disk_template,
12403 self.op.disk_template),
12404 errors.ECODE_INVAL)
12405 _CheckInstanceState(self, instance, INSTANCE_DOWN,
12406 msg="cannot change disk template")
12407 if self.op.disk_template in constants.DTS_INT_MIRROR:
12408 if self.op.remote_node == pnode:
12409 raise errors.OpPrereqError("Given new secondary node %s is the same"
12410 " as the primary node of the instance" %
12411 self.op.remote_node, errors.ECODE_STATE)
12412 _CheckNodeOnline(self, self.op.remote_node)
12413 _CheckNodeNotDrained(self, self.op.remote_node)
12414 # FIXME: here we assume that the old instance type is DT_PLAIN
12415 assert instance.disk_template == constants.DT_PLAIN
12416 disks = [{constants.IDISK_SIZE: d.size,
12417 constants.IDISK_VG: d.logical_id[0]}
12418 for d in instance.disks]
12419 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
12420 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
12422 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
12423 snode_group = self.cfg.GetNodeGroup(snode_info.group)
12424 ipolicy = _CalculateGroupIPolicy(cluster, snode_group)
12425 _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
12426 ignore=self.op.ignore_ipolicy)
12427 if pnode_info.group != snode_info.group:
12428 self.LogWarning("The primary and secondary nodes are in two"
12429 " different node groups; the disk parameters"
12430 " from the first disk's node group will be"
12433 # hvparams processing
12434 if self.op.hvparams:
12435 hv_type = instance.hypervisor
12436 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
12437 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
12438 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
12441 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
12442 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
12443 self.hv_proposed = self.hv_new = hv_new # the new actual values
12444 self.hv_inst = i_hvdict # the new dict (without defaults)
12446 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
12448 self.hv_new = self.hv_inst = {}
12450 # beparams processing
12451 if self.op.beparams:
12452 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
12454 objects.UpgradeBeParams(i_bedict)
12455 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
12456 be_new = cluster.SimpleFillBE(i_bedict)
12457 self.be_proposed = self.be_new = be_new # the new actual values
12458 self.be_inst = i_bedict # the new dict (without defaults)
12460 self.be_new = self.be_inst = {}
12461 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
12462 be_old = cluster.FillBE(instance)
12464 # CPU param validation -- checking every time a parameter is
12465 # changed to cover all cases where either CPU mask or vcpus have
12467 if (constants.BE_VCPUS in self.be_proposed and
12468 constants.HV_CPU_MASK in self.hv_proposed):
12470 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
12471 # Verify mask is consistent with number of vCPUs. Can skip this
12472 # test if only 1 entry in the CPU mask, which means same mask
12473 # is applied to all vCPUs.
12474 if (len(cpu_list) > 1 and
12475 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
12476 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
12478 (self.be_proposed[constants.BE_VCPUS],
12479 self.hv_proposed[constants.HV_CPU_MASK]),
12480 errors.ECODE_INVAL)
12482 # Only perform this test if a new CPU mask is given
12483 if constants.HV_CPU_MASK in self.hv_new:
12484 # Calculate the largest CPU number requested
12485 max_requested_cpu = max(map(max, cpu_list))
12486 # Check that all of the instance's nodes have enough physical CPUs to
12487 # satisfy the requested CPU mask
12488 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
12489 max_requested_cpu + 1, instance.hypervisor)
12491 # osparams processing
12492 if self.op.osparams:
12493 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
12494 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
12495 self.os_inst = i_osdict # the new dict (without defaults)
12501 #TODO(dynmem): do the appropriate check involving MINMEM
12502 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
12503 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
12504 mem_check_list = [pnode]
12505 if be_new[constants.BE_AUTO_BALANCE]:
12506 # either we changed auto_balance to yes or it was from before
12507 mem_check_list.extend(instance.secondary_nodes)
12508 instance_info = self.rpc.call_instance_info(pnode, instance.name,
12509 instance.hypervisor)
12510 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
12511 [instance.hypervisor])
12512 pninfo = nodeinfo[pnode]
12513 msg = pninfo.fail_msg
12515 # Assume the primary node is unreachable and go ahead
12516 self.warn.append("Can't get info from primary node %s: %s" %
12519 (_, _, (pnhvinfo, )) = pninfo.payload
12520 if not isinstance(pnhvinfo.get("memory_free", None), int):
12521 self.warn.append("Node data from primary node %s doesn't contain"
12522 " free memory information" % pnode)
12523 elif instance_info.fail_msg:
12524 self.warn.append("Can't get instance runtime information: %s" %
12525 instance_info.fail_msg)
12527 if instance_info.payload:
12528 current_mem = int(instance_info.payload["memory"])
12530 # Assume instance not running
12531 # (there is a slight race condition here, but it's not very
12532 # probable, and we have no other way to check)
12533 # TODO: Describe race condition
12535 #TODO(dynmem): do the appropriate check involving MINMEM
12536 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
12537 pnhvinfo["memory_free"])
12539 raise errors.OpPrereqError("This change will prevent the instance"
12540 " from starting, due to %d MB of memory"
12541 " missing on its primary node" %
12543 errors.ECODE_NORES)
12545 if be_new[constants.BE_AUTO_BALANCE]:
12546 for node, nres in nodeinfo.items():
12547 if node not in instance.secondary_nodes:
12549 nres.Raise("Can't get info from secondary node %s" % node,
12550 prereq=True, ecode=errors.ECODE_STATE)
12551 (_, _, (nhvinfo, )) = nres.payload
12552 if not isinstance(nhvinfo.get("memory_free", None), int):
12553 raise errors.OpPrereqError("Secondary node %s didn't return free"
12554 " memory information" % node,
12555 errors.ECODE_STATE)
12556 #TODO(dynmem): do the appropriate check involving MINMEM
12557 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
12558 raise errors.OpPrereqError("This change will prevent the instance"
12559 " from failover to its secondary node"
12560 " %s, due to not enough memory" % node,
12561 errors.ECODE_STATE)
12563 if self.op.runtime_mem:
12564 remote_info = self.rpc.call_instance_info(instance.primary_node,
12566 instance.hypervisor)
12567 remote_info.Raise("Error checking node %s" % instance.primary_node)
12568 if not remote_info.payload: # not running already
12569 raise errors.OpPrereqError("Instance %s is not running" % instance.name,
12570 errors.ECODE_STATE)
12572 current_memory = remote_info.payload["memory"]
12573 if (not self.op.force and
12574 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
12575 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
12576 raise errors.OpPrereqError("Instance %s must have memory between %d"
12577 " and %d MB of memory unless --force is"
12578 " given" % (instance.name,
12579 self.be_proposed[constants.BE_MINMEM],
12580 self.be_proposed[constants.BE_MAXMEM]),
12581 errors.ECODE_INVAL)
12583 if self.op.runtime_mem > current_memory:
12584 _CheckNodeFreeMemory(self, instance.primary_node,
12585 "ballooning memory for instance %s" %
12587 self.op.memory - current_memory,
12588 instance.hypervisor)
12590 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
12591 raise errors.OpPrereqError("Disk operations not supported for"
12592 " diskless instances",
12593 errors.ECODE_INVAL)
12595 def _PrepareNicCreate(_, params, private):
12596 return self._PrepareNicModification(params, private, None, {},
12599 def _PrepareNicMod(_, nic, params, private):
12600 return self._PrepareNicModification(params, private, nic.ip,
12601 nic.nicparams, cluster, pnode)
12603 # Verify NIC changes (operating on copy)
12604 nics = instance.nics[:]
12605 ApplyContainerMods("NIC", nics, None, self.nicmod,
12606 _PrepareNicCreate, _PrepareNicMod, None)
12607 if len(nics) > constants.MAX_NICS:
12608 raise errors.OpPrereqError("Instance has too many network interfaces"
12609 " (%d), cannot add more" % constants.MAX_NICS,
12610 errors.ECODE_STATE)
12612 # Verify disk changes (operating on a copy)
12613 disks = instance.disks[:]
12614 ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
12615 if len(disks) > constants.MAX_DISKS:
12616 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
12617 " more" % constants.MAX_DISKS,
12618 errors.ECODE_STATE)
12620 if self.op.offline is not None:
12621 if self.op.offline:
12622 msg = "can't change to offline"
12624 msg = "can't change to online"
12625 _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
12627 # Pre-compute NIC changes (necessary to use result in hooks)
12628 self._nic_chgdesc = []
12630 # Operate on copies as this is still in prereq
12631 nics = [nic.Copy() for nic in instance.nics]
12632 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
12633 self._CreateNewNic, self._ApplyNicMods, None)
12634 self._new_nics = nics
12636 self._new_nics = None
12638 def _ConvertPlainToDrbd(self, feedback_fn):
12639 """Converts an instance from plain to drbd.
12642 feedback_fn("Converting template to drbd")
12643 instance = self.instance
12644 pnode = instance.primary_node
12645 snode = self.op.remote_node
12647 assert instance.disk_template == constants.DT_PLAIN
12649 # create a fake disk info for _GenerateDiskTemplate
12650 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
12651 constants.IDISK_VG: d.logical_id[0]}
12652 for d in instance.disks]
12653 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
12654 instance.name, pnode, [snode],
12655 disk_info, None, None, 0, feedback_fn,
12657 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
12659 info = _GetInstanceInfoText(instance)
12660 feedback_fn("Creating additional volumes...")
12661 # first, create the missing data and meta devices
12662 for disk in anno_disks:
12663 # unfortunately this is... not too nice
12664 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
12666 for child in disk.children:
12667 _CreateSingleBlockDev(self, snode, instance, child, info, True)
12668 # at this stage, all new LVs have been created, we can rename the
12670 feedback_fn("Renaming original volumes...")
12671 rename_list = [(o, n.children[0].logical_id)
12672 for (o, n) in zip(instance.disks, new_disks)]
12673 result = self.rpc.call_blockdev_rename(pnode, rename_list)
12674 result.Raise("Failed to rename original LVs")
12676 feedback_fn("Initializing DRBD devices...")
12677 # all child devices are in place, we can now create the DRBD devices
12678 for disk in anno_disks:
12679 for node in [pnode, snode]:
12680 f_create = node == pnode
12681 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
12683 # at this point, the instance has been modified
12684 instance.disk_template = constants.DT_DRBD8
12685 instance.disks = new_disks
12686 self.cfg.Update(instance, feedback_fn)
12688 # Release node locks while waiting for sync
12689 _ReleaseLocks(self, locking.LEVEL_NODE)
12691 # disks are created, waiting for sync
12692 disk_abort = not _WaitForSync(self, instance,
12693 oneshot=not self.op.wait_for_sync)
12695 raise errors.OpExecError("There are some degraded disks for"
12696 " this instance, please cleanup manually")
12698 # Node resource locks will be released by caller
12700 def _ConvertDrbdToPlain(self, feedback_fn):
12701 """Converts an instance from drbd to plain.
12704 instance = self.instance
12706 assert len(instance.secondary_nodes) == 1
12707 assert instance.disk_template == constants.DT_DRBD8
12709 pnode = instance.primary_node
12710 snode = instance.secondary_nodes[0]
12711 feedback_fn("Converting template to plain")
12713 old_disks = instance.disks
12714 new_disks = [d.children[0] for d in old_disks]
12716 # copy over size and mode
12717 for parent, child in zip(old_disks, new_disks):
12718 child.size = parent.size
12719 child.mode = parent.mode
12721 # this is a DRBD disk, return its port to the pool
12722 # NOTE: this must be done right before the call to cfg.Update!
12723 for disk in old_disks:
12724 tcp_port = disk.logical_id[2]
12725 self.cfg.AddTcpUdpPort(tcp_port)
12727 # update instance structure
12728 instance.disks = new_disks
12729 instance.disk_template = constants.DT_PLAIN
12730 self.cfg.Update(instance, feedback_fn)
12732 # Release locks in case removing disks takes a while
12733 _ReleaseLocks(self, locking.LEVEL_NODE)
12735 feedback_fn("Removing volumes on the secondary node...")
12736 for disk in old_disks:
12737 self.cfg.SetDiskID(disk, snode)
12738 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
12740 self.LogWarning("Could not remove block device %s on node %s,"
12741 " continuing anyway: %s", disk.iv_name, snode, msg)
12743 feedback_fn("Removing unneeded volumes on the primary node...")
12744 for idx, disk in enumerate(old_disks):
12745 meta = disk.children[1]
12746 self.cfg.SetDiskID(meta, pnode)
12747 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
12749 self.LogWarning("Could not remove metadata for disk %d on node %s,"
12750 " continuing anyway: %s", idx, pnode, msg)
12752 def _CreateNewDisk(self, idx, params, _):
12753 """Creates a new disk.
12756 instance = self.instance
12759 if instance.disk_template in constants.DTS_FILEBASED:
12760 (file_driver, file_path) = instance.disks[0].logical_id
12761 file_path = os.path.dirname(file_path)
12763 file_driver = file_path = None
12766 _GenerateDiskTemplate(self, instance.disk_template, instance.name,
12767 instance.primary_node, instance.secondary_nodes,
12768 [params], file_path, file_driver, idx,
12769 self.Log, self.diskparams)[0]
12771 info = _GetInstanceInfoText(instance)
12773 logging.info("Creating volume %s for instance %s",
12774 disk.iv_name, instance.name)
12775 # Note: this needs to be kept in sync with _CreateDisks
12777 for node in instance.all_nodes:
12778 f_create = (node == instance.primary_node)
12780 _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
12781 except errors.OpExecError, err:
12782 self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
12783 disk.iv_name, disk, node, err)
12786 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
12790 def _ModifyDisk(idx, disk, params, _):
12791 """Modifies a disk.
12794 disk.mode = params[constants.IDISK_MODE]
12797 ("disk.mode/%d" % idx, disk.mode),
12800 def _RemoveDisk(self, idx, root, _):
12804 for node, disk in root.ComputeNodeTree(self.instance.primary_node):
12805 self.cfg.SetDiskID(disk, node)
12806 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
12808 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
12809 " continuing anyway", idx, node, msg)
12811 # if this is a DRBD disk, return its port to the pool
12812 if root.dev_type in constants.LDS_DRBD:
12813 self.cfg.AddTcpUdpPort(root.logical_id[2])
12816 def _CreateNewNic(idx, params, private):
12817 """Creates data structure for a new network interface.
12820 mac = params[constants.INIC_MAC]
12821 ip = params.get(constants.INIC_IP, None)
12822 nicparams = private.params
12824 return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
12826 "add:mac=%s,ip=%s,mode=%s,link=%s" %
12827 (mac, ip, private.filled[constants.NIC_MODE],
12828 private.filled[constants.NIC_LINK])),
12832 def _ApplyNicMods(idx, nic, params, private):
12833 """Modifies a network interface.
12838 for key in [constants.INIC_MAC, constants.INIC_IP]:
12840 changes.append(("nic.%s/%d" % (key, idx), params[key]))
12841 setattr(nic, key, params[key])
12844 nic.nicparams = private.params
12846 for (key, val) in params.items():
12847 changes.append(("nic.%s/%d" % (key, idx), val))
12851 def Exec(self, feedback_fn):
12852 """Modifies an instance.
12854 All parameters take effect only at the next restart of the instance.
12857 # Process here the warnings from CheckPrereq, as we don't have a
12858 # feedback_fn there.
12859 # TODO: Replace with self.LogWarning
12860 for warn in self.warn:
12861 feedback_fn("WARNING: %s" % warn)
12863 assert ((self.op.disk_template is None) ^
12864 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
12865 "Not owning any node resource locks"
12868 instance = self.instance
12871 if self.op.runtime_mem:
12872 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
12874 self.op.runtime_mem)
12875 rpcres.Raise("Cannot modify instance runtime memory")
12876 result.append(("runtime_memory", self.op.runtime_mem))
12878 # Apply disk changes
12879 ApplyContainerMods("disk", instance.disks, result, self.diskmod,
12880 self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
12881 _UpdateIvNames(0, instance.disks)
12883 if self.op.disk_template:
12885 check_nodes = set(instance.all_nodes)
12886 if self.op.remote_node:
12887 check_nodes.add(self.op.remote_node)
12888 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
12889 owned = self.owned_locks(level)
12890 assert not (check_nodes - owned), \
12891 ("Not owning the correct locks, owning %r, expected at least %r" %
12892 (owned, check_nodes))
12894 r_shut = _ShutdownInstanceDisks(self, instance)
12896 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
12897 " proceed with disk template conversion")
12898 mode = (instance.disk_template, self.op.disk_template)
12900 self._DISK_CONVERSIONS[mode](self, feedback_fn)
12902 self.cfg.ReleaseDRBDMinors(instance.name)
12904 result.append(("disk_template", self.op.disk_template))
12906 assert instance.disk_template == self.op.disk_template, \
12907 ("Expected disk template '%s', found '%s'" %
12908 (self.op.disk_template, instance.disk_template))
12910 # Release node and resource locks if there are any (they might already have
12911 # been released during disk conversion)
12912 _ReleaseLocks(self, locking.LEVEL_NODE)
12913 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
12915 # Apply NIC changes
12916 if self._new_nics is not None:
12917 instance.nics = self._new_nics
12918 result.extend(self._nic_chgdesc)
12921 if self.op.hvparams:
12922 instance.hvparams = self.hv_inst
12923 for key, val in self.op.hvparams.iteritems():
12924 result.append(("hv/%s" % key, val))
12927 if self.op.beparams:
12928 instance.beparams = self.be_inst
12929 for key, val in self.op.beparams.iteritems():
12930 result.append(("be/%s" % key, val))
12933 if self.op.os_name:
12934 instance.os = self.op.os_name
12937 if self.op.osparams:
12938 instance.osparams = self.os_inst
12939 for key, val in self.op.osparams.iteritems():
12940 result.append(("os/%s" % key, val))
12942 if self.op.offline is None:
12945 elif self.op.offline:
12946 # Mark instance as offline
12947 self.cfg.MarkInstanceOffline(instance.name)
12948 result.append(("admin_state", constants.ADMINST_OFFLINE))
12950 # Mark instance as online, but stopped
12951 self.cfg.MarkInstanceDown(instance.name)
12952 result.append(("admin_state", constants.ADMINST_DOWN))
12954 self.cfg.Update(instance, feedback_fn)
12956 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
12957 self.owned_locks(locking.LEVEL_NODE)), \
12958 "All node locks should have been released by now"
12962 _DISK_CONVERSIONS = {
12963 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
12964 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
12968 class LUInstanceChangeGroup(LogicalUnit):
12969 HPATH = "instance-change-group"
12970 HTYPE = constants.HTYPE_INSTANCE
12973 def ExpandNames(self):
12974 self.share_locks = _ShareAll()
12975 self.needed_locks = {
12976 locking.LEVEL_NODEGROUP: [],
12977 locking.LEVEL_NODE: [],
12980 self._ExpandAndLockInstance()
12982 if self.op.target_groups:
12983 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
12984 self.op.target_groups)
12986 self.req_target_uuids = None
12988 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
12990 def DeclareLocks(self, level):
12991 if level == locking.LEVEL_NODEGROUP:
12992 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
12994 if self.req_target_uuids:
12995 lock_groups = set(self.req_target_uuids)
12997 # Lock all groups used by instance optimistically; this requires going
12998 # via the node before it's locked, requiring verification later on
12999 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
13000 lock_groups.update(instance_groups)
13002 # No target groups, need to lock all of them
13003 lock_groups = locking.ALL_SET
13005 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
13007 elif level == locking.LEVEL_NODE:
13008 if self.req_target_uuids:
13009 # Lock all nodes used by instances
13010 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
13011 self._LockInstancesNodes()
13013 # Lock all nodes in all potential target groups
13014 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
13015 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
13016 member_nodes = [node_name
13017 for group in lock_groups
13018 for node_name in self.cfg.GetNodeGroup(group).members]
13019 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
13021 # Lock all nodes as all groups are potential targets
13022 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13024 def CheckPrereq(self):
13025 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13026 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
13027 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
13029 assert (self.req_target_uuids is None or
13030 owned_groups.issuperset(self.req_target_uuids))
13031 assert owned_instances == set([self.op.instance_name])
13033 # Get instance information
13034 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
13036 # Check if node groups for locked instance are still correct
13037 assert owned_nodes.issuperset(self.instance.all_nodes), \
13038 ("Instance %s's nodes changed while we kept the lock" %
13039 self.op.instance_name)
13041 inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
13044 if self.req_target_uuids:
13045 # User requested specific target groups
13046 self.target_uuids = frozenset(self.req_target_uuids)
13048 # All groups except those used by the instance are potential targets
13049 self.target_uuids = owned_groups - inst_groups
13051 conflicting_groups = self.target_uuids & inst_groups
13052 if conflicting_groups:
13053 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
13054 " used by the instance '%s'" %
13055 (utils.CommaJoin(conflicting_groups),
13056 self.op.instance_name),
13057 errors.ECODE_INVAL)
13059 if not self.target_uuids:
13060 raise errors.OpPrereqError("There are no possible target groups",
13061 errors.ECODE_INVAL)
13063 def BuildHooksEnv(self):
13064 """Build hooks env.
13067 assert self.target_uuids
13070 "TARGET_GROUPS": " ".join(self.target_uuids),
13073 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13077 def BuildHooksNodes(self):
13078 """Build hooks nodes.
13081 mn = self.cfg.GetMasterNode()
13082 return ([mn], [mn])
13084 def Exec(self, feedback_fn):
13085 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
13087 assert instances == [self.op.instance_name], "Instance not locked"
13089 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
13090 instances=instances, target_groups=list(self.target_uuids))
13092 ial.Run(self.op.iallocator)
13094 if not ial.success:
13095 raise errors.OpPrereqError("Can't compute solution for changing group of"
13096 " instance '%s' using iallocator '%s': %s" %
13097 (self.op.instance_name, self.op.iallocator,
13099 errors.ECODE_NORES)
13101 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
13103 self.LogInfo("Iallocator returned %s job(s) for changing group of"
13104 " instance '%s'", len(jobs), self.op.instance_name)
13106 return ResultWithJobs(jobs)
13109 class LUBackupQuery(NoHooksLU):
13110 """Query the exports list
13115 def CheckArguments(self):
13116 self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
13117 ["node", "export"], self.op.use_locking)
13119 def ExpandNames(self):
13120 self.expq.ExpandNames(self)
13122 def DeclareLocks(self, level):
13123 self.expq.DeclareLocks(self, level)
13125 def Exec(self, feedback_fn):
13128 for (node, expname) in self.expq.OldStyleQuery(self):
13129 if expname is None:
13130 result[node] = False
13132 result.setdefault(node, []).append(expname)
13137 class _ExportQuery(_QueryBase):
13138 FIELDS = query.EXPORT_FIELDS
13140 #: The node name is not a unique key for this query
13141 SORT_FIELD = "node"
13143 def ExpandNames(self, lu):
13144 lu.needed_locks = {}
13146 # The following variables interact with _QueryBase._GetNames
13148 self.wanted = _GetWantedNodes(lu, self.names)
13150 self.wanted = locking.ALL_SET
13152 self.do_locking = self.use_locking
13154 if self.do_locking:
13155 lu.share_locks = _ShareAll()
13156 lu.needed_locks = {
13157 locking.LEVEL_NODE: self.wanted,
13160 def DeclareLocks(self, lu, level):
13163 def _GetQueryData(self, lu):
13164 """Computes the list of nodes and their attributes.
13167 # Locking is not used
13169 assert not (compat.any(lu.glm.is_owned(level)
13170 for level in locking.LEVELS
13171 if level != locking.LEVEL_CLUSTER) or
13172 self.do_locking or self.use_locking)
13174 nodes = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)
13178 for (node, nres) in lu.rpc.call_export_list(nodes).items():
13180 result.append((node, None))
13182 result.extend((node, expname) for expname in nres.payload)
13187 class LUBackupPrepare(NoHooksLU):
13188 """Prepares an instance for an export and returns useful information.
13193 def ExpandNames(self):
13194 self._ExpandAndLockInstance()
13196 def CheckPrereq(self):
13197 """Check prerequisites.
13200 instance_name = self.op.instance_name
13202 self.instance = self.cfg.GetInstanceInfo(instance_name)
13203 assert self.instance is not None, \
13204 "Cannot retrieve locked instance %s" % self.op.instance_name
13205 _CheckNodeOnline(self, self.instance.primary_node)
13207 self._cds = _GetClusterDomainSecret()
13209 def Exec(self, feedback_fn):
13210 """Prepares an instance for an export.
13213 instance = self.instance
13215 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13216 salt = utils.GenerateSecret(8)
13218 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
13219 result = self.rpc.call_x509_cert_create(instance.primary_node,
13220 constants.RIE_CERT_VALIDITY)
13221 result.Raise("Can't create X509 key and certificate on %s" % result.node)
13223 (name, cert_pem) = result.payload
13225 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
13229 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
13230 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
13232 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
13238 class LUBackupExport(LogicalUnit):
13239 """Export an instance to an image in the cluster.
13242 HPATH = "instance-export"
13243 HTYPE = constants.HTYPE_INSTANCE
13246 def CheckArguments(self):
13247 """Check the arguments.
13250 self.x509_key_name = self.op.x509_key_name
13251 self.dest_x509_ca_pem = self.op.destination_x509_ca
13253 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13254 if not self.x509_key_name:
13255 raise errors.OpPrereqError("Missing X509 key name for encryption",
13256 errors.ECODE_INVAL)
13258 if not self.dest_x509_ca_pem:
13259 raise errors.OpPrereqError("Missing destination X509 CA",
13260 errors.ECODE_INVAL)
13262 def ExpandNames(self):
13263 self._ExpandAndLockInstance()
13265 # Lock all nodes for local exports
13266 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13267 # FIXME: lock only instance primary and destination node
13269 # Sad but true, for now we have do lock all nodes, as we don't know where
13270 # the previous export might be, and in this LU we search for it and
13271 # remove it from its current node. In the future we could fix this by:
13272 # - making a tasklet to search (share-lock all), then create the
13273 # new one, then one to remove, after
13274 # - removing the removal operation altogether
13275 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13277 def DeclareLocks(self, level):
13278 """Last minute lock declaration."""
13279 # All nodes are locked anyway, so nothing to do here.
13281 def BuildHooksEnv(self):
13282 """Build hooks env.
13284 This will run on the master, primary node and target node.
13288 "EXPORT_MODE": self.op.mode,
13289 "EXPORT_NODE": self.op.target_node,
13290 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
13291 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
13292 # TODO: Generic function for boolean env variables
13293 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
13296 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13300 def BuildHooksNodes(self):
13301 """Build hooks nodes.
13304 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
13306 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13307 nl.append(self.op.target_node)
13311 def CheckPrereq(self):
13312 """Check prerequisites.
13314 This checks that the instance and node names are valid.
13317 instance_name = self.op.instance_name
13319 self.instance = self.cfg.GetInstanceInfo(instance_name)
13320 assert self.instance is not None, \
13321 "Cannot retrieve locked instance %s" % self.op.instance_name
13322 _CheckNodeOnline(self, self.instance.primary_node)
13324 if (self.op.remove_instance and
13325 self.instance.admin_state == constants.ADMINST_UP and
13326 not self.op.shutdown):
13327 raise errors.OpPrereqError("Can not remove instance without shutting it"
13330 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13331 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
13332 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
13333 assert self.dst_node is not None
13335 _CheckNodeOnline(self, self.dst_node.name)
13336 _CheckNodeNotDrained(self, self.dst_node.name)
13339 self.dest_disk_info = None
13340 self.dest_x509_ca = None
13342 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13343 self.dst_node = None
13345 if len(self.op.target_node) != len(self.instance.disks):
13346 raise errors.OpPrereqError(("Received destination information for %s"
13347 " disks, but instance %s has %s disks") %
13348 (len(self.op.target_node), instance_name,
13349 len(self.instance.disks)),
13350 errors.ECODE_INVAL)
13352 cds = _GetClusterDomainSecret()
13354 # Check X509 key name
13356 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
13357 except (TypeError, ValueError), err:
13358 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
13360 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
13361 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
13362 errors.ECODE_INVAL)
13364 # Load and verify CA
13366 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
13367 except OpenSSL.crypto.Error, err:
13368 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
13369 (err, ), errors.ECODE_INVAL)
13371 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
13372 if errcode is not None:
13373 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
13374 (msg, ), errors.ECODE_INVAL)
13376 self.dest_x509_ca = cert
13378 # Verify target information
13380 for idx, disk_data in enumerate(self.op.target_node):
13382 (host, port, magic) = \
13383 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
13384 except errors.GenericError, err:
13385 raise errors.OpPrereqError("Target info for disk %s: %s" %
13386 (idx, err), errors.ECODE_INVAL)
13388 disk_info.append((host, port, magic))
13390 assert len(disk_info) == len(self.op.target_node)
13391 self.dest_disk_info = disk_info
13394 raise errors.ProgrammerError("Unhandled export mode %r" %
13397 # instance disk type verification
13398 # TODO: Implement export support for file-based disks
13399 for disk in self.instance.disks:
13400 if disk.dev_type == constants.LD_FILE:
13401 raise errors.OpPrereqError("Export not supported for instances with"
13402 " file-based disks", errors.ECODE_INVAL)
13404 def _CleanupExports(self, feedback_fn):
13405 """Removes exports of current instance from all other nodes.
13407 If an instance in a cluster with nodes A..D was exported to node C, its
13408 exports will be removed from the nodes A, B and D.
13411 assert self.op.mode != constants.EXPORT_MODE_REMOTE
13413 nodelist = self.cfg.GetNodeList()
13414 nodelist.remove(self.dst_node.name)
13416 # on one-node clusters nodelist will be empty after the removal
13417 # if we proceed the backup would be removed because OpBackupQuery
13418 # substitutes an empty list with the full cluster node list.
13419 iname = self.instance.name
13421 feedback_fn("Removing old exports for instance %s" % iname)
13422 exportlist = self.rpc.call_export_list(nodelist)
13423 for node in exportlist:
13424 if exportlist[node].fail_msg:
13426 if iname in exportlist[node].payload:
13427 msg = self.rpc.call_export_remove(node, iname).fail_msg
13429 self.LogWarning("Could not remove older export for instance %s"
13430 " on node %s: %s", iname, node, msg)
13432 def Exec(self, feedback_fn):
13433 """Export an instance to an image in the cluster.
13436 assert self.op.mode in constants.EXPORT_MODES
13438 instance = self.instance
13439 src_node = instance.primary_node
13441 if self.op.shutdown:
13442 # shutdown the instance, but not the disks
13443 feedback_fn("Shutting down instance %s" % instance.name)
13444 result = self.rpc.call_instance_shutdown(src_node, instance,
13445 self.op.shutdown_timeout)
13446 # TODO: Maybe ignore failures if ignore_remove_failures is set
13447 result.Raise("Could not shutdown instance %s on"
13448 " node %s" % (instance.name, src_node))
13450 # set the disks ID correctly since call_instance_start needs the
13451 # correct drbd minor to create the symlinks
13452 for disk in instance.disks:
13453 self.cfg.SetDiskID(disk, src_node)
13455 activate_disks = (instance.admin_state != constants.ADMINST_UP)
13458 # Activate the instance disks if we'exporting a stopped instance
13459 feedback_fn("Activating disks for %s" % instance.name)
13460 _StartInstanceDisks(self, instance, None)
13463 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
13466 helper.CreateSnapshots()
13468 if (self.op.shutdown and
13469 instance.admin_state == constants.ADMINST_UP and
13470 not self.op.remove_instance):
13471 assert not activate_disks
13472 feedback_fn("Starting instance %s" % instance.name)
13473 result = self.rpc.call_instance_start(src_node,
13474 (instance, None, None), False)
13475 msg = result.fail_msg
13477 feedback_fn("Failed to start instance: %s" % msg)
13478 _ShutdownInstanceDisks(self, instance)
13479 raise errors.OpExecError("Could not start instance: %s" % msg)
13481 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13482 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
13483 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13484 connect_timeout = constants.RIE_CONNECT_TIMEOUT
13485 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
13487 (key_name, _, _) = self.x509_key_name
13490 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
13493 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
13494 key_name, dest_ca_pem,
13499 # Check for backwards compatibility
13500 assert len(dresults) == len(instance.disks)
13501 assert compat.all(isinstance(i, bool) for i in dresults), \
13502 "Not all results are boolean: %r" % dresults
13506 feedback_fn("Deactivating disks for %s" % instance.name)
13507 _ShutdownInstanceDisks(self, instance)
13509 if not (compat.all(dresults) and fin_resu):
13512 failures.append("export finalization")
13513 if not compat.all(dresults):
13514 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
13516 failures.append("disk export: disk(s) %s" % fdsk)
13518 raise errors.OpExecError("Export failed, errors in %s" %
13519 utils.CommaJoin(failures))
13521 # At this point, the export was successful, we can cleanup/finish
13523 # Remove instance if requested
13524 if self.op.remove_instance:
13525 feedback_fn("Removing instance %s" % instance.name)
13526 _RemoveInstance(self, feedback_fn, instance,
13527 self.op.ignore_remove_failures)
13529 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13530 self._CleanupExports(feedback_fn)
13532 return fin_resu, dresults
13535 class LUBackupRemove(NoHooksLU):
13536 """Remove exports related to the named instance.
13541 def ExpandNames(self):
13542 self.needed_locks = {}
13543 # We need all nodes to be locked in order for RemoveExport to work, but we
13544 # don't need to lock the instance itself, as nothing will happen to it (and
13545 # we can remove exports also for a removed instance)
13546 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13548 def Exec(self, feedback_fn):
13549 """Remove any export.
13552 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
13553 # If the instance was not found we'll try with the name that was passed in.
13554 # This will only work if it was an FQDN, though.
13556 if not instance_name:
13558 instance_name = self.op.instance_name
13560 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
13561 exportlist = self.rpc.call_export_list(locked_nodes)
13563 for node in exportlist:
13564 msg = exportlist[node].fail_msg
13566 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
13568 if instance_name in exportlist[node].payload:
13570 result = self.rpc.call_export_remove(node, instance_name)
13571 msg = result.fail_msg
13573 logging.error("Could not remove export for instance %s"
13574 " on node %s: %s", instance_name, node, msg)
13576 if fqdn_warn and not found:
13577 feedback_fn("Export not found. If trying to remove an export belonging"
13578 " to a deleted instance please use its Fully Qualified"
13582 class LUGroupAdd(LogicalUnit):
13583 """Logical unit for creating node groups.
13586 HPATH = "group-add"
13587 HTYPE = constants.HTYPE_GROUP
13590 def ExpandNames(self):
13591 # We need the new group's UUID here so that we can create and acquire the
13592 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
13593 # that it should not check whether the UUID exists in the configuration.
13594 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
13595 self.needed_locks = {}
13596 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
13598 def CheckPrereq(self):
13599 """Check prerequisites.
13601 This checks that the given group name is not an existing node group
13606 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13607 except errors.OpPrereqError:
13610 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
13611 " node group (UUID: %s)" %
13612 (self.op.group_name, existing_uuid),
13613 errors.ECODE_EXISTS)
13615 if self.op.ndparams:
13616 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
13618 if self.op.hv_state:
13619 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
13621 self.new_hv_state = None
13623 if self.op.disk_state:
13624 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
13626 self.new_disk_state = None
13628 if self.op.diskparams:
13629 for templ in constants.DISK_TEMPLATES:
13630 if templ in self.op.diskparams:
13631 utils.ForceDictType(self.op.diskparams[templ],
13632 constants.DISK_DT_TYPES)
13633 self.new_diskparams = self.op.diskparams
13635 self.new_diskparams = {}
13637 if self.op.ipolicy:
13638 cluster = self.cfg.GetClusterInfo()
13639 full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
13641 objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)
13642 except errors.ConfigurationError, err:
13643 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
13644 errors.ECODE_INVAL)
13646 def BuildHooksEnv(self):
13647 """Build hooks env.
13651 "GROUP_NAME": self.op.group_name,
13654 def BuildHooksNodes(self):
13655 """Build hooks nodes.
13658 mn = self.cfg.GetMasterNode()
13659 return ([mn], [mn])
13661 def Exec(self, feedback_fn):
13662 """Add the node group to the cluster.
13665 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
13666 uuid=self.group_uuid,
13667 alloc_policy=self.op.alloc_policy,
13668 ndparams=self.op.ndparams,
13669 diskparams=self.new_diskparams,
13670 ipolicy=self.op.ipolicy,
13671 hv_state_static=self.new_hv_state,
13672 disk_state_static=self.new_disk_state)
13674 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
13675 del self.remove_locks[locking.LEVEL_NODEGROUP]
13678 class LUGroupAssignNodes(NoHooksLU):
13679 """Logical unit for assigning nodes to groups.
13684 def ExpandNames(self):
13685 # These raise errors.OpPrereqError on their own:
13686 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13687 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
13689 # We want to lock all the affected nodes and groups. We have readily
13690 # available the list of nodes, and the *destination* group. To gather the
13691 # list of "source" groups, we need to fetch node information later on.
13692 self.needed_locks = {
13693 locking.LEVEL_NODEGROUP: set([self.group_uuid]),
13694 locking.LEVEL_NODE: self.op.nodes,
13697 def DeclareLocks(self, level):
13698 if level == locking.LEVEL_NODEGROUP:
13699 assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
13701 # Try to get all affected nodes' groups without having the group or node
13702 # lock yet. Needs verification later in the code flow.
13703 groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
13705 self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
13707 def CheckPrereq(self):
13708 """Check prerequisites.
13711 assert self.needed_locks[locking.LEVEL_NODEGROUP]
13712 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
13713 frozenset(self.op.nodes))
13715 expected_locks = (set([self.group_uuid]) |
13716 self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
13717 actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
13718 if actual_locks != expected_locks:
13719 raise errors.OpExecError("Nodes changed groups since locks were acquired,"
13720 " current groups are '%s', used to be '%s'" %
13721 (utils.CommaJoin(expected_locks),
13722 utils.CommaJoin(actual_locks)))
13724 self.node_data = self.cfg.GetAllNodesInfo()
13725 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13726 instance_data = self.cfg.GetAllInstancesInfo()
13728 if self.group is None:
13729 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13730 (self.op.group_name, self.group_uuid))
13732 (new_splits, previous_splits) = \
13733 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
13734 for node in self.op.nodes],
13735 self.node_data, instance_data)
13738 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
13740 if not self.op.force:
13741 raise errors.OpExecError("The following instances get split by this"
13742 " change and --force was not given: %s" %
13745 self.LogWarning("This operation will split the following instances: %s",
13748 if previous_splits:
13749 self.LogWarning("In addition, these already-split instances continue"
13750 " to be split across groups: %s",
13751 utils.CommaJoin(utils.NiceSort(previous_splits)))
13753 def Exec(self, feedback_fn):
13754 """Assign nodes to a new group.
13757 mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
13759 self.cfg.AssignGroupNodes(mods)
13762 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
13763 """Check for split instances after a node assignment.
13765 This method considers a series of node assignments as an atomic operation,
13766 and returns information about split instances after applying the set of
13769 In particular, it returns information about newly split instances, and
13770 instances that were already split, and remain so after the change.
13772 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
13775 @type changes: list of (node_name, new_group_uuid) pairs.
13776 @param changes: list of node assignments to consider.
13777 @param node_data: a dict with data for all nodes
13778 @param instance_data: a dict with all instances to consider
13779 @rtype: a two-tuple
13780 @return: a list of instances that were previously okay and result split as a
13781 consequence of this change, and a list of instances that were previously
13782 split and this change does not fix.
13785 changed_nodes = dict((node, group) for node, group in changes
13786 if node_data[node].group != group)
13788 all_split_instances = set()
13789 previously_split_instances = set()
13791 def InstanceNodes(instance):
13792 return [instance.primary_node] + list(instance.secondary_nodes)
13794 for inst in instance_data.values():
13795 if inst.disk_template not in constants.DTS_INT_MIRROR:
13798 instance_nodes = InstanceNodes(inst)
13800 if len(set(node_data[node].group for node in instance_nodes)) > 1:
13801 previously_split_instances.add(inst.name)
13803 if len(set(changed_nodes.get(node, node_data[node].group)
13804 for node in instance_nodes)) > 1:
13805 all_split_instances.add(inst.name)
13807 return (list(all_split_instances - previously_split_instances),
13808 list(previously_split_instances & all_split_instances))
13811 class _GroupQuery(_QueryBase):
13812 FIELDS = query.GROUP_FIELDS
13814 def ExpandNames(self, lu):
13815 lu.needed_locks = {}
13817 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
13818 self._cluster = lu.cfg.GetClusterInfo()
13819 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
13822 self.wanted = [name_to_uuid[name]
13823 for name in utils.NiceSort(name_to_uuid.keys())]
13825 # Accept names to be either names or UUIDs.
13828 all_uuid = frozenset(self._all_groups.keys())
13830 for name in self.names:
13831 if name in all_uuid:
13832 self.wanted.append(name)
13833 elif name in name_to_uuid:
13834 self.wanted.append(name_to_uuid[name])
13836 missing.append(name)
13839 raise errors.OpPrereqError("Some groups do not exist: %s" %
13840 utils.CommaJoin(missing),
13841 errors.ECODE_NOENT)
13843 def DeclareLocks(self, lu, level):
13846 def _GetQueryData(self, lu):
13847 """Computes the list of node groups and their attributes.
13850 do_nodes = query.GQ_NODE in self.requested_data
13851 do_instances = query.GQ_INST in self.requested_data
13853 group_to_nodes = None
13854 group_to_instances = None
13856 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
13857 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
13858 # latter GetAllInstancesInfo() is not enough, for we have to go through
13859 # instance->node. Hence, we will need to process nodes even if we only need
13860 # instance information.
13861 if do_nodes or do_instances:
13862 all_nodes = lu.cfg.GetAllNodesInfo()
13863 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
13866 for node in all_nodes.values():
13867 if node.group in group_to_nodes:
13868 group_to_nodes[node.group].append(node.name)
13869 node_to_group[node.name] = node.group
13872 all_instances = lu.cfg.GetAllInstancesInfo()
13873 group_to_instances = dict((uuid, []) for uuid in self.wanted)
13875 for instance in all_instances.values():
13876 node = instance.primary_node
13877 if node in node_to_group:
13878 group_to_instances[node_to_group[node]].append(instance.name)
13881 # Do not pass on node information if it was not requested.
13882 group_to_nodes = None
13884 return query.GroupQueryData(self._cluster,
13885 [self._all_groups[uuid]
13886 for uuid in self.wanted],
13887 group_to_nodes, group_to_instances,
13888 query.GQ_DISKPARAMS in self.requested_data)
13891 class LUGroupQuery(NoHooksLU):
13892 """Logical unit for querying node groups.
13897 def CheckArguments(self):
13898 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
13899 self.op.output_fields, False)
13901 def ExpandNames(self):
13902 self.gq.ExpandNames(self)
13904 def DeclareLocks(self, level):
13905 self.gq.DeclareLocks(self, level)
13907 def Exec(self, feedback_fn):
13908 return self.gq.OldStyleQuery(self)
13911 class LUGroupSetParams(LogicalUnit):
13912 """Modifies the parameters of a node group.
13915 HPATH = "group-modify"
13916 HTYPE = constants.HTYPE_GROUP
13919 def CheckArguments(self):
13922 self.op.diskparams,
13923 self.op.alloc_policy,
13925 self.op.disk_state,
13929 if all_changes.count(None) == len(all_changes):
13930 raise errors.OpPrereqError("Please pass at least one modification",
13931 errors.ECODE_INVAL)
13933 def ExpandNames(self):
13934 # This raises errors.OpPrereqError on its own:
13935 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13937 self.needed_locks = {
13938 locking.LEVEL_INSTANCE: [],
13939 locking.LEVEL_NODEGROUP: [self.group_uuid],
13942 self.share_locks[locking.LEVEL_INSTANCE] = 1
13944 def DeclareLocks(self, level):
13945 if level == locking.LEVEL_INSTANCE:
13946 assert not self.needed_locks[locking.LEVEL_INSTANCE]
13948 # Lock instances optimistically, needs verification once group lock has
13950 self.needed_locks[locking.LEVEL_INSTANCE] = \
13951 self.cfg.GetNodeGroupInstances(self.group_uuid)
13954 def _UpdateAndVerifyDiskParams(old, new):
13955 """Updates and verifies disk parameters.
13958 new_params = _GetUpdatedParams(old, new)
13959 utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
13962 def CheckPrereq(self):
13963 """Check prerequisites.
13966 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13968 # Check if locked instances are still correct
13969 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
13971 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13972 cluster = self.cfg.GetClusterInfo()
13974 if self.group is None:
13975 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13976 (self.op.group_name, self.group_uuid))
13978 if self.op.ndparams:
13979 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
13980 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
13981 self.new_ndparams = new_ndparams
13983 if self.op.diskparams:
13984 diskparams = self.group.diskparams
13985 uavdp = self._UpdateAndVerifyDiskParams
13986 # For each disktemplate subdict update and verify the values
13987 new_diskparams = dict((dt,
13988 uavdp(diskparams.get(dt, {}),
13989 self.op.diskparams[dt]))
13990 for dt in constants.DISK_TEMPLATES
13991 if dt in self.op.diskparams)
13992 # As we've all subdicts of diskparams ready, lets merge the actual
13993 # dict with all updated subdicts
13994 self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
13996 if self.op.hv_state:
13997 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
13998 self.group.hv_state_static)
14000 if self.op.disk_state:
14001 self.new_disk_state = \
14002 _MergeAndVerifyDiskState(self.op.disk_state,
14003 self.group.disk_state_static)
14005 if self.op.ipolicy:
14006 self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
14010 new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
14011 inst_filter = lambda inst: inst.name in owned_instances
14012 instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
14014 _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
14016 new_ipolicy, instances)
14019 self.LogWarning("After the ipolicy change the following instances"
14020 " violate them: %s",
14021 utils.CommaJoin(violations))
14023 def BuildHooksEnv(self):
14024 """Build hooks env.
14028 "GROUP_NAME": self.op.group_name,
14029 "NEW_ALLOC_POLICY": self.op.alloc_policy,
14032 def BuildHooksNodes(self):
14033 """Build hooks nodes.
14036 mn = self.cfg.GetMasterNode()
14037 return ([mn], [mn])
14039 def Exec(self, feedback_fn):
14040 """Modifies the node group.
14045 if self.op.ndparams:
14046 self.group.ndparams = self.new_ndparams
14047 result.append(("ndparams", str(self.group.ndparams)))
14049 if self.op.diskparams:
14050 self.group.diskparams = self.new_diskparams
14051 result.append(("diskparams", str(self.group.diskparams)))
14053 if self.op.alloc_policy:
14054 self.group.alloc_policy = self.op.alloc_policy
14056 if self.op.hv_state:
14057 self.group.hv_state_static = self.new_hv_state
14059 if self.op.disk_state:
14060 self.group.disk_state_static = self.new_disk_state
14062 if self.op.ipolicy:
14063 self.group.ipolicy = self.new_ipolicy
14065 self.cfg.Update(self.group, feedback_fn)
14069 class LUGroupRemove(LogicalUnit):
14070 HPATH = "group-remove"
14071 HTYPE = constants.HTYPE_GROUP
14074 def ExpandNames(self):
14075 # This will raises errors.OpPrereqError on its own:
14076 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14077 self.needed_locks = {
14078 locking.LEVEL_NODEGROUP: [self.group_uuid],
14081 def CheckPrereq(self):
14082 """Check prerequisites.
14084 This checks that the given group name exists as a node group, that is
14085 empty (i.e., contains no nodes), and that is not the last group of the
14089 # Verify that the group is empty.
14090 group_nodes = [node.name
14091 for node in self.cfg.GetAllNodesInfo().values()
14092 if node.group == self.group_uuid]
14095 raise errors.OpPrereqError("Group '%s' not empty, has the following"
14097 (self.op.group_name,
14098 utils.CommaJoin(utils.NiceSort(group_nodes))),
14099 errors.ECODE_STATE)
14101 # Verify the cluster would not be left group-less.
14102 if len(self.cfg.GetNodeGroupList()) == 1:
14103 raise errors.OpPrereqError("Group '%s' is the only group,"
14104 " cannot be removed" %
14105 self.op.group_name,
14106 errors.ECODE_STATE)
14108 def BuildHooksEnv(self):
14109 """Build hooks env.
14113 "GROUP_NAME": self.op.group_name,
14116 def BuildHooksNodes(self):
14117 """Build hooks nodes.
14120 mn = self.cfg.GetMasterNode()
14121 return ([mn], [mn])
14123 def Exec(self, feedback_fn):
14124 """Remove the node group.
14128 self.cfg.RemoveNodeGroup(self.group_uuid)
14129 except errors.ConfigurationError:
14130 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
14131 (self.op.group_name, self.group_uuid))
14133 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
14136 class LUGroupRename(LogicalUnit):
14137 HPATH = "group-rename"
14138 HTYPE = constants.HTYPE_GROUP
14141 def ExpandNames(self):
14142 # This raises errors.OpPrereqError on its own:
14143 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14145 self.needed_locks = {
14146 locking.LEVEL_NODEGROUP: [self.group_uuid],
14149 def CheckPrereq(self):
14150 """Check prerequisites.
14152 Ensures requested new name is not yet used.
14156 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
14157 except errors.OpPrereqError:
14160 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
14161 " node group (UUID: %s)" %
14162 (self.op.new_name, new_name_uuid),
14163 errors.ECODE_EXISTS)
14165 def BuildHooksEnv(self):
14166 """Build hooks env.
14170 "OLD_NAME": self.op.group_name,
14171 "NEW_NAME": self.op.new_name,
14174 def BuildHooksNodes(self):
14175 """Build hooks nodes.
14178 mn = self.cfg.GetMasterNode()
14180 all_nodes = self.cfg.GetAllNodesInfo()
14181 all_nodes.pop(mn, None)
14184 run_nodes.extend(node.name for node in all_nodes.values()
14185 if node.group == self.group_uuid)
14187 return (run_nodes, run_nodes)
14189 def Exec(self, feedback_fn):
14190 """Rename the node group.
14193 group = self.cfg.GetNodeGroup(self.group_uuid)
14196 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
14197 (self.op.group_name, self.group_uuid))
14199 group.name = self.op.new_name
14200 self.cfg.Update(group, feedback_fn)
14202 return self.op.new_name
14205 class LUGroupEvacuate(LogicalUnit):
14206 HPATH = "group-evacuate"
14207 HTYPE = constants.HTYPE_GROUP
14210 def ExpandNames(self):
14211 # This raises errors.OpPrereqError on its own:
14212 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14214 if self.op.target_groups:
14215 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
14216 self.op.target_groups)
14218 self.req_target_uuids = []
14220 if self.group_uuid in self.req_target_uuids:
14221 raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
14222 " as a target group (targets are %s)" %
14224 utils.CommaJoin(self.req_target_uuids)),
14225 errors.ECODE_INVAL)
14227 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
14229 self.share_locks = _ShareAll()
14230 self.needed_locks = {
14231 locking.LEVEL_INSTANCE: [],
14232 locking.LEVEL_NODEGROUP: [],
14233 locking.LEVEL_NODE: [],
14236 def DeclareLocks(self, level):
14237 if level == locking.LEVEL_INSTANCE:
14238 assert not self.needed_locks[locking.LEVEL_INSTANCE]
14240 # Lock instances optimistically, needs verification once node and group
14241 # locks have been acquired
14242 self.needed_locks[locking.LEVEL_INSTANCE] = \
14243 self.cfg.GetNodeGroupInstances(self.group_uuid)
14245 elif level == locking.LEVEL_NODEGROUP:
14246 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
14248 if self.req_target_uuids:
14249 lock_groups = set([self.group_uuid] + self.req_target_uuids)
14251 # Lock all groups used by instances optimistically; this requires going
14252 # via the node before it's locked, requiring verification later on
14253 lock_groups.update(group_uuid
14254 for instance_name in
14255 self.owned_locks(locking.LEVEL_INSTANCE)
14257 self.cfg.GetInstanceNodeGroups(instance_name))
14259 # No target groups, need to lock all of them
14260 lock_groups = locking.ALL_SET
14262 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
14264 elif level == locking.LEVEL_NODE:
14265 # This will only lock the nodes in the group to be evacuated which
14266 # contain actual instances
14267 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
14268 self._LockInstancesNodes()
14270 # Lock all nodes in group to be evacuated and target groups
14271 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14272 assert self.group_uuid in owned_groups
14273 member_nodes = [node_name
14274 for group in owned_groups
14275 for node_name in self.cfg.GetNodeGroup(group).members]
14276 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
14278 def CheckPrereq(self):
14279 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
14280 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14281 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
14283 assert owned_groups.issuperset(self.req_target_uuids)
14284 assert self.group_uuid in owned_groups
14286 # Check if locked instances are still correct
14287 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
14289 # Get instance information
14290 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
14292 # Check if node groups for locked instances are still correct
14293 _CheckInstancesNodeGroups(self.cfg, self.instances,
14294 owned_groups, owned_nodes, self.group_uuid)
14296 if self.req_target_uuids:
14297 # User requested specific target groups
14298 self.target_uuids = self.req_target_uuids
14300 # All groups except the one to be evacuated are potential targets
14301 self.target_uuids = [group_uuid for group_uuid in owned_groups
14302 if group_uuid != self.group_uuid]
14304 if not self.target_uuids:
14305 raise errors.OpPrereqError("There are no possible target groups",
14306 errors.ECODE_INVAL)
14308 def BuildHooksEnv(self):
14309 """Build hooks env.
14313 "GROUP_NAME": self.op.group_name,
14314 "TARGET_GROUPS": " ".join(self.target_uuids),
14317 def BuildHooksNodes(self):
14318 """Build hooks nodes.
14321 mn = self.cfg.GetMasterNode()
14323 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
14325 run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
14327 return (run_nodes, run_nodes)
14329 def Exec(self, feedback_fn):
14330 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
14332 assert self.group_uuid not in self.target_uuids
14334 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
14335 instances=instances, target_groups=self.target_uuids)
14337 ial.Run(self.op.iallocator)
14339 if not ial.success:
14340 raise errors.OpPrereqError("Can't compute group evacuation using"
14341 " iallocator '%s': %s" %
14342 (self.op.iallocator, ial.info),
14343 errors.ECODE_NORES)
14345 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
14347 self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
14348 len(jobs), self.op.group_name)
14350 return ResultWithJobs(jobs)
14353 class TagsLU(NoHooksLU): # pylint: disable=W0223
14354 """Generic tags LU.
14356 This is an abstract class which is the parent of all the other tags LUs.
14359 def ExpandNames(self):
14360 self.group_uuid = None
14361 self.needed_locks = {}
14363 if self.op.kind == constants.TAG_NODE:
14364 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
14365 lock_level = locking.LEVEL_NODE
14366 lock_name = self.op.name
14367 elif self.op.kind == constants.TAG_INSTANCE:
14368 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
14369 lock_level = locking.LEVEL_INSTANCE
14370 lock_name = self.op.name
14371 elif self.op.kind == constants.TAG_NODEGROUP:
14372 self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
14373 lock_level = locking.LEVEL_NODEGROUP
14374 lock_name = self.group_uuid
14379 if lock_level and getattr(self.op, "use_locking", True):
14380 self.needed_locks[lock_level] = lock_name
14382 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
14383 # not possible to acquire the BGL based on opcode parameters)
14385 def CheckPrereq(self):
14386 """Check prerequisites.
14389 if self.op.kind == constants.TAG_CLUSTER:
14390 self.target = self.cfg.GetClusterInfo()
14391 elif self.op.kind == constants.TAG_NODE:
14392 self.target = self.cfg.GetNodeInfo(self.op.name)
14393 elif self.op.kind == constants.TAG_INSTANCE:
14394 self.target = self.cfg.GetInstanceInfo(self.op.name)
14395 elif self.op.kind == constants.TAG_NODEGROUP:
14396 self.target = self.cfg.GetNodeGroup(self.group_uuid)
14398 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
14399 str(self.op.kind), errors.ECODE_INVAL)
14402 class LUTagsGet(TagsLU):
14403 """Returns the tags of a given object.
14408 def ExpandNames(self):
14409 TagsLU.ExpandNames(self)
14411 # Share locks as this is only a read operation
14412 self.share_locks = _ShareAll()
14414 def Exec(self, feedback_fn):
14415 """Returns the tag list.
14418 return list(self.target.GetTags())
14421 class LUTagsSearch(NoHooksLU):
14422 """Searches the tags for a given pattern.
14427 def ExpandNames(self):
14428 self.needed_locks = {}
14430 def CheckPrereq(self):
14431 """Check prerequisites.
14433 This checks the pattern passed for validity by compiling it.
14437 self.re = re.compile(self.op.pattern)
14438 except re.error, err:
14439 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
14440 (self.op.pattern, err), errors.ECODE_INVAL)
14442 def Exec(self, feedback_fn):
14443 """Returns the tag list.
14447 tgts = [("/cluster", cfg.GetClusterInfo())]
14448 ilist = cfg.GetAllInstancesInfo().values()
14449 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
14450 nlist = cfg.GetAllNodesInfo().values()
14451 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
14452 tgts.extend(("/nodegroup/%s" % n.name, n)
14453 for n in cfg.GetAllNodeGroupsInfo().values())
14455 for path, target in tgts:
14456 for tag in target.GetTags():
14457 if self.re.search(tag):
14458 results.append((path, tag))
14462 class LUTagsSet(TagsLU):
14463 """Sets a tag on a given object.
14468 def CheckPrereq(self):
14469 """Check prerequisites.
14471 This checks the type and length of the tag name and value.
14474 TagsLU.CheckPrereq(self)
14475 for tag in self.op.tags:
14476 objects.TaggableObject.ValidateTag(tag)
14478 def Exec(self, feedback_fn):
14483 for tag in self.op.tags:
14484 self.target.AddTag(tag)
14485 except errors.TagError, err:
14486 raise errors.OpExecError("Error while setting tag: %s" % str(err))
14487 self.cfg.Update(self.target, feedback_fn)
14490 class LUTagsDel(TagsLU):
14491 """Delete a list of tags from a given object.
14496 def CheckPrereq(self):
14497 """Check prerequisites.
14499 This checks that we have the given tag.
14502 TagsLU.CheckPrereq(self)
14503 for tag in self.op.tags:
14504 objects.TaggableObject.ValidateTag(tag)
14505 del_tags = frozenset(self.op.tags)
14506 cur_tags = self.target.GetTags()
14508 diff_tags = del_tags - cur_tags
14510 diff_names = ("'%s'" % i for i in sorted(diff_tags))
14511 raise errors.OpPrereqError("Tag(s) %s not found" %
14512 (utils.CommaJoin(diff_names), ),
14513 errors.ECODE_NOENT)
14515 def Exec(self, feedback_fn):
14516 """Remove the tag from the object.
14519 for tag in self.op.tags:
14520 self.target.RemoveTag(tag)
14521 self.cfg.Update(self.target, feedback_fn)
14524 class LUTestDelay(NoHooksLU):
14525 """Sleep for a specified amount of time.
14527 This LU sleeps on the master and/or nodes for a specified amount of
14533 def ExpandNames(self):
14534 """Expand names and set required locks.
14536 This expands the node list, if any.
14539 self.needed_locks = {}
14540 if self.op.on_nodes:
14541 # _GetWantedNodes can be used here, but is not always appropriate to use
14542 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
14543 # more information.
14544 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
14545 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
14547 def _TestDelay(self):
14548 """Do the actual sleep.
14551 if self.op.on_master:
14552 if not utils.TestDelay(self.op.duration):
14553 raise errors.OpExecError("Error during master delay test")
14554 if self.op.on_nodes:
14555 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
14556 for node, node_result in result.items():
14557 node_result.Raise("Failure during rpc call to node %s" % node)
14559 def Exec(self, feedback_fn):
14560 """Execute the test delay opcode, with the wanted repetitions.
14563 if self.op.repeat == 0:
14566 top_value = self.op.repeat - 1
14567 for i in range(self.op.repeat):
14568 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
14572 class LUTestJqueue(NoHooksLU):
14573 """Utility LU to test some aspects of the job queue.
14578 # Must be lower than default timeout for WaitForJobChange to see whether it
14579 # notices changed jobs
14580 _CLIENT_CONNECT_TIMEOUT = 20.0
14581 _CLIENT_CONFIRM_TIMEOUT = 60.0
14584 def _NotifyUsingSocket(cls, cb, errcls):
14585 """Opens a Unix socket and waits for another program to connect.
14588 @param cb: Callback to send socket name to client
14589 @type errcls: class
14590 @param errcls: Exception class to use for errors
14593 # Using a temporary directory as there's no easy way to create temporary
14594 # sockets without writing a custom loop around tempfile.mktemp and
14596 tmpdir = tempfile.mkdtemp()
14598 tmpsock = utils.PathJoin(tmpdir, "sock")
14600 logging.debug("Creating temporary socket at %s", tmpsock)
14601 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
14606 # Send details to client
14609 # Wait for client to connect before continuing
14610 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
14612 (conn, _) = sock.accept()
14613 except socket.error, err:
14614 raise errcls("Client didn't connect in time (%s)" % err)
14618 # Remove as soon as client is connected
14619 shutil.rmtree(tmpdir)
14621 # Wait for client to close
14624 # pylint: disable=E1101
14625 # Instance of '_socketobject' has no ... member
14626 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
14628 except socket.error, err:
14629 raise errcls("Client failed to confirm notification (%s)" % err)
14633 def _SendNotification(self, test, arg, sockname):
14634 """Sends a notification to the client.
14637 @param test: Test name
14638 @param arg: Test argument (depends on test)
14639 @type sockname: string
14640 @param sockname: Socket path
14643 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
14645 def _Notify(self, prereq, test, arg):
14646 """Notifies the client of a test.
14649 @param prereq: Whether this is a prereq-phase test
14651 @param test: Test name
14652 @param arg: Test argument (depends on test)
14656 errcls = errors.OpPrereqError
14658 errcls = errors.OpExecError
14660 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
14664 def CheckArguments(self):
14665 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
14666 self.expandnames_calls = 0
14668 def ExpandNames(self):
14669 checkargs_calls = getattr(self, "checkargs_calls", 0)
14670 if checkargs_calls < 1:
14671 raise errors.ProgrammerError("CheckArguments was not called")
14673 self.expandnames_calls += 1
14675 if self.op.notify_waitlock:
14676 self._Notify(True, constants.JQT_EXPANDNAMES, None)
14678 self.LogInfo("Expanding names")
14680 # Get lock on master node (just to get a lock, not for a particular reason)
14681 self.needed_locks = {
14682 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
14685 def Exec(self, feedback_fn):
14686 if self.expandnames_calls < 1:
14687 raise errors.ProgrammerError("ExpandNames was not called")
14689 if self.op.notify_exec:
14690 self._Notify(False, constants.JQT_EXEC, None)
14692 self.LogInfo("Executing")
14694 if self.op.log_messages:
14695 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
14696 for idx, msg in enumerate(self.op.log_messages):
14697 self.LogInfo("Sending log message %s", idx + 1)
14698 feedback_fn(constants.JQT_MSGPREFIX + msg)
14699 # Report how many test messages have been sent
14700 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
14703 raise errors.OpExecError("Opcode failure was requested")
14708 class IAllocator(object):
14709 """IAllocator framework.
14711 An IAllocator instance has three sets of attributes:
14712 - cfg that is needed to query the cluster
14713 - input data (all members of the _KEYS class attribute are required)
14714 - four buffer attributes (in|out_data|text), that represent the
14715 input (to the external script) in text and data structure format,
14716 and the output from it, again in two formats
14717 - the result variables from the script (success, info, nodes) for
14721 # pylint: disable=R0902
14722 # lots of instance attributes
14724 def __init__(self, cfg, rpc_runner, mode, **kwargs):
14726 self.rpc = rpc_runner
14727 # init buffer variables
14728 self.in_text = self.out_text = self.in_data = self.out_data = None
14729 # init all input fields so that pylint is happy
14731 self.memory = self.disks = self.disk_template = self.spindle_use = None
14732 self.os = self.tags = self.nics = self.vcpus = None
14733 self.hypervisor = None
14734 self.relocate_from = None
14736 self.instances = None
14737 self.evac_mode = None
14738 self.target_groups = []
14740 self.required_nodes = None
14741 # init result fields
14742 self.success = self.info = self.result = None
14745 (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
14747 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
14748 " IAllocator" % self.mode)
14750 keyset = [n for (n, _) in keydata]
14753 if key not in keyset:
14754 raise errors.ProgrammerError("Invalid input parameter '%s' to"
14755 " IAllocator" % key)
14756 setattr(self, key, kwargs[key])
14759 if key not in kwargs:
14760 raise errors.ProgrammerError("Missing input parameter '%s' to"
14761 " IAllocator" % key)
14762 self._BuildInputData(compat.partial(fn, self), keydata)
14764 def _ComputeClusterData(self):
14765 """Compute the generic allocator input data.
14767 This is the data that is independent of the actual operation.
14771 cluster_info = cfg.GetClusterInfo()
14774 "version": constants.IALLOCATOR_VERSION,
14775 "cluster_name": cfg.GetClusterName(),
14776 "cluster_tags": list(cluster_info.GetTags()),
14777 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
14778 "ipolicy": cluster_info.ipolicy,
14780 ninfo = cfg.GetAllNodesInfo()
14781 iinfo = cfg.GetAllInstancesInfo().values()
14782 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
14785 node_list = [n.name for n in ninfo.values() if n.vm_capable]
14787 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
14788 hypervisor_name = self.hypervisor
14789 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
14790 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
14792 hypervisor_name = cluster_info.primary_hypervisor
14794 node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
14797 self.rpc.call_all_instances_info(node_list,
14798 cluster_info.enabled_hypervisors)
14800 data["nodegroups"] = self._ComputeNodeGroupData(cfg)
14802 config_ndata = self._ComputeBasicNodeData(cfg, ninfo)
14803 data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
14804 i_list, config_ndata)
14805 assert len(data["nodes"]) == len(ninfo), \
14806 "Incomplete node data computed"
14808 data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
14810 self.in_data = data
14813 def _ComputeNodeGroupData(cfg):
14814 """Compute node groups data.
14817 cluster = cfg.GetClusterInfo()
14818 ng = dict((guuid, {
14819 "name": gdata.name,
14820 "alloc_policy": gdata.alloc_policy,
14821 "ipolicy": _CalculateGroupIPolicy(cluster, gdata),
14823 for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
14828 def _ComputeBasicNodeData(cfg, node_cfg):
14829 """Compute global node data.
14832 @returns: a dict of name: (node dict, node config)
14835 # fill in static (config-based) values
14836 node_results = dict((ninfo.name, {
14837 "tags": list(ninfo.GetTags()),
14838 "primary_ip": ninfo.primary_ip,
14839 "secondary_ip": ninfo.secondary_ip,
14840 "offline": ninfo.offline,
14841 "drained": ninfo.drained,
14842 "master_candidate": ninfo.master_candidate,
14843 "group": ninfo.group,
14844 "master_capable": ninfo.master_capable,
14845 "vm_capable": ninfo.vm_capable,
14846 "ndparams": cfg.GetNdParams(ninfo),
14848 for ninfo in node_cfg.values())
14850 return node_results
14853 def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
14855 """Compute global node data.
14857 @param node_results: the basic node structures as filled from the config
14860 #TODO(dynmem): compute the right data on MAX and MIN memory
14861 # make a copy of the current dict
14862 node_results = dict(node_results)
14863 for nname, nresult in node_data.items():
14864 assert nname in node_results, "Missing basic data for node %s" % nname
14865 ninfo = node_cfg[nname]
14867 if not (ninfo.offline or ninfo.drained):
14868 nresult.Raise("Can't get data for node %s" % nname)
14869 node_iinfo[nname].Raise("Can't get node instance info from node %s" %
14871 remote_info = _MakeLegacyNodeInfo(nresult.payload)
14873 for attr in ["memory_total", "memory_free", "memory_dom0",
14874 "vg_size", "vg_free", "cpu_total"]:
14875 if attr not in remote_info:
14876 raise errors.OpExecError("Node '%s' didn't return attribute"
14877 " '%s'" % (nname, attr))
14878 if not isinstance(remote_info[attr], int):
14879 raise errors.OpExecError("Node '%s' returned invalid value"
14881 (nname, attr, remote_info[attr]))
14882 # compute memory used by primary instances
14883 i_p_mem = i_p_up_mem = 0
14884 for iinfo, beinfo in i_list:
14885 if iinfo.primary_node == nname:
14886 i_p_mem += beinfo[constants.BE_MAXMEM]
14887 if iinfo.name not in node_iinfo[nname].payload:
14890 i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
14891 i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
14892 remote_info["memory_free"] -= max(0, i_mem_diff)
14894 if iinfo.admin_state == constants.ADMINST_UP:
14895 i_p_up_mem += beinfo[constants.BE_MAXMEM]
14897 # compute memory used by instances
14899 "total_memory": remote_info["memory_total"],
14900 "reserved_memory": remote_info["memory_dom0"],
14901 "free_memory": remote_info["memory_free"],
14902 "total_disk": remote_info["vg_size"],
14903 "free_disk": remote_info["vg_free"],
14904 "total_cpus": remote_info["cpu_total"],
14905 "i_pri_memory": i_p_mem,
14906 "i_pri_up_memory": i_p_up_mem,
14908 pnr_dyn.update(node_results[nname])
14909 node_results[nname] = pnr_dyn
14911 return node_results
14914 def _ComputeInstanceData(cluster_info, i_list):
14915 """Compute global instance data.
14919 for iinfo, beinfo in i_list:
14921 for nic in iinfo.nics:
14922 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
14926 "mode": filled_params[constants.NIC_MODE],
14927 "link": filled_params[constants.NIC_LINK],
14929 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
14930 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
14931 nic_data.append(nic_dict)
14933 "tags": list(iinfo.GetTags()),
14934 "admin_state": iinfo.admin_state,
14935 "vcpus": beinfo[constants.BE_VCPUS],
14936 "memory": beinfo[constants.BE_MAXMEM],
14937 "spindle_use": beinfo[constants.BE_SPINDLE_USE],
14939 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
14941 "disks": [{constants.IDISK_SIZE: dsk.size,
14942 constants.IDISK_MODE: dsk.mode}
14943 for dsk in iinfo.disks],
14944 "disk_template": iinfo.disk_template,
14945 "hypervisor": iinfo.hypervisor,
14947 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
14949 instance_data[iinfo.name] = pir
14951 return instance_data
14953 def _AddNewInstance(self):
14954 """Add new instance data to allocator structure.
14956 This in combination with _AllocatorGetClusterData will create the
14957 correct structure needed as input for the allocator.
14959 The checks for the completeness of the opcode must have already been
14963 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
14965 if self.disk_template in constants.DTS_INT_MIRROR:
14966 self.required_nodes = 2
14968 self.required_nodes = 1
14972 "disk_template": self.disk_template,
14975 "vcpus": self.vcpus,
14976 "memory": self.memory,
14977 "spindle_use": self.spindle_use,
14978 "disks": self.disks,
14979 "disk_space_total": disk_space,
14981 "required_nodes": self.required_nodes,
14982 "hypervisor": self.hypervisor,
14987 def _AddRelocateInstance(self):
14988 """Add relocate instance data to allocator structure.
14990 This in combination with _IAllocatorGetClusterData will create the
14991 correct structure needed as input for the allocator.
14993 The checks for the completeness of the opcode must have already been
14997 instance = self.cfg.GetInstanceInfo(self.name)
14998 if instance is None:
14999 raise errors.ProgrammerError("Unknown instance '%s' passed to"
15000 " IAllocator" % self.name)
15002 if instance.disk_template not in constants.DTS_MIRRORED:
15003 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
15004 errors.ECODE_INVAL)
15006 if instance.disk_template in constants.DTS_INT_MIRROR and \
15007 len(instance.secondary_nodes) != 1:
15008 raise errors.OpPrereqError("Instance has not exactly one secondary node",
15009 errors.ECODE_STATE)
15011 self.required_nodes = 1
15012 disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
15013 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
15017 "disk_space_total": disk_space,
15018 "required_nodes": self.required_nodes,
15019 "relocate_from": self.relocate_from,
15023 def _AddNodeEvacuate(self):
15024 """Get data for node-evacuate requests.
15028 "instances": self.instances,
15029 "evac_mode": self.evac_mode,
15032 def _AddChangeGroup(self):
15033 """Get data for node-evacuate requests.
15037 "instances": self.instances,
15038 "target_groups": self.target_groups,
15041 def _BuildInputData(self, fn, keydata):
15042 """Build input data structures.
15045 self._ComputeClusterData()
15048 request["type"] = self.mode
15049 for keyname, keytype in keydata:
15050 if keyname not in request:
15051 raise errors.ProgrammerError("Request parameter %s is missing" %
15053 val = request[keyname]
15054 if not keytype(val):
15055 raise errors.ProgrammerError("Request parameter %s doesn't pass"
15056 " validation, value %s, expected"
15057 " type %s" % (keyname, val, keytype))
15058 self.in_data["request"] = request
15060 self.in_text = serializer.Dump(self.in_data)
15062 _STRING_LIST = ht.TListOf(ht.TString)
15063 _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
15064 # pylint: disable=E1101
15065 # Class '...' has no 'OP_ID' member
15066 "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
15067 opcodes.OpInstanceMigrate.OP_ID,
15068 opcodes.OpInstanceReplaceDisks.OP_ID])
15072 ht.TListOf(ht.TAnd(ht.TIsLength(3),
15073 ht.TItems([ht.TNonEmptyString,
15074 ht.TNonEmptyString,
15075 ht.TListOf(ht.TNonEmptyString),
15078 ht.TListOf(ht.TAnd(ht.TIsLength(2),
15079 ht.TItems([ht.TNonEmptyString,
15082 _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
15083 ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
15086 constants.IALLOCATOR_MODE_ALLOC:
15089 ("name", ht.TString),
15090 ("memory", ht.TInt),
15091 ("spindle_use", ht.TInt),
15092 ("disks", ht.TListOf(ht.TDict)),
15093 ("disk_template", ht.TString),
15094 ("os", ht.TString),
15095 ("tags", _STRING_LIST),
15096 ("nics", ht.TListOf(ht.TDict)),
15097 ("vcpus", ht.TInt),
15098 ("hypervisor", ht.TString),
15100 constants.IALLOCATOR_MODE_RELOC:
15101 (_AddRelocateInstance,
15102 [("name", ht.TString), ("relocate_from", _STRING_LIST)],
15104 constants.IALLOCATOR_MODE_NODE_EVAC:
15105 (_AddNodeEvacuate, [
15106 ("instances", _STRING_LIST),
15107 ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
15109 constants.IALLOCATOR_MODE_CHG_GROUP:
15110 (_AddChangeGroup, [
15111 ("instances", _STRING_LIST),
15112 ("target_groups", _STRING_LIST),
15116 def Run(self, name, validate=True, call_fn=None):
15117 """Run an instance allocator and return the results.
15120 if call_fn is None:
15121 call_fn = self.rpc.call_iallocator_runner
15123 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
15124 result.Raise("Failure while running the iallocator script")
15126 self.out_text = result.payload
15128 self._ValidateResult()
15130 def _ValidateResult(self):
15131 """Process the allocator results.
15133 This will process and if successful save the result in
15134 self.out_data and the other parameters.
15138 rdict = serializer.Load(self.out_text)
15139 except Exception, err:
15140 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
15142 if not isinstance(rdict, dict):
15143 raise errors.OpExecError("Can't parse iallocator results: not a dict")
15145 # TODO: remove backwards compatiblity in later versions
15146 if "nodes" in rdict and "result" not in rdict:
15147 rdict["result"] = rdict["nodes"]
15150 for key in "success", "info", "result":
15151 if key not in rdict:
15152 raise errors.OpExecError("Can't parse iallocator results:"
15153 " missing key '%s'" % key)
15154 setattr(self, key, rdict[key])
15156 if not self._result_check(self.result):
15157 raise errors.OpExecError("Iallocator returned invalid result,"
15158 " expected %s, got %s" %
15159 (self._result_check, self.result),
15160 errors.ECODE_INVAL)
15162 if self.mode == constants.IALLOCATOR_MODE_RELOC:
15163 assert self.relocate_from is not None
15164 assert self.required_nodes == 1
15166 node2group = dict((name, ndata["group"])
15167 for (name, ndata) in self.in_data["nodes"].items())
15169 fn = compat.partial(self._NodesToGroups, node2group,
15170 self.in_data["nodegroups"])
15172 instance = self.cfg.GetInstanceInfo(self.name)
15173 request_groups = fn(self.relocate_from + [instance.primary_node])
15174 result_groups = fn(rdict["result"] + [instance.primary_node])
15176 if self.success and not set(result_groups).issubset(request_groups):
15177 raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
15178 " differ from original groups (%s)" %
15179 (utils.CommaJoin(result_groups),
15180 utils.CommaJoin(request_groups)))
15182 elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
15183 assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES
15185 self.out_data = rdict
15188 def _NodesToGroups(node2group, groups, nodes):
15189 """Returns a list of unique group names for a list of nodes.
15191 @type node2group: dict
15192 @param node2group: Map from node name to group UUID
15194 @param groups: Group information
15196 @param nodes: Node names
15203 group_uuid = node2group[node]
15205 # Ignore unknown node
15209 group = groups[group_uuid]
15211 # Can't find group, let's use UUID
15212 group_name = group_uuid
15214 group_name = group["name"]
15216 result.add(group_name)
15218 return sorted(result)
15221 class LUTestAllocator(NoHooksLU):
15222 """Run allocator tests.
15224 This LU runs the allocator tests
15227 def CheckPrereq(self):
15228 """Check prerequisites.
15230 This checks the opcode parameters depending on the director and mode test.
15233 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
15234 for attr in ["memory", "disks", "disk_template",
15235 "os", "tags", "nics", "vcpus"]:
15236 if not hasattr(self.op, attr):
15237 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
15238 attr, errors.ECODE_INVAL)
15239 iname = self.cfg.ExpandInstanceName(self.op.name)
15240 if iname is not None:
15241 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
15242 iname, errors.ECODE_EXISTS)
15243 if not isinstance(self.op.nics, list):
15244 raise errors.OpPrereqError("Invalid parameter 'nics'",
15245 errors.ECODE_INVAL)
15246 if not isinstance(self.op.disks, list):
15247 raise errors.OpPrereqError("Invalid parameter 'disks'",
15248 errors.ECODE_INVAL)
15249 for row in self.op.disks:
15250 if (not isinstance(row, dict) or
15251 constants.IDISK_SIZE not in row or
15252 not isinstance(row[constants.IDISK_SIZE], int) or
15253 constants.IDISK_MODE not in row or
15254 row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
15255 raise errors.OpPrereqError("Invalid contents of the 'disks'"
15256 " parameter", errors.ECODE_INVAL)
15257 if self.op.hypervisor is None:
15258 self.op.hypervisor = self.cfg.GetHypervisorType()
15259 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
15260 fname = _ExpandInstanceName(self.cfg, self.op.name)
15261 self.op.name = fname
15262 self.relocate_from = \
15263 list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
15264 elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
15265 constants.IALLOCATOR_MODE_NODE_EVAC):
15266 if not self.op.instances:
15267 raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
15268 self.op.instances = _GetWantedInstances(self, self.op.instances)
15270 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
15271 self.op.mode, errors.ECODE_INVAL)
15273 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
15274 if self.op.allocator is None:
15275 raise errors.OpPrereqError("Missing allocator name",
15276 errors.ECODE_INVAL)
15277 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
15278 raise errors.OpPrereqError("Wrong allocator test '%s'" %
15279 self.op.direction, errors.ECODE_INVAL)
15281 def Exec(self, feedback_fn):
15282 """Run the allocator test.
15285 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
15286 ial = IAllocator(self.cfg, self.rpc,
15289 memory=self.op.memory,
15290 disks=self.op.disks,
15291 disk_template=self.op.disk_template,
15295 vcpus=self.op.vcpus,
15296 hypervisor=self.op.hypervisor,
15298 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
15299 ial = IAllocator(self.cfg, self.rpc,
15302 relocate_from=list(self.relocate_from),
15304 elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
15305 ial = IAllocator(self.cfg, self.rpc,
15307 instances=self.op.instances,
15308 target_groups=self.op.target_groups)
15309 elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
15310 ial = IAllocator(self.cfg, self.rpc,
15312 instances=self.op.instances,
15313 evac_mode=self.op.evac_mode)
15315 raise errors.ProgrammerError("Uncatched mode %s in"
15316 " LUTestAllocator.Exec", self.op.mode)
15318 if self.op.direction == constants.IALLOCATOR_DIR_IN:
15319 result = ial.in_text
15321 ial.Run(self.op.allocator, validate=False)
15322 result = ial.out_text
15326 #: Query type implementations
15328 constants.QR_CLUSTER: _ClusterQuery,
15329 constants.QR_INSTANCE: _InstanceQuery,
15330 constants.QR_NODE: _NodeQuery,
15331 constants.QR_GROUP: _GroupQuery,
15332 constants.QR_OS: _OsQuery,
15333 constants.QR_EXPORT: _ExportQuery,
15336 assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
15339 def _GetQueryImplementation(name):
15340 """Returns the implemtnation for a query type.
15342 @param name: Query type, must be one of L{constants.QR_VIA_OP}
15346 return _QUERY_IMPL[name]
15348 raise errors.OpPrereqError("Unknown query resource '%s'" % name,
15349 errors.ECODE_INVAL)