# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

# Standard library and third-party imports used by the code below
import copy
import itertools
import logging
import re

import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes

from ganeti import rpc
from ganeti import runtime

import ganeti.masterd.instance # pylint: disable=W0611


#: Size of DRBD meta block device
DRBD_META_SIZE = 128

INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]

#: Instance status in which an instance can be marked as offline/online
CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
  constants.ADMINST_OFFLINE,
  ]))


82 """Data container for LU results with jobs.
84 Instances of this class returned from L{LogicalUnit.Exec} will be recognized
85 by L{mcpu._ProcessResult}. The latter will then submit the jobs
86 contained in the C{jobs} attribute and include the job IDs in the opcode
90 def __init__(self, jobs, **kwargs):
91 """Initializes this class.
93 Additional return values can be specified as keyword arguments.
95 @type jobs: list of lists of L{opcode.OpCode}
96 @param jobs: A list of lists of opcode objects
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - implement BuildHooksNodes
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc_runner):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.glm = context.glm
    self.owned_locks = context.glm.list_owned
    self.context = context
    self.rpc = rpc_runner
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.Log = processor.Log # pylint: disable=C0103
    self.LogWarning = processor.LogWarning # pylint: disable=C0103
    self.LogInfo = processor.LogInfo # pylint: disable=C0103
    self.LogStep = processor.LogStep # pylint: disable=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values, with rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that
        level (note that in this case C{DeclareLocks} won't be called
        at all for that level)
      - if you need locks at a level, but you can't calculate it in
        this function, initialise that level with an empty list and do
        further processing in L{LogicalUnit.DeclareLocks} (see that
        function's docstring)
      - don't put anything for the BGL level
      - if you want all locks at a level use L{locking.ALL_SET} as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

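  # Illustrative sketch (not from the original module): a concurrent subclass
  # would typically combine ExpandNames with DeclareLocks as below; this exact
  # LU is hypothetical, but the helpers used are the ones documented in this
  # class.
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes()
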
  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of L{ganeti.locking.LEVELS}

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    @rtype: dict
    @return: Dictionary containing the environment that will be used for
      running the hooks for this LU. The keys of the dict must not be prefixed
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
      will extend the environment with additional variables. If no environment
      should be defined, an empty dictionary should be returned (not C{None}).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def BuildHooksNodes(self):
    """Build list of nodes to run LU's hooks.

    @rtype: tuple; (list, list)
    @return: Tuple containing a list of node names on which the hook
      should run before the execution and a list of node names on which the
      hook should run after the execution. No nodes should be returned as an
      empty list (and not None).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused-argument and
    # "method could be a function" warnings
    # pylint: disable=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False,
                          level=locking.LEVEL_NODE):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances
    @param level: Which lock level to use for locking nodes

    """
    assert level in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we really have been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
      self.needed_locks[level] = wanted_nodes
    elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
      self.needed_locks[level].extend(wanted_nodes)
    else:
      raise errors.ProgrammerError("Unknown recalculation mode")

    del self.recalculate_locks[level]


class NoHooksLU(LogicalUnit): # pylint: disable=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    raise AssertionError("BuildHooksEnv called for NoHooksLU")

  def BuildHooksNodes(self):
    """Empty BuildHooksNodes for NoHooksLU.

    """
    raise AssertionError("BuildHooksNodes called for NoHooksLU")


444 """Tasklet base class.
446 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
447 they can mix legacy code with tasklets. Locking needs to be done in the LU,
448 tasklets know nothing about locks.
450 Subclasses must follow these rules:
451 - Implement CheckPrereq
455 def __init__(self, lu):
462 def CheckPrereq(self):
463 """Check prerequisites for this tasklets.
465 This method should check whether the prerequisites for the execution of
466 this tasklet are fulfilled. It can do internode communication, but it
467 should be idempotent - no cluster or system changes are allowed.
469 The method should raise errors.OpPrereqError in case something is not
470 fulfilled. Its return value is ignored.
472 This method should also update all parameters to their canonical form if it
473 hasn't been done before.
478 def Exec(self, feedback_fn):
479 """Execute the tasklet.
481 This method should implement the actual work. It should raise
482 errors.OpExecError for failures that are somewhat dealt with in code, or
486 raise NotImplementedError
490 """Base for query utility classes.
493 #: Attribute holding field definitions
499 def __init__(self, qfilter, fields, use_locking):
500 """Initializes this class.
503 self.use_locking = use_locking
505 self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
506 namefield=self.SORT_FIELD)
507 self.requested_data = self.query.RequestedData()
508 self.names = self.query.RequestedNames()
510 # Sort only if no names were requested
511 self.sort_by_name = not self.names
513 self.do_locking = None
516 def _GetNames(self, lu, all_names, lock_level):
517 """Helper function to determine names asked for in the query.
521 names = lu.owned_locks(lock_level)
525 if self.wanted == locking.ALL_SET:
526 assert not self.names
527 # caller didn't specify names, so ordering is not important
528 return utils.NiceSort(names)
530 # caller specified names and we must keep the same order
532 assert not self.do_locking or lu.glm.is_owned(lock_level)
534 missing = set(self.wanted).difference(names)
536 raise errors.OpExecError("Some items were removed before retrieving"
537 " their data: %s" % missing)
539 # Return expanded names
542 def ExpandNames(self, lu):
543 """Expand names for this query.
545 See L{LogicalUnit.ExpandNames}.
548 raise NotImplementedError()
550 def DeclareLocks(self, lu, level):
551 """Declare locks for this query.
553 See L{LogicalUnit.DeclareLocks}.
556 raise NotImplementedError()
558 def _GetQueryData(self, lu):
559 """Collects all data for this query.
561 @return: Query data object
564 raise NotImplementedError()
566 def NewStyleQuery(self, lu):
567 """Collect data and execute query.
570 return query.GetQueryResponse(self.query, self._GetQueryData(lu),
571 sort_by_name=self.sort_by_name)
573 def OldStyleQuery(self, lu):
574 """Collect data and execute query.
577 return self.query.OldStyleQuery(self._GetQueryData(lu),
578 sort_by_name=self.sort_by_name)
582 """Returns a dict declaring all lock levels shared.
585 return dict.fromkeys(locking.LEVELS, 1)
def _MakeLegacyNodeInfo(data):
  """Formats the data returned by L{rpc.RpcRunner.call_node_info}.

  Converts the data into a single dictionary. This is fine for most use cases,
  but some require information from more than one volume group or hypervisor.

  """
  (bootid, (vg_info, ), (hv_info, )) = data

  return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
    "bootid": bootid,
    })


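# Illustrative sketch (assumed payload shape, hypothetical values): the
# (bootid, (vg_info, ), (hv_info, )) tuple is flattened into one dict.
#
#   data = ("0cf4d7b9", ({"vg_size": 102400, "vg_free": 51200},),
#           ({"memory_free": 2048},))
#   _MakeLegacyNodeInfo(data)
#   => {"vg_size": 102400, "vg_free": 51200, "memory_free": 2048,
#       "bootid": "0cf4d7b9"}

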
def _AnnotateDiskParams(instance, devs, cfg):
  """Little helper wrapper to the rpc annotation method.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of their children!)
  @param cfg: The config object
  @returns: The annotated disk copies
  @see: L{rpc.AnnotateDiskParams}

  """
  return rpc.AnnotateDiskParams(instance.disk_template, devs,
                                cfg.GetInstanceDiskParams(instance))


def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
                              cur_group_uuid):
  """Checks if node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance name as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_nodes: iterable of string
  @param owned_nodes: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for (name, inst) in instances.items():
    assert owned_nodes.issuperset(inst.all_nodes), \
      "Instance %s's nodes changed while we kept the lock" % name

    inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)

    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (name, cur_group_uuid)


def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type instance_name: string
  @param instance_name: Instance name
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups

  """
  inst_groups = cfg.GetInstanceNodeGroups(instance_name)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups"
                               " are '%s', owning groups '%s'; retry the"
                               " operation" %
                               (instance_name,
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instances: set or frozenset
  @param owned_instances: List of currently owned instances

  """
  wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
  if owned_instances != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instances)),
                               errors.ECODE_STATE)

  return wanted_instances


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  else:
    return names[:]


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


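# Illustrative sketch (hypothetical names): short names are expanded, while
# None yields the full, nicely sorted node list.
#
#   _GetWantedNodes(self, ["node1"]) => ["node1.example.com"]
#   _GetWantedNodes(self, None)      => utils.NiceSort(self.cfg.GetNodeList())

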
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


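# Illustrative sketch (hypothetical hypervisor parameters): VALUE_DEFAULT
# deletes a key, any other value overrides it.
#
#   old = {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/sda1"}
#   upd = {"kernel_path": constants.VALUE_DEFAULT, "root_path": "/dev/vda1"}
#   _GetUpdatedParams(old, upd)
#   => {"root_path": "/dev/vda1"}

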
def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
  """Return the new version of an instance policy.

  @param group_policy: whether this policy applies to a group and thus
    we should support removal of policy entries

  """
  use_none = use_default = group_policy
  ipolicy = copy.deepcopy(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key not in constants.IPOLICY_ALL_KEYS:
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                 errors.ECODE_INVAL)
    if key in constants.IPOLICY_ISPECS:
      utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
      ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                       use_none=use_none,
                                       use_default=use_default)
    else:
      if (not value or value == [constants.VALUE_DEFAULT] or
          value == constants.VALUE_DEFAULT):
        if group_policy:
          del ipolicy[key]
        else:
          raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                     " on the cluster" % key,
                                     errors.ECODE_INVAL)
      elif key in constants.IPOLICY_PARAMETERS:
        # FIXME: we assume all such values are float
        try:
          ipolicy[key] = float(value)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid value for attribute"
                                     " '%s': '%s', error: %s" %
                                     (key, value, err), errors.ECODE_INVAL)
      else:
        # FIXME: we assume all others are lists; this should be redone
        ipolicy[key] = list(value)
  try:
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                               errors.ECODE_INVAL)
  return ipolicy


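# Illustrative sketch (assumed policy key): at group level a float parameter
# such as constants.IPOLICY_VCPU_RATIO can be overridden, while passing
# VALUE_DEFAULT for it would remove the group-level override again.
#
#   new = _GetUpdatedIPolicy(group.ipolicy,
#                            {constants.IPOLICY_VCPU_RATIO: 4.0},
#                            group_policy=True)

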
def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable to ForceDictType to verify correct types
  @returns: A new dict with updated and verified values

  """
  def fn(old, value):
    new = _GetUpdatedParams(old, value)
    utils.ForceDictType(new, type_check)
    return new

  ret = copy.deepcopy(base)
  ret.update(dict((key, fn(base.get(key, {}), value))
                  for key, value in updates.items()))
  return ret


def _MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with the one of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
    if invalid_hvs:
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                                 " %s" % utils.CommaJoin(invalid_hvs),
                                 errors.ECODE_INVAL)
    if obj_input is None:
      obj_input = {}
    type_check = constants.HVSTS_PARAMETER_TYPES
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)

  return None


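# Illustrative sketch (hypothetical state keys): an opcode override is merged
# into the object's hv_state; unknown hypervisor names raise OpPrereqError.
#
#   obj_input = {constants.HT_XEN_PVM: {"mem_total": 4096}}
#   op_input = {constants.HT_XEN_PVM: {"mem_total": 8192}}
#   _MergeAndVerifyHvState(op_input, obj_input)
#   => {constants.HT_XEN_PVM: {"mem_total": 8192}}

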
def _MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with the one of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
    if invalid_dst:
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                                 utils.CommaJoin(invalid_dst),
                                 errors.ECODE_INVAL)
    type_check = constants.DSS_PARAMETER_TYPES
    if obj_input is None:
      obj_input = {}
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                              type_check))
                for key, value in op_input.items())

  return None


def _ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"


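# Illustrative sketch: a typical call keeps only the node locks still needed
# and releases the rest.
#
#   _ReleaseLocks(self, locking.LEVEL_NODE,
#                 keep=[instance.primary_node] +
#                      list(instance.secondary_nodes))

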
def _MapInstanceDisksToNodes(instances):
  """Creates a map from (node, volume) to instance name.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node name, volume name) as key,
      instance name as value

  """
  return dict(((node, vol), inst.name)
              for inst in instances
              for (node, vols) in inst.MapLVsByNode().items()
              for vol in vols)


def _RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
  except Exception, err: # pylint: disable=W0703
    lu.LogWarning("Errors occurred running hooks on %s: %s" % (node_name, err))


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = "can't use instance from outside %s states" % ", ".join(req_states)
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP not in req_states:
    pnode = instance.primary_node
    ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
    ins_l.Raise("Can't contact node %s for instance information" % pnode,
                prereq=True, ecode=errors.ECODE_ENVIRON)

    if instance.name in ins_l.payload:
      raise errors.OpPrereqError("Instance %s is running, %s" %
                                 (instance.name, msg), errors.ECODE_STATE)


def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ipolicy: dictionary containing min, max and std values
  @param value: actual value that we want to use
  @return: None, or a message for an element not meeting the criteria

  """
  if value in [None, constants.VALUE_AUTO]:
    return None
  max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
  min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
  if value > max_v or min_v > value:
    if qualifier:
      fqn = "%s/%s" % (name, qualifier)
    else:
      fqn = name
    return ("%s value %s is not in range [%s, %s]" %
            (fqn, value, min_v, max_v))
  return None


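# Illustrative sketch (hypothetical policy values): 128 MB checked against a
# 256-1024 MB range returns an error message rather than raising.
#
#   ipolicy = {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 256},
#              constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 1024}}
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", ipolicy, 128)
#   => "<name> value 128 is not in range [256, 1024]"

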
def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                 nic_count, disk_sizes, spindle_use,
                                 _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  assert disk_count == len(disk_sizes)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_DISK_COUNT, "", disk_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]

  return filter(None,
                (_compute_fn(name, qualifier, ipolicy, value)
                 for (name, qualifier, value) in test_settings))


def _ComputeIPolicyInstanceViolation(ipolicy, instance,
                                     _compute_fn=_ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
  cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
  spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
  disk_count = len(instance.disks)
  disk_sizes = [disk.size for disk in instance.disks]
  nic_count = len(instance.nics)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use)


def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
                                         _compute_fn=_ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use)


def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group,
                                 _compute_fn=_ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  if current_group == target_group:
    return []
  else:
    return _compute_fn(ipolicy, instance)


def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
                            _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group)

  if res:
    msg = ("Instance does not meet target node group's (%s) instance"
           " policy: %s") % (node.group, utils.CommaJoin(res))
    if ignore:
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @return: A list of instances which violate the new ipolicy but
      did not violate the old one

  """
  return (_ComputeViolatingInstances(new_ipolicy, instances) -
          _ComputeViolatingInstances(old_ipolicy, instances))


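# Illustrative sketch (hypothetical caller context): only newly-violating
# instances are reported; instances that already violated the old policy are
# filtered out by the set difference.
#
#   newly_bad = _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy,
#                                             all_instances)

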
def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


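# Illustrative sketch (hypothetical names): expansion canonicalizes a short
# name or raises.
#
#   _ExpandInstanceName(self.cfg, "web1")  => "web1.example.com"
#   _ExpandInstanceName(self.cfg, "bogus") raises errors.OpPrereqError

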
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          minmem, maxmem, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.7) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
    }
  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node": instance.primary_node,
    "secondary_nodes": instance.secondary_nodes,
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": _NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.size, disk.mode) for disk in instance.disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
    }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CalculateGroupIPolicy(cluster, group):
  """Calculate instance policy for group.

  """
  return cluster.SimpleFillIPolicy(group.ipolicy)


def _ComputeViolatingInstances(ipolicy, instances):
  """Computes a set of instances that violate a given ipolicy.

  @param ipolicy: The ipolicy to verify
  @type instances: list of L{objects.Instance}
  @param instances: List of instances to verify
  @return: A frozenset of instance names violating the ipolicy

  """
  return frozenset([inst.name for inst in instances
                    if _ComputeIPolicyInstanceViolation(ipolicy, inst)])


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


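# Illustrative sketch (hypothetical OS): for an OS that declares
# supported_variants, the user must pass "name+variant", e.g.
# "debootstrap+default"; plain "debootstrap" would raise OpPrereqError.

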
def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
  # Queries the mirror status of the instance's disks on the given node and
  # returns the indices of those reported as faulty
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc_runner.call_blockdev_getmirrorstatus(node_name, (instance.disks,
                                                                instance))
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)


def _GetDefaultIAllocator(cfg, iallocator):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type iallocator: string or None
  @param iallocator: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not iallocator:
    # Use default iallocator
    iallocator = cfg.GetDefaultIAllocator()

  if not iallocator:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return iallocator


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [self.cfg.GetMasterNode()])

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master_params = self.cfg.GetMasterNetworkParameters()

    # Run post hooks on master node before it's removed
    _RunPostHook(self, master_params.name)

    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_deactivate_master_ip(master_params.name,
                                                     master_params, ems)
    if result.fail_msg:
      self.LogWarning("Error disabling the master IP address: %s",
                      result.fail_msg)

    return master_params.name


def _VerifyCertificate(filename):
  """Verifies a certificate for L{LUClusterVerifyConfig}.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable=W0703
    return (LUClusterVerifyConfig.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


def _GetAllHypervisorParameters(cluster, instances):
  """Compute the set of all hypervisor parameters.

  @type cluster: L{objects.Cluster}
  @param cluster: the cluster object
  @type instances: list of L{objects.Instance}
  @param instances: additional instances from which to obtain parameters
  @rtype: list of (origin, hypervisor, parameters)
  @return: a list with all parameters found, indicating the hypervisor they
      apply to, and the origin (can be "cluster", "os X", or "instance Y")

  """
  hvp_data = []

  for hv_name in cluster.enabled_hypervisors:
    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))

  for os_name, os_hvp in cluster.os_hvp.items():
    for hv_name, hv_params in os_hvp.items():
      if hv_params:
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
        hvp_data.append(("os %s" % os_name, hv_name, full_params))

  # TODO: collapse identical parameter values in a single one
  for instance in instances:
    if instance.hvparams:
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
                       cluster.FillHV(instance)))

  return hvp_data


class _VerifyErrors(object):
  """Mix-in for cluster/group verify LUs.

  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
  self.op and self._feedback_fn to be available.)

  """

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt, _ = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101

  def _ErrorIf(self, cond, ecode, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = (bool(cond)
            or self.op.debug_simulate_errors) # pylint: disable=E1101

    # If the error code is in the list of ignored errors, demote the error
    # to a warning
    (_, etxt, _) = ecode
    if etxt in self.op.ignore_errors: # pylint: disable=E1101
      kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING

    if cond:
      self._Error(ecode, *args, **kwargs)

    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond


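# Illustrative sketch (hypothetical error tuple and message): assuming an
# ecode of ("node", "ENODESSH", ...) for item "node1", op.error_codes=True
# yields a machine-parseable line, the default a human-readable one:
#
#   ERROR:ENODESSH:node:node1:ssh problems detected
#   ERROR: node node1: ssh problems detected

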
class LUClusterVerify(NoHooksLU):
  """Submits all jobs necessary to verify the cluster.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    jobs = []

    if self.op.group_name:
      groups = [self.op.group_name]
      depends_fn = lambda: None
    else:
      groups = self.cfg.GetNodeGroupList()

      # Verify global configuration
      jobs.append([
        opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors),
        ])

      # Always depend on global verification
      depends_fn = lambda: [(-len(jobs), [])]

    jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
                                              ignore_errors=self.op.ignore_errors,
                                              depends=depends_fn())]
                for group in groups)

    # Fix up all parameters
    for op in itertools.chain(*jobs): # pylint: disable=W0142
      op.debug_simulate_errors = self.op.debug_simulate_errors
      op.verbose = self.op.verbose
      op.error_codes = self.op.error_codes
      try:
        op.skip_checks = self.op.skip_checks
      except AttributeError:
        assert not isinstance(op, opcodes.OpClusterVerifyGroup)

    return ResultWithJobs(jobs)


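# Illustrative sketch: on a cluster with node groups "default" and "extra",
# the LU above submits one job with OpClusterVerifyConfig plus one
# OpClusterVerifyGroup job per group, each group job depending on the config
# job through the relative job id -len(jobs).

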
1949 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
1950 """Verifies the cluster config.
1955 def _VerifyHVP(self, hvp_data):
1956 """Verifies locally the syntax of the hypervisor parameters.
1959 for item, hv_name, hv_params in hvp_data:
1960 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
1963 hv_class = hypervisor.GetHypervisor(hv_name)
1964 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1965 hv_class.CheckParameterSyntax(hv_params)
1966 except errors.GenericError, err:
1967 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
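# Editor's sketch of the hvp_data shape consumed by _VerifyHVP above: a list
# of (source description, hypervisor name, parameter dict) tuples as built by
# _GetAllHypervisorParameters. The values below are illustrative only:
#
#   >>> hvp_data = [("cluster", "xen-pvm", {"kernel_path": "/boot/vmlinuz"}),
#   ...             ("os debian", "kvm", {"acpi": True})]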
1969 def ExpandNames(self):
1970 self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
1971 self.share_locks = _ShareAll()
1973 def CheckPrereq(self):
1974 """Check prerequisites.
1977 # Retrieve all information
1978 self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
1979 self.all_node_info = self.cfg.GetAllNodesInfo()
1980 self.all_inst_info = self.cfg.GetAllInstancesInfo()
1982 def Exec(self, feedback_fn):
1983 """Verify integrity of cluster, performing various test on nodes.
1986 self.bad = False
1987 self._feedback_fn = feedback_fn
1989 feedback_fn("* Verifying cluster config")
1991 for msg in self.cfg.VerifyConfig():
1992 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
1994 feedback_fn("* Verifying cluster certificate files")
1996 for cert_filename in constants.ALL_CERT_FILES:
1997 (errcode, msg) = _VerifyCertificate(cert_filename)
1998 self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
2000 feedback_fn("* Verifying hypervisor parameters")
2002 self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
2003 self.all_inst_info.values()))
2005 feedback_fn("* Verifying all nodes belong to an existing group")
2007 # We do this verification here because, should this bogus circumstance
2008 # occur, it would never be caught by VerifyGroup, which only acts on
2009 # nodes/instances reachable from existing node groups.
2011 dangling_nodes = set(node.name for node in self.all_node_info.values()
2012 if node.group not in self.all_group_info)
2014 dangling_instances = {}
2015 no_node_instances = []
2017 for inst in self.all_inst_info.values():
2018 if inst.primary_node in dangling_nodes:
2019 dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
2020 elif inst.primary_node not in self.all_node_info:
2021 no_node_instances.append(inst.name)
2023 pretty_dangling = [
2024     "%s (%s)" %
2025     (node.name,
2026      utils.CommaJoin(dangling_instances.get(node.name,
2027                                             ["no instances"])))
2028     for node in dangling_nodes]
2030 self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
2032 "the following nodes (and their instances) belong to a non"
2033 " existing group: %s", utils.CommaJoin(pretty_dangling))
2035 self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
2037 "the following instances have a non-existing primary-node:"
2038 " %s", utils.CommaJoin(no_node_instances))
2043 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
2044 """Verifies the status of a node group.
2047 HPATH = "cluster-verify"
2048 HTYPE = constants.HTYPE_CLUSTER
2051 _HOOKS_INDENT_RE = re.compile("^", re.M)
2053 class NodeImage(object):
2054 """A class representing the logical and physical status of a node.
2057 @ivar name: the node name to which this object refers
2058 @ivar volumes: a structure as returned from
2059 L{ganeti.backend.GetVolumeList} (runtime)
2060 @ivar instances: a list of running instances (runtime)
2061 @ivar pinst: list of configured primary instances (config)
2062 @ivar sinst: list of configured secondary instances (config)
2063 @ivar sbp: dictionary of {primary-node: list of instances} for all
2064 instances for which this node is secondary (config)
2065 @ivar mfree: free memory, as reported by hypervisor (runtime)
2066 @ivar dfree: free disk, as reported by the node (runtime)
2067 @ivar offline: the offline status (config)
2068 @type rpc_fail: boolean
2069 @ivar rpc_fail: whether the RPC verify call failed (overall,
2070   not whether the individual keys were correct) (runtime)
2071 @type lvm_fail: boolean
2072 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
2073 @type hyp_fail: boolean
2074 @ivar hyp_fail: whether the RPC call didn't return the instance list
2075 @type ghost: boolean
2076 @ivar ghost: whether this node is absent from the cluster configuration (config)
2077 @type os_fail: boolean
2078 @ivar os_fail: whether the RPC call didn't return valid OS data
2080 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
2081 @type vm_capable: boolean
2082 @ivar vm_capable: whether the node can host instances
2085 def __init__(self, offline=False, name=None, vm_capable=True):
2086   self.name = name
2087   self.volumes = {}
2088   self.instances = []
2089   self.pinst = []
2090   self.sinst = []
2091   self.sbp = {}
2092   self.mfree = 0
2093   self.dfree = 0
2094   self.offline = offline
2095   self.vm_capable = vm_capable
2096   self.rpc_fail = False
2097   self.lvm_fail = False
2098   self.hyp_fail = False
2099   self.ghost = False
2100   self.os_fail = False
2101   self.oslist = {}
2103 def ExpandNames(self):
2104 # This raises errors.OpPrereqError on its own:
2105 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
2107 # Get instances in node group; this is unsafe and needs verification later
2109 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2111 self.needed_locks = {
2112 locking.LEVEL_INSTANCE: inst_names,
2113 locking.LEVEL_NODEGROUP: [self.group_uuid],
2114 locking.LEVEL_NODE: [],
2117 self.share_locks = _ShareAll()
2119 def DeclareLocks(self, level):
2120 if level == locking.LEVEL_NODE:
2121 # Get members of node group; this is unsafe and needs verification later
2122 nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
2124 all_inst_info = self.cfg.GetAllInstancesInfo()
2126 # In Exec(), we warn about mirrored instances that have primary and
2127 # secondary living in separate node groups. To fully verify that
2128 # volumes for these instances are healthy, we will need to do an
2129 # extra call to their secondaries. We ensure here those nodes will
2131 for inst in self.owned_locks(locking.LEVEL_INSTANCE):
2132 # Important: access only the instances whose lock is owned
2133 if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
2134 nodes.update(all_inst_info[inst].secondary_nodes)
2136 self.needed_locks[locking.LEVEL_NODE] = nodes
2138 def CheckPrereq(self):
2139 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
2140 self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
2142 group_nodes = set(self.group_info.members)
2144 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2147 group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2149 unlocked_instances = \
2150 group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
2153 raise errors.OpPrereqError("Missing lock for nodes: %s" %
2154 utils.CommaJoin(unlocked_nodes),
2157 if unlocked_instances:
2158 raise errors.OpPrereqError("Missing lock for instances: %s" %
2159 utils.CommaJoin(unlocked_instances),
2162 self.all_node_info = self.cfg.GetAllNodesInfo()
2163 self.all_inst_info = self.cfg.GetAllInstancesInfo()
2165 self.my_node_names = utils.NiceSort(group_nodes)
2166 self.my_inst_names = utils.NiceSort(group_instances)
2168 self.my_node_info = dict((name, self.all_node_info[name])
2169 for name in self.my_node_names)
2171 self.my_inst_info = dict((name, self.all_inst_info[name])
2172 for name in self.my_inst_names)
2174 # We detect here the nodes that will need the extra RPC calls for verifying
2175 # split LV volumes; they should be locked.
2176 extra_lv_nodes = set()
2178 for inst in self.my_inst_info.values():
2179 if inst.disk_template in constants.DTS_INT_MIRROR:
2180 for nname in inst.all_nodes:
2181 if self.all_node_info[nname].group != self.group_uuid:
2182 extra_lv_nodes.add(nname)
2184 unlocked_lv_nodes = \
2185 extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2187 if unlocked_lv_nodes:
2188 raise errors.OpPrereqError("Missing node locks for LV check: %s" %
2189 utils.CommaJoin(unlocked_lv_nodes),
2191 self.extra_lv_nodes = list(extra_lv_nodes)
2193 def _VerifyNode(self, ninfo, nresult):
2194 """Perform some basic validation on data returned from a node.
2196 - check the result data structure is well formed and has all the
2198 - check ganeti version
2200 @type ninfo: L{objects.Node}
2201 @param ninfo: the node to check
2202 @param nresult: the results from the node
2204 @return: whether overall this call was successful (and we can expect
2205 reasonable values in the response)
2208 node = ninfo.name
2209 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2211 # main result, nresult should be a non-empty dict
2212 test = not nresult or not isinstance(nresult, dict)
2213 _ErrorIf(test, constants.CV_ENODERPC, node,
2214 "unable to verify node: no data returned")
2218 # compares ganeti version
2219 local_version = constants.PROTOCOL_VERSION
2220 remote_version = nresult.get("version", None)
2221 test = not (remote_version and
2222 isinstance(remote_version, (list, tuple)) and
2223 len(remote_version) == 2)
2224 _ErrorIf(test, constants.CV_ENODERPC, node,
2225 "connection to node returned invalid data")
2229 test = local_version != remote_version[0]
2230 _ErrorIf(test, constants.CV_ENODEVERSION, node,
2231 "incompatible protocol versions: master %s,"
2232 " node %s", local_version, remote_version[0])
2236 # node seems compatible, we can actually try to look into its results
2238 # full package version
2239 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
2240 constants.CV_ENODEVERSION, node,
2241 "software version mismatch: master %s, node %s",
2242 constants.RELEASE_VERSION, remote_version[1],
2243 code=self.ETYPE_WARNING)
2245 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
2246 if ninfo.vm_capable and isinstance(hyp_result, dict):
2247 for hv_name, hv_result in hyp_result.iteritems():
2248 test = hv_result is not None
2249 _ErrorIf(test, constants.CV_ENODEHV, node,
2250 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
2252 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
2253 if ninfo.vm_capable and isinstance(hvp_result, list):
2254 for item, hv_name, hv_result in hvp_result:
2255 _ErrorIf(True, constants.CV_ENODEHV, node,
2256 "hypervisor %s parameter verify failure (source %s): %s",
2257 hv_name, item, hv_result)
2259 test = nresult.get(constants.NV_NODESETUP,
2260 ["Missing NODESETUP results"])
2261 _ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
2262          "; ".join(test))
2264 return True
2266 def _VerifyNodeTime(self, ninfo, nresult,
2267 nvinfo_starttime, nvinfo_endtime):
2268 """Check the node time.
2270 @type ninfo: L{objects.Node}
2271 @param ninfo: the node to check
2272 @param nresult: the remote results for the node
2273 @param nvinfo_starttime: the start time of the RPC call
2274 @param nvinfo_endtime: the end time of the RPC call
2277 node = ninfo.name
2278 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2280 ntime = nresult.get(constants.NV_TIME, None)
2281 try:
2282   ntime_merged = utils.MergeTime(ntime)
2283 except (ValueError, TypeError):
2284   _ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time")
2285   return
2287 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
2288 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
2289 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
2290 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
2291 else:
2292   ntime_diff = None
2294 _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node,
2295 "Node time diverges by at least %s from master node time",
2298 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
2299 """Check the node LVM results.
2301 @type ninfo: L{objects.Node}
2302 @param ninfo: the node to check
2303 @param nresult: the remote results for the node
2304 @param vg_name: the configured VG name
2310 node = ninfo.name
2311 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2313 # checks vg existence and size > 20G
2314 vglist = nresult.get(constants.NV_VGLIST, None)
2315 test = not vglist
2316 _ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
2317 if not test:
2318 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
2319 constants.MIN_VG_SIZE)
2320 _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
2323 pvlist = nresult.get(constants.NV_PVLIST, None)
2324 test = pvlist is None
2325 _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
2326 if not test:
2327   # check that ':' is not present in PV names, since it's a
2328   # special character for lvcreate (denotes the range of PEs to
2329   # use on the PV)
2330   for _, pvname, owner_vg in pvlist:
2331 test = ":" in pvname
2332 _ErrorIf(test, constants.CV_ENODELVM, node,
2333 "Invalid character ':' in PV '%s' of VG '%s'",
2336 def _VerifyNodeBridges(self, ninfo, nresult, bridges):
2337 """Check the node bridges.
2339 @type ninfo: L{objects.Node}
2340 @param ninfo: the node to check
2341 @param nresult: the remote results for the node
2342 @param bridges: the expected list of bridges
2345 if not bridges:
2346   return
2348 node = ninfo.name
2349 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2351 missing = nresult.get(constants.NV_BRIDGES, None)
2352 test = not isinstance(missing, list)
2353 _ErrorIf(test, constants.CV_ENODENET, node,
2354 "did not return valid bridge information")
2355 if not test:
2356   _ErrorIf(bool(missing), constants.CV_ENODENET, node,
2357 "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
2359 def _VerifyNodeUserScripts(self, ninfo, nresult):
2360 """Check the results of user scripts presence and executability on the node
2362 @type ninfo: L{objects.Node}
2363 @param ninfo: the node to check
2364 @param nresult: the remote results for the node
2367 node = ninfo.name
2369 test = constants.NV_USERSCRIPTS not in nresult
2370 self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node,
2371 "did not return user scripts information")
2373 broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
2374 if not test:
2375   self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node,
2376 "user scripts not present or not executable: %s" %
2377 utils.CommaJoin(sorted(broken_scripts)))
2379 def _VerifyNodeNetwork(self, ninfo, nresult):
2380 """Check the node network connectivity results.
2382 @type ninfo: L{objects.Node}
2383 @param ninfo: the node to check
2384 @param nresult: the remote results for the node
2387 node = ninfo.name
2388 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2390 test = constants.NV_NODELIST not in nresult
2391 _ErrorIf(test, constants.CV_ENODESSH, node,
2392 "node hasn't returned node ssh connectivity data")
2393 if not test:
2394   if nresult[constants.NV_NODELIST]:
2395 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
2396 _ErrorIf(True, constants.CV_ENODESSH, node,
2397 "ssh communication with node '%s': %s", a_node, a_msg)
2399 test = constants.NV_NODENETTEST not in nresult
2400 _ErrorIf(test, constants.CV_ENODENET, node,
2401 "node hasn't returned node tcp connectivity data")
2402 if not test:
2403   if nresult[constants.NV_NODENETTEST]:
2404     nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
2405     for anode in nlist:
2406       _ErrorIf(True, constants.CV_ENODENET, node,
2407 "tcp communication with node '%s': %s",
2408 anode, nresult[constants.NV_NODENETTEST][anode])
2410 test = constants.NV_MASTERIP not in nresult
2411 _ErrorIf(test, constants.CV_ENODENET, node,
2412 "node hasn't returned node master IP reachability data")
2413 if not test:
2414   if not nresult[constants.NV_MASTERIP]:
2415 if node == self.master_node:
2416 msg = "the master node cannot reach the master IP (not configured?)"
2417 else:
2418   msg = "cannot reach the master IP"
2419 _ErrorIf(True, constants.CV_ENODENET, node, msg)
2421 def _VerifyInstance(self, instance, instanceconfig, node_image,
2423 """Verify an instance.
2425 This function checks to see if the required block devices are
2426 available on the instance's node.
2429 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2430 node_current = instanceconfig.primary_node
2432 node_vol_should = {}
2433 instanceconfig.MapLVsByNode(node_vol_should)
2435 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), self.group_info)
2436 err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
2437 _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err))
2439 for node in node_vol_should:
2440 n_img = node_image[node]
2441 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2442 # ignore missing volumes on offline or broken nodes
2443 continue
2444 for volume in node_vol_should[node]:
2445 test = volume not in n_img.volumes
2446 _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
2447 "volume %s missing on node %s", volume, node)
2449 if instanceconfig.admin_state == constants.ADMINST_UP:
2450 pri_img = node_image[node_current]
2451 test = instance not in pri_img.instances and not pri_img.offline
2452 _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
2453 "instance not running on its primary node %s",
2456 diskdata = [(nname, success, status, idx)
2457 for (nname, disks) in diskstatus.items()
2458 for idx, (success, status) in enumerate(disks)]
2460 for nname, success, bdev_status, idx in diskdata:
2461 # the 'ghost node' construction in Exec() ensures that we have a
2463 snode = node_image[nname]
2464 bad_snode = snode.ghost or snode.offline
2465 _ErrorIf(instanceconfig.admin_state == constants.ADMINST_UP and
2466 not success and not bad_snode,
2467 constants.CV_EINSTANCEFAULTYDISK, instance,
2468 "couldn't retrieve status for disk/%s on %s: %s",
2469 idx, nname, bdev_status)
2470 _ErrorIf((instanceconfig.admin_state == constants.ADMINST_UP and
2471 success and bdev_status.ldisk_status == constants.LDS_FAULTY),
2472 constants.CV_EINSTANCEFAULTYDISK, instance,
2473 "disk/%s on %s is faulty", idx, nname)
2475 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
2476 """Verify if there are any unknown volumes in the cluster.
2478 The .os, .swap and backup volumes are ignored. All other volumes are
2479 reported as unknown.
2481 @type reserved: L{ganeti.utils.FieldSet}
2482 @param reserved: a FieldSet of reserved volume names
2485 for node, n_img in node_image.items():
2486 if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
2487 self.all_node_info[node].group != self.group_uuid):
2488 # skip non-healthy nodes
2489 continue
2490 for volume in n_img.volumes:
2491 test = ((node not in node_vol_should or
2492 volume not in node_vol_should[node]) and
2493 not reserved.Matches(volume))
2494 self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
2495 "volume %s is unknown", volume)
2497 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
2498 """Verify N+1 Memory Resilience.
2500 Check that if one single node dies we can still start all the
2501 instances it was primary for.
2504 cluster_info = self.cfg.GetClusterInfo()
2505 for node, n_img in node_image.items():
2506 # This code checks that every node which is now listed as
2507 # secondary has enough memory to host all instances it is
2508 # supposed to, should a single other node in the cluster fail.
2509 # FIXME: not ready for failover to an arbitrary node
2510 # FIXME: does not support file-backed instances
2511 # WARNING: we currently take into account down instances as well
2512 # as up ones, considering that even if they're down someone
2513 # might want to start them even in the event of a node failure.
2514 if n_img.offline or self.all_node_info[node].group != self.group_uuid:
2515 # we're skipping nodes marked offline and nodes in other groups from
2516 # the N+1 warning, since most likely we don't have good memory
2517 # information from them; we already list instances living on such
2518 # nodes, and that's enough warning
2520 #TODO(dynmem): also consider ballooning out other instances
2521 for prinode, instances in n_img.sbp.items():
2522 needed_mem = 0
2523 for instance in instances:
2524 bep = cluster_info.FillBE(instance_cfg[instance])
2525 if bep[constants.BE_AUTO_BALANCE]:
2526 needed_mem += bep[constants.BE_MINMEM]
2527 test = n_img.mfree < needed_mem
2528 self._ErrorIf(test, constants.CV_ENODEN1, node,
2529 "not enough memory to accomodate instance failovers"
2530 " should node %s fail (%dMiB needed, %dMiB available)",
2531 prinode, needed_mem, n_img.mfree)
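# Editor's sketch of the N+1 arithmetic above (illustrative numbers only):
# per secondary node, sum BE_MINMEM over the auto-balanced instances of each
# primary node and compare against the secondary's free memory:
#
#   >>> mfree = 2048                  # MiB reported free on the secondary
#   >>> minmem = [512, 1024, 768]     # BE_MINMEM of auto-balanced instances
#   >>> needed_mem = sum(minmem)
#   >>> mfree < needed_mem            # True would raise a CV_ENODEN1 error
#   True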
2533 @classmethod
2534 def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
2535 (files_all, files_opt, files_mc, files_vm)):
2536 """Verifies file checksums collected from all nodes.
2538 @param errorif: Callback for reporting errors
2539 @param nodeinfo: List of L{objects.Node} objects
2540 @param master_node: Name of master node
2541 @param all_nvinfo: RPC results
2544 # Define functions determining which nodes to consider for a file
2545 files2nodefn = [
2546   (files_all, None),
2547   (files_mc, lambda node: (node.master_candidate or
2548                            node.name == master_node)),
2549   (files_vm, lambda node: node.vm_capable),
2550   ]
2552 # Build mapping from filename to list of nodes which should have the file
2553 nodefiles = {}
2554 for (files, fn) in files2nodefn:
2555   if fn is None:
2556     filenodes = nodeinfo
2557   else:
2558     filenodes = filter(fn, nodeinfo)
2559 nodefiles.update((filename,
2560 frozenset(map(operator.attrgetter("name"), filenodes)))
2561 for filename in files)
2563 assert set(nodefiles) == (files_all | files_mc | files_vm)
2565 fileinfo = dict((filename, {}) for filename in nodefiles)
2566 ignore_nodes = set()
2568 for node in nodeinfo:
2569 if node.offline:
2570   ignore_nodes.add(node.name)
2571   continue
2573 nresult = all_nvinfo[node.name]
2575 if nresult.fail_msg or not nresult.payload:
2576   node_files = None
2577 else:
2578   node_files = nresult.payload.get(constants.NV_FILELIST, None)
2580 test = not (node_files and isinstance(node_files, dict))
2581 errorif(test, constants.CV_ENODEFILECHECK, node.name,
2582 "Node did not return file checksum data")
2583 if test:
2584   ignore_nodes.add(node.name)
2585   continue
2587 # Build per-checksum mapping from filename to nodes having it
2588 for (filename, checksum) in node_files.items():
2589 assert filename in nodefiles
2590 fileinfo[filename].setdefault(checksum, set()).add(node.name)
2592 for (filename, checksums) in fileinfo.items():
2593 assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2595 # Nodes having the file
2596 with_file = frozenset(node_name
2597 for nodes in fileinfo[filename].values()
2598 for node_name in nodes) - ignore_nodes
2600 expected_nodes = nodefiles[filename] - ignore_nodes
2602 # Nodes missing file
2603 missing_file = expected_nodes - with_file
2605 if filename in files_opt:
2607 errorif(missing_file and missing_file != expected_nodes,
2608 constants.CV_ECLUSTERFILECHECK, None,
2609 "File %s is optional, but it must exist on all or no"
2610 " nodes (not found on %s)",
2611 filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2612 else:
2613   errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
2614 "File %s is missing from node(s) %s", filename,
2615 utils.CommaJoin(utils.NiceSort(missing_file)))
2617 # Warn if a node has a file it shouldn't
2618 unexpected = with_file - expected_nodes
2620 constants.CV_ECLUSTERFILECHECK, None,
2621 "File %s should not exist on node(s) %s",
2622 filename, utils.CommaJoin(utils.NiceSort(unexpected)))
2624 # See if there are multiple versions of the file
2625 test = len(checksums) > 1
2626 if test:
2627   variants = ["variant %s on %s" %
2628 (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2629 for (idx, (checksum, nodes)) in
2630 enumerate(sorted(checksums.items()))]
2631 else:
2632   variants = []
2634 errorif(test, constants.CV_ECLUSTERFILECHECK, None,
2635 "File %s found with %s different checksums (%s)",
2636 filename, len(checksums), "; ".join(variants))
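# Editor's sketch of the data layout used by _VerifyFiles above (values are
# illustrative): nodefiles maps filename -> frozenset of node names expected
# to have the file, fileinfo maps filename -> {checksum: set(node names)}:
#
#   >>> fileinfo = {"/etc/f": {"a" * 40: set(["node1", "node2"]),
#   ...                        "b" * 40: set(["node3"])}}
#   >>> len(fileinfo["/etc/f"]) > 1   # two checksums: file differs somewhere
#   True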
2638 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2640 """Verifies and the node DRBD status.
2642 @type ninfo: L{objects.Node}
2643 @param ninfo: the node to check
2644 @param nresult: the remote results for the node
2645 @param instanceinfo: the dict of instances
2646 @param drbd_helper: the configured DRBD usermode helper
2647 @param drbd_map: the DRBD map as returned by
2648 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2651 node = ninfo.name
2652 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2654 if drbd_helper:
2655   helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2656   test = (helper_result is None)
2657 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2658 "no drbd usermode helper returned")
2659 if helper_result:
2660   status, payload = helper_result
2661   test = not status
2662 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2663 "drbd usermode helper check unsuccessful: %s", payload)
2664 test = status and (payload != drbd_helper)
2665 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2666 "wrong drbd usermode helper: %s", payload)
2668 # compute the DRBD minors
2669 node_drbd = {}
2670 for minor, instance in drbd_map[node].items():
2671 test = instance not in instanceinfo
2672 _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
2673 "ghost instance '%s' in temporary DRBD map", instance)
2674 # ghost instance should not be running, but otherwise we
2675 # don't give double warnings (both ghost instance and
2676 # unallocated minor in use)
2677 if test:
2678   node_drbd[minor] = (instance, False)
2679 else:
2680   instance = instanceinfo[instance]
2681 node_drbd[minor] = (instance.name,
2682 instance.admin_state == constants.ADMINST_UP)
2684 # and now check them
2685 used_minors = nresult.get(constants.NV_DRBDLIST, [])
2686 test = not isinstance(used_minors, (tuple, list))
2687 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2688 "cannot parse drbd status file: %s", str(used_minors))
2689 if test:
2690   # we cannot check drbd status
2691   return
2693 for minor, (iname, must_exist) in node_drbd.items():
2694 test = minor not in used_minors and must_exist
2695 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2696 "drbd minor %d of instance %s is not active", minor, iname)
2697 for minor in used_minors:
2698 test = minor not in node_drbd
2699 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2700 "unallocated drbd minor %d is in use", minor)
2702 def _UpdateNodeOS(self, ninfo, nresult, nimg):
2703 """Builds the node OS structures.
2705 @type ninfo: L{objects.Node}
2706 @param ninfo: the node to check
2707 @param nresult: the remote results for the node
2708 @param nimg: the node image object
2711 node = ninfo.name
2712 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2714 remote_os = nresult.get(constants.NV_OSLIST, None)
2715 test = (not isinstance(remote_os, list) or
2716 not compat.all(isinstance(v, list) and len(v) == 7
2717 for v in remote_os))
2719 _ErrorIf(test, constants.CV_ENODEOS, node,
2720 "node hasn't returned valid OS data")
2729 for (name, os_path, status, diagnose,
2730 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2732 if name not in os_dict:
2733   os_dict[name] = []
2735 # parameters is a list of lists instead of list of tuples due to
2736 # JSON lacking a real tuple type, fix it:
2737 parameters = [tuple(v) for v in parameters]
2738 os_dict[name].append((os_path, status, diagnose,
2739 set(variants), set(parameters), set(api_ver)))
2741 nimg.oslist = os_dict
2743 def _VerifyNodeOS(self, ninfo, nimg, base):
2744 """Verifies the node OS list.
2746 @type ninfo: L{objects.Node}
2747 @param ninfo: the node to check
2748 @param nimg: the node image object
2749 @param base: the 'template' node we match against (e.g. from the master)
2752 node = ninfo.name
2753 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2755 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2757 beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2758 for os_name, os_data in nimg.oslist.items():
2759 assert os_data, "Empty OS status for OS %s?!" % os_name
2760 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2761 _ErrorIf(not f_status, constants.CV_ENODEOS, node,
2762 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2763 _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node,
2764 "OS '%s' has multiple entries (first one shadows the rest): %s",
2765 os_name, utils.CommaJoin([v[0] for v in os_data]))
2766 # comparisons with the 'base' image
2767 test = os_name not in base.oslist
2768 _ErrorIf(test, constants.CV_ENODEOS, node,
2769 "Extra OS %s not present on reference node (%s)",
2773 assert base.oslist[os_name], "Base node has empty OS status?"
2774 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2775 if not b_status:
2776   # base OS is invalid, skipping
2777   continue
2778 for kind, a, b in [("API version", f_api, b_api),
2779 ("variants list", f_var, b_var),
2780 ("parameters", beautify_params(f_param),
2781 beautify_params(b_param))]:
2782 _ErrorIf(a != b, constants.CV_ENODEOS, node,
2783 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2784 kind, os_name, base.name,
2785 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2787 # check any missing OSes
2788 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2789 _ErrorIf(missing, constants.CV_ENODEOS, node,
2790 "OSes present on reference node %s but missing on this node: %s",
2791 base.name, utils.CommaJoin(missing))
2793 def _VerifyOob(self, ninfo, nresult):
2794 """Verifies out of band functionality of a node.
2796 @type ninfo: L{objects.Node}
2797 @param ninfo: the node to check
2798 @param nresult: the remote results for the node
2800 node = ninfo.name
2802 # We just have to verify the paths on master and/or master candidates
2803 # as the oob helper is invoked on the master
2804 if ((ninfo.master_candidate or ninfo.master_capable) and
2805 constants.NV_OOB_PATHS in nresult):
2806 for path_result in nresult[constants.NV_OOB_PATHS]:
2807 self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
2809 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2810 """Verifies and updates the node volume data.
2812 This function will update a L{NodeImage}'s internal structures
2813 with data from the remote call.
2815 @type ninfo: L{objects.Node}
2816 @param ninfo: the node to check
2817 @param nresult: the remote results for the node
2818 @param nimg: the node image object
2819 @param vg_name: the configured VG name
2822 node = ninfo.name
2823 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2825 nimg.lvm_fail = True
2826 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2827 if vg_name is None:
2828   pass
2829 elif isinstance(lvdata, basestring):
2830 _ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s",
2831 utils.SafeEncode(lvdata))
2832 elif not isinstance(lvdata, dict):
2833 _ErrorIf(True, constants.CV_ENODELVM, node,
2834 "rpc call to node failed (lvlist)")
2835 else:
2836   nimg.volumes = lvdata
2837   nimg.lvm_fail = False
2839 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2840 """Verifies and updates the node instance list.
2842 If the listing was successful, then updates this node's instance
2843 list. Otherwise, it marks the RPC call as failed for the instance
2844 list key.
2846 @type ninfo: L{objects.Node}
2847 @param ninfo: the node to check
2848 @param nresult: the remote results for the node
2849 @param nimg: the node image object
2852 idata = nresult.get(constants.NV_INSTANCELIST, None)
2853 test = not isinstance(idata, list)
2854 self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
2855 "rpc call to node failed (instancelist): %s",
2856 utils.SafeEncode(str(idata)))
2857 if test:
2858   nimg.hyp_fail = True
2859 else:
2860   nimg.instances = idata
2862 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2863 """Verifies and computes a node information map
2865 @type ninfo: L{objects.Node}
2866 @param ninfo: the node to check
2867 @param nresult: the remote results for the node
2868 @param nimg: the node image object
2869 @param vg_name: the configured VG name
2872 node = ninfo.name
2873 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2875 # try to read free memory (from the hypervisor)
2876 hv_info = nresult.get(constants.NV_HVINFO, None)
2877 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2878 _ErrorIf(test, constants.CV_ENODEHV, node,
2879 "rpc call to node failed (hvinfo)")
2880 if not test:
2881   try:
2882     nimg.mfree = int(hv_info["memory_free"])
2883 except (ValueError, TypeError):
2884 _ErrorIf(True, constants.CV_ENODERPC, node,
2885 "node returned invalid nodeinfo, check hypervisor")
2887 # FIXME: devise a free space model for file based instances as well
2888 if vg_name is not None:
2889 test = (constants.NV_VGLIST not in nresult or
2890 vg_name not in nresult[constants.NV_VGLIST])
2891 _ErrorIf(test, constants.CV_ENODELVM, node,
2892 "node didn't return data for the volume group '%s'"
2893 " - it is either missing or broken", vg_name)
2894 if not test:
2895   try:
2896     nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2897 except (ValueError, TypeError):
2898 _ErrorIf(True, constants.CV_ENODERPC, node,
2899 "node returned invalid LVM info, check LVM status")
2901 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2902 """Gets per-disk status information for all instances.
2904 @type nodelist: list of strings
2905 @param nodelist: Node names
2906 @type node_image: dict of (name, L{objects.Node})
2907 @param node_image: Node objects
2908 @type instanceinfo: dict of (name, L{objects.Instance})
2909 @param instanceinfo: Instance objects
2910 @rtype: {instance: {node: [(success, payload)]}}
2911 @return: a dictionary of per-instance dictionaries with nodes as
2912 keys and disk information as values; the disk information is a
2913 list of tuples (success, payload)
2916 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2918 node_disks = {}
2919 node_disks_devonly = {}
2920 diskless_instances = set()
2921 diskless = constants.DT_DISKLESS
2923 for nname in nodelist:
2924 node_instances = list(itertools.chain(node_image[nname].pinst,
2925 node_image[nname].sinst))
2926 diskless_instances.update(inst for inst in node_instances
2927 if instanceinfo[inst].disk_template == diskless)
2928 disks = [(inst, disk)
2929 for inst in node_instances
2930 for disk in instanceinfo[inst].disks]
2932 if not disks:
2933   # No need to collect data
2934   continue
2936 node_disks[nname] = disks
2938 # _AnnotateDiskParams makes already copies of the disks
2939 devonly = []
2940 for (inst, dev) in disks:
2941 (anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
2942 self.cfg.SetDiskID(anno_disk, nname)
2943 devonly.append(anno_disk)
2945 node_disks_devonly[nname] = devonly
2947 assert len(node_disks) == len(node_disks_devonly)
2949 # Collect data from all nodes with disks
2950 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2953 assert len(result) == len(node_disks)
2955 instdisk = {}
2957 for (nname, nres) in result.items():
2958 disks = node_disks[nname]
2960 if nres.offline:
2961   # No data from this node
2962   data = len(disks) * [(False, "node offline")]
2963 else:
2964   msg = nres.fail_msg
2965 _ErrorIf(msg, constants.CV_ENODERPC, nname,
2966 "while getting disk information: %s", msg)
2967 if msg:
2968   # No data from this node
2969   data = len(disks) * [(False, msg)]
2970 else:
2971   data = []
2972 for idx, i in enumerate(nres.payload):
2973 if isinstance(i, (tuple, list)) and len(i) == 2:
2974   data.append(i)
2975 else:
2976 logging.warning("Invalid result from node %s, entry %d: %s",
2977 nname, idx, i)
2978 data.append((False, "Invalid result from the remote node"))
2980 for ((inst, _), status) in zip(disks, data):
2981 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2983 # Add empty entries for diskless instances.
2984 for inst in diskless_instances:
2985 assert inst not in instdisk
2986 instdisk[inst] = {}
2988 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2989 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2990 compat.all(isinstance(s, (tuple, list)) and
2991 len(s) == 2 for s in statuses)
2992 for inst, nnames in instdisk.items()
2993 for nname, statuses in nnames.items())
2994 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
2996 return instdisk
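# Editor's sketch of the instdisk structure returned above (illustrative;
# the second element of each tuple is the block device status payload):
#
#   >>> instdisk = {"inst1": {"node1": [(True, "<bdev status>")],
#   ...                       "node2": [(False, "node offline")]}}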
2998 @staticmethod
2999 def _SshNodeSelector(group_uuid, all_nodes):
3000 """Create endless iterators for all potential SSH check hosts.
3003 nodes = [node for node in all_nodes
3004 if (node.group != group_uuid and
3005     not node.offline)]
3006 keyfunc = operator.attrgetter("group")
3008 return map(itertools.cycle,
3009 [sorted(map(operator.attrgetter("name"), names))
3010 for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
3013 @classmethod
3014 def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
3015 """Choose which nodes should talk to which other nodes.
3017 We will make nodes contact all nodes in their group, and one node from
3018 every other group.
3020 @warning: This algorithm has a known issue if one node group is much
3021 smaller than others (e.g. just one node). In such a case all other
3022 nodes will talk to the single node.
3025 online_nodes = sorted(node.name for node in group_nodes if not node.offline)
3026 sel = cls._SshNodeSelector(group_uuid, all_nodes)
3028 return (online_nodes,
3029 dict((name, sorted([i.next() for i in sel]))
3030 for name in online_nodes))
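# Editor's example of the selection above: foreign nodes are grouped by node
# group, each group's sorted names are wrapped in itertools.cycle, and every
# online node draws one name per foreign group. Hypothetical layout:
#
#   >>> import itertools
#   >>> foreign_groups = [["n3", "n4"], ["n5"]]
#   >>> sel = [itertools.cycle(sorted(n)) for n in foreign_groups]
#   >>> [[c.next() for c in sel] for _ in range(2)]   # two local nodes
#   [['n3', 'n5'], ['n4', 'n5']]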
3032 def BuildHooksEnv(self):
3035 Cluster-Verify hooks are run only in the post phase; their failure is
3036 logged in the verify output and makes the verification fail.
3040 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
3043 env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
3044 for node in self.my_node_info.values())
3046 return env
3048 def BuildHooksNodes(self):
3049 """Build hooks nodes.
3052 return ([], self.my_node_names)
3054 def Exec(self, feedback_fn):
3055 """Verify integrity of the node group, performing various test on nodes.
3058 # This method has too many local variables. pylint: disable=R0914
3059 feedback_fn("* Verifying group '%s'" % self.group_info.name)
3061 if not self.my_node_names:
3063 feedback_fn("* Empty node group, skipping verification")
3064 return True
3066 self.bad = False
3067 _ErrorIf = self._ErrorIf # pylint: disable=C0103
3068 verbose = self.op.verbose
3069 self._feedback_fn = feedback_fn
3071 vg_name = self.cfg.GetVGName()
3072 drbd_helper = self.cfg.GetDRBDHelper()
3073 cluster = self.cfg.GetClusterInfo()
3074 groupinfo = self.cfg.GetAllNodeGroupsInfo()
3075 hypervisors = cluster.enabled_hypervisors
3076 node_data_list = [self.my_node_info[name] for name in self.my_node_names]
3078 i_non_redundant = [] # Non redundant instances
3079 i_non_a_balanced = [] # Non auto-balanced instances
3080 i_offline = 0 # Count of offline instances
3081 n_offline = 0 # Count of offline nodes
3082 n_drained = 0 # Count of nodes being drained
3083 node_vol_should = {}
3085 # FIXME: verify OS list
3088 filemap = _ComputeAncillaryFiles(cluster, False)
3090 # do local checksums
3091 master_node = self.master_node = self.cfg.GetMasterNode()
3092 master_ip = self.cfg.GetMasterIP()
3094 feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
3096 user_scripts = []
3097 if self.cfg.GetUseExternalMipScript():
3098 user_scripts.append(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
3100 node_verify_param = {
3101 constants.NV_FILELIST:
3102 utils.UniqueSequence(filename
3103 for files in filemap
3104 for filename in files),
3105 constants.NV_NODELIST:
3106 self._SelectSshCheckNodes(node_data_list, self.group_uuid,
3107 self.all_node_info.values()),
3108 constants.NV_HYPERVISOR: hypervisors,
3109 constants.NV_HVPARAMS:
3110 _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
3111 constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
3112 for node in node_data_list
3113 if not node.offline],
3114 constants.NV_INSTANCELIST: hypervisors,
3115 constants.NV_VERSION: None,
3116 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
3117 constants.NV_NODESETUP: None,
3118 constants.NV_TIME: None,
3119 constants.NV_MASTERIP: (master_node, master_ip),
3120 constants.NV_OSLIST: None,
3121 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
3122 constants.NV_USERSCRIPTS: user_scripts,
3125 if vg_name is not None:
3126 node_verify_param[constants.NV_VGLIST] = None
3127 node_verify_param[constants.NV_LVLIST] = vg_name
3128 node_verify_param[constants.NV_PVLIST] = [vg_name]
3130 if drbd_helper:
3131   node_verify_param[constants.NV_DRBDLIST] = None
3132   node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
3135 # FIXME: this needs to be changed per node-group, not cluster-wide
3136 bridges = set()
3137 default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
3138 if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3139 bridges.add(default_nicpp[constants.NIC_LINK])
3140 for instance in self.my_inst_info.values():
3141 for nic in instance.nics:
3142 full_nic = cluster.SimpleFillNIC(nic.nicparams)
3143 if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3144 bridges.add(full_nic[constants.NIC_LINK])
3146 if bridges:
3147   node_verify_param[constants.NV_BRIDGES] = list(bridges)
3149 # Build our expected cluster state
3150 node_image = dict((node.name, self.NodeImage(offline=node.offline,
3152 vm_capable=node.vm_capable))
3153 for node in node_data_list)
3156 oob_paths = []
3157 for node in self.all_node_info.values():
3158 path = _SupportsOob(self.cfg, node)
3159 if path and path not in oob_paths:
3160 oob_paths.append(path)
3162 if oob_paths:
3163   node_verify_param[constants.NV_OOB_PATHS] = oob_paths
3165 for instance in self.my_inst_names:
3166 inst_config = self.my_inst_info[instance]
3167 if inst_config.admin_state == constants.ADMINST_OFFLINE:
3168   i_offline += 1
3170 for nname in inst_config.all_nodes:
3171 if nname not in node_image:
3172 gnode = self.NodeImage(name=nname)
3173 gnode.ghost = (nname not in self.all_node_info)
3174 node_image[nname] = gnode
3176 inst_config.MapLVsByNode(node_vol_should)
3178 pnode = inst_config.primary_node
3179 node_image[pnode].pinst.append(instance)
3181 for snode in inst_config.secondary_nodes:
3182 nimg = node_image[snode]
3183 nimg.sinst.append(instance)
3184 if pnode not in nimg.sbp:
3185 nimg.sbp[pnode] = []
3186 nimg.sbp[pnode].append(instance)
3188 # At this point, we have the in-memory data structures complete,
3189 # except for the runtime information, which we'll gather next
3191 # Due to the way our RPC system works, exact response times cannot be
3192 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
3193 # time before and after executing the request, we can at least have a time
3194 # window.
3195 nvinfo_starttime = time.time()
3196 all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
3198 self.cfg.GetClusterName())
3199 nvinfo_endtime = time.time()
3201 if self.extra_lv_nodes and vg_name is not None:
3203 self.rpc.call_node_verify(self.extra_lv_nodes,
3204 {constants.NV_LVLIST: vg_name},
3205 self.cfg.GetClusterName())
3206 else:
3207   extra_lv_nvinfo = {}
3209 all_drbd_map = self.cfg.ComputeDRBDMap()
3211 feedback_fn("* Gathering disk information (%s nodes)" %
3212 len(self.my_node_names))
3213 instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
3216 feedback_fn("* Verifying configuration file consistency")
3218 # If not all nodes are being checked, we need to make sure the master node
3219 # and a non-checked vm_capable node are in the list.
3220 absent_nodes = set(self.all_node_info).difference(self.my_node_info)
3221 if absent_nodes:
3222   vf_nvinfo = all_nvinfo.copy()
3223 vf_node_info = list(self.my_node_info.values())
3224 additional_nodes = []
3225 if master_node not in self.my_node_info:
3226 additional_nodes.append(master_node)
3227 vf_node_info.append(self.all_node_info[master_node])
3228 # Add the first vm_capable node we find which is not included,
3229 # excluding the master node (which we already have)
3230 for node in absent_nodes:
3231 nodeinfo = self.all_node_info[node]
3232 if (nodeinfo.vm_capable and not nodeinfo.offline and
3233 node != master_node):
3234 additional_nodes.append(node)
3235 vf_node_info.append(self.all_node_info[node])
3236 break
3237 key = constants.NV_FILELIST
3238 vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
3239 {key: node_verify_param[key]},
3240 self.cfg.GetClusterName()))
3241 else:
3242   vf_nvinfo = all_nvinfo
3243   vf_node_info = self.my_node_info.values()
3245 self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
3247 feedback_fn("* Verifying node status")
3249 refos_img = None
3251 for node_i in node_data_list:
3252   node = node_i.name
3253   nimg = node_image[node]
3255 if node_i.offline:
3256   if verbose:
3257     feedback_fn("* Skipping offline node %s" % (node,))
3258   n_offline += 1
3259   continue
3261 if node == master_node:
3262   ntype = "master"
3263 elif node_i.master_candidate:
3264   ntype = "master candidate"
3265 elif node_i.drained:
3266   ntype = "drained"
3267   n_drained += 1
3268 else:
3269   ntype = "regular"
3270 if verbose:
3271   feedback_fn("* Verifying node %s (%s)" % (node, ntype))
3273 msg = all_nvinfo[node].fail_msg
3274 _ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
3275          msg)
3276 if msg:
3277   nimg.rpc_fail = True
3278   continue
3280 nresult = all_nvinfo[node].payload
3282 nimg.call_ok = self._VerifyNode(node_i, nresult)
3283 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
3284 self._VerifyNodeNetwork(node_i, nresult)
3285 self._VerifyNodeUserScripts(node_i, nresult)
3286 self._VerifyOob(node_i, nresult)
3288 if nimg.vm_capable:
3289   self._VerifyNodeLVM(node_i, nresult, vg_name)
3290   self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
3291                        all_drbd_map)
3293 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
3294 self._UpdateNodeInstances(node_i, nresult, nimg)
3295 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
3296 self._UpdateNodeOS(node_i, nresult, nimg)
3298 if not nimg.os_fail:
3299 if refos_img is None:
3300   refos_img = nimg
3301 self._VerifyNodeOS(node_i, nimg, refos_img)
3302 self._VerifyNodeBridges(node_i, nresult, bridges)
3304 # Check whether all running instances are primary for the node. (This
3305 # can no longer be done from _VerifyInstance below, since some of the
3306 # wrong instances could be from other node groups.)
3307 non_primary_inst = set(nimg.instances).difference(nimg.pinst)
3309 for inst in non_primary_inst:
3310 test = inst in self.all_inst_info
3311 _ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
3312 "instance should not run on node %s", node_i.name)
3313 _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
3314 "node is running unknown instance %s", inst)
3316 for node, result in extra_lv_nvinfo.items():
3317 self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
3318 node_image[node], vg_name)
3320 feedback_fn("* Verifying instance status")
3321 for instance in self.my_inst_names:
3322 if verbose:
3323   feedback_fn("* Verifying instance %s" % instance)
3324 inst_config = self.my_inst_info[instance]
3325 self._VerifyInstance(instance, inst_config, node_image,
3327 inst_nodes_offline = []
3329 pnode = inst_config.primary_node
3330 pnode_img = node_image[pnode]
3331 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
3332 constants.CV_ENODERPC, pnode, "instance %s, connection to"
3333 " primary node failed", instance)
3335 _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
3337 constants.CV_EINSTANCEBADNODE, instance,
3338 "instance is marked as running and lives on offline node %s",
3339 inst_config.primary_node)
3341 # If the instance is non-redundant we cannot survive losing its primary
3342 # node, so we are not N+1 compliant.
3343 if inst_config.disk_template not in constants.DTS_MIRRORED:
3344 i_non_redundant.append(instance)
3346 _ErrorIf(len(inst_config.secondary_nodes) > 1,
3347 constants.CV_EINSTANCELAYOUT,
3348 instance, "instance has multiple secondary nodes: %s",
3349 utils.CommaJoin(inst_config.secondary_nodes),
3350 code=self.ETYPE_WARNING)
3352 if inst_config.disk_template in constants.DTS_INT_MIRROR:
3353 pnode = inst_config.primary_node
3354 instance_nodes = utils.NiceSort(inst_config.all_nodes)
3355 instance_groups = {}
3357 for node in instance_nodes:
3358 instance_groups.setdefault(self.all_node_info[node].group,
3359                            []).append(node)
3361 pretty_list = [
3362   "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
3363 # Sort so that we always list the primary node first.
3364 for group, nodes in sorted(instance_groups.items(),
3365 key=lambda (_, nodes): pnode in nodes,
3368 self._ErrorIf(len(instance_groups) > 1,
3369 constants.CV_EINSTANCESPLITGROUPS,
3370 instance, "instance has primary and secondary nodes in"
3371 " different groups: %s", utils.CommaJoin(pretty_list),
3372 code=self.ETYPE_WARNING)
3374 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
3375 i_non_a_balanced.append(instance)
3377 for snode in inst_config.secondary_nodes:
3378 s_img = node_image[snode]
3379 _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
3380 snode, "instance %s, connection to secondary node failed",
3381 instance)
3383 if s_img.offline:
3384   inst_nodes_offline.append(snode)
3386 # warn that the instance lives on offline nodes
3387 _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
3388 "instance has offline secondary node(s) %s",
3389 utils.CommaJoin(inst_nodes_offline))
3390 # ... or ghost/non-vm_capable nodes
3391 for node in inst_config.all_nodes:
3392 _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
3393 instance, "instance lives on ghost node %s", node)
3394 _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
3395 instance, "instance lives on non-vm_capable node %s", node)
3397 feedback_fn("* Verifying orphan volumes")
3398 reserved = utils.FieldSet(*cluster.reserved_lvs)
3400 # We will get spurious "unknown volume" warnings if any node of this group
3401 # is secondary for an instance whose primary is in another group. To avoid
3402 # them, we find these instances and add their volumes to node_vol_should.
3403 for inst in self.all_inst_info.values():
3404 for secondary in inst.secondary_nodes:
3405 if (secondary in self.my_node_info
3406 and inst.name not in self.my_inst_info):
3407 inst.MapLVsByNode(node_vol_should)
3410 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
3412 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
3413 feedback_fn("* Verifying N+1 Memory redundancy")
3414 self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
3416 feedback_fn("* Other Notes")
3417 if i_non_redundant:
3418   feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
3419 % len(i_non_redundant))
3421 if i_non_a_balanced:
3422 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
3423 % len(i_non_a_balanced))
3425 if i_offline:
3426   feedback_fn(" - NOTICE: %d offline instance(s) found." % i_offline)
3428 if n_offline:
3429   feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
3431 if n_drained:
3432   feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
3434 return not self.bad
3436 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
3437 """Analyze the post-hooks' result
3439 This method analyses the hook result, handles it, and sends some
3440 nicely-formatted feedback back to the user.
3442 @param phase: one of L{constants.HOOKS_PHASE_POST} or
3443 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
3444 @param hooks_results: the results of the multi-node hooks rpc call
3445 @param feedback_fn: function used to send feedback back to the caller
3446 @param lu_result: previous Exec result
3447 @return: the new Exec result, based on the previous result
3451 # We only really run POST phase hooks, only for non-empty groups,
3452 # and are only interested in their results
3453 if not self.my_node_names:
3454   # empty node group
3455   pass
3456 elif phase == constants.HOOKS_PHASE_POST:
3457 # Used to change hooks' output to proper indentation
3458 feedback_fn("* Hooks Results")
3459 assert hooks_results, "invalid result from hooks"
3461 for node_name in hooks_results:
3462 res = hooks_results[node_name]
3463 msg = res.fail_msg
3464 test = msg and not res.offline
3465 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3466 "Communication failure in hooks execution: %s", msg)
3467 if res.offline or msg:
3468 # No need to investigate payload if node is offline or gave
3469 # an error
3470 continue
3471 for script, hkr, output in res.payload:
3472 test = hkr == constants.HKR_FAIL
3473 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3474 "Script %s failed, output:", script)
3475 if test:
3476   output = self._HOOKS_INDENT_RE.sub(" ", output)
3477 feedback_fn("%s" % output)
3478 lu_result = False
3480 return lu_result
3483 class LUClusterVerifyDisks(NoHooksLU):
3484 """Verifies the cluster disks status.
3489 def ExpandNames(self):
3490 self.share_locks = _ShareAll()
3491 self.needed_locks = {
3492 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3495 def Exec(self, feedback_fn):
3496 group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
3498 # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
3499 return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
3500 for group in group_names])
3503 class LUGroupVerifyDisks(NoHooksLU):
3504 """Verifies the status of all disks in a node group.
3509 def ExpandNames(self):
3510 # Raises errors.OpPrereqError on its own if group can't be found
3511 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
3513 self.share_locks = _ShareAll()
3514 self.needed_locks = {
3515 locking.LEVEL_INSTANCE: [],
3516 locking.LEVEL_NODEGROUP: [],
3517 locking.LEVEL_NODE: [],
3520 def DeclareLocks(self, level):
3521 if level == locking.LEVEL_INSTANCE:
3522 assert not self.needed_locks[locking.LEVEL_INSTANCE]
3524 # Lock instances optimistically, needs verification once node and group
3525 # locks have been acquired
3526 self.needed_locks[locking.LEVEL_INSTANCE] = \
3527 self.cfg.GetNodeGroupInstances(self.group_uuid)
3529 elif level == locking.LEVEL_NODEGROUP:
3530 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3532 self.needed_locks[locking.LEVEL_NODEGROUP] = \
3533 set([self.group_uuid] +
3534 # Lock all groups used by instances optimistically; this requires
3535 # going via the node before it's locked, requiring verification
3538 for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3539 for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
3541 elif level == locking.LEVEL_NODE:
3542 # This will only lock the nodes in the group to be verified which contain
3543 # actual instances
3544 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3545 self._LockInstancesNodes()
3547 # Lock all nodes in group to be verified
3548 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
3549 member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
3550 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3552 def CheckPrereq(self):
3553 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3554 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3555 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3557 assert self.group_uuid in owned_groups
3559 # Check if locked instances are still correct
3560 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
3562 # Get instance information
3563 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
3565 # Check if node groups for locked instances are still correct
3566 _CheckInstancesNodeGroups(self.cfg, self.instances,
3567 owned_groups, owned_nodes, self.group_uuid)
3569 def Exec(self, feedback_fn):
3570 """Verify integrity of cluster disks.
3572 @rtype: tuple of three items
3573 @return: a tuple of (dict of node-to-node_error, list of instances
3574 which need activate-disks, dict of instance: (node, volume) for
3575 missing volumes
3578 res_nodes = {}
3579 res_instances = set()
3580 res_missing = {}
3582 nv_dict = _MapInstanceDisksToNodes([inst
3583 for inst in self.instances.values()
3584 if inst.admin_state == constants.ADMINST_UP])
3587 nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
3588 set(self.cfg.GetVmCapableNodeList()))
3590 node_lvs = self.rpc.call_lv_list(nodes, [])
3592 for (node, node_res) in node_lvs.items():
3593 if node_res.offline:
3594   continue
3596 msg = node_res.fail_msg
3597 if msg:
3598   logging.warning("Error enumerating LVs on node %s: %s", node, msg)
3599   res_nodes[node] = msg
3600   continue
3602 for lv_name, (_, _, lv_online) in node_res.payload.items():
3603 inst = nv_dict.pop((node, lv_name), None)
3604 if not (lv_online or inst is None):
3605 res_instances.add(inst)
3607 # any leftover items in nv_dict are missing LVs, let's arrange the data
3608 # better
3609 for key, inst in nv_dict.iteritems():
3610 res_missing.setdefault(inst, []).append(list(key))
3612 return (res_nodes, list(res_instances), res_missing)
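# Editor's sketch of the return value documented above (illustrative; in the
# code the res_instances/res_missing entries are instance objects and
# (node, LV name) pairs):
#
#   >>> res_nodes = {"node1": "Error enumerating LVs"}
#   >>> res_instances = ["inst1"]                 # need activate-disks
#   >>> res_missing = {"inst2": [["node2", "xenvg/disk0_data"]]}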
3615 class LUClusterRepairDiskSizes(NoHooksLU):
3616 """Verifies the cluster disks sizes.
3621 def ExpandNames(self):
3622 if self.op.instances:
3623 self.wanted_names = _GetWantedInstances(self, self.op.instances)
3624 self.needed_locks = {
3625 locking.LEVEL_NODE_RES: [],
3626 locking.LEVEL_INSTANCE: self.wanted_names,
3628 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
3629 else:
3630   self.wanted_names = None
3631 self.needed_locks = {
3632 locking.LEVEL_NODE_RES: locking.ALL_SET,
3633 locking.LEVEL_INSTANCE: locking.ALL_SET,
3635 self.share_locks = {
3636 locking.LEVEL_NODE_RES: 1,
3637 locking.LEVEL_INSTANCE: 0,
3640 def DeclareLocks(self, level):
3641 if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
3642 self._LockInstancesNodes(primary_only=True, level=level)
3644 def CheckPrereq(self):
3645 """Check prerequisites.
3647 This only checks the optional instance list against the existing names.
3650 if self.wanted_names is None:
3651 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
3653 self.wanted_instances = \
3654 map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
3656 def _EnsureChildSizes(self, disk):
3657 """Ensure children of the disk have the needed disk size.
3659 This is valid mainly for DRBD8 and fixes an issue where the
3660 children have smaller disk size.
3662 @param disk: an L{ganeti.objects.Disk} object
3665 if disk.dev_type == constants.LD_DRBD8:
3666 assert disk.children, "Empty children for DRBD8?"
3667 fchild = disk.children[0]
3668 mismatch = fchild.size < disk.size
3669 if mismatch:
3670   self.LogInfo("Child disk has size %d, parent %d, fixing",
3671 fchild.size, disk.size)
3672 fchild.size = disk.size
3674 # and we recurse on this child only, not on the metadev
3675 return self._EnsureChildSizes(fchild) or mismatch
3676 else:
3677   return False
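# Editor's example of the check above (illustrative sizes, in MiB): only the
# first (data) child of a DRBD8 device is compared and grown, recursing in
# case it is itself stacked; the metadata child is deliberately skipped:
#
#   >>> parent_size, child_size = 10240, 10112
#   >>> child_size < parent_size    # mismatch: the child would be resized
#   True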
3679 def Exec(self, feedback_fn):
3680 """Verify the size of cluster disks.
3683 # TODO: check child disks too
3684 # TODO: check differences in size between primary/secondary nodes
3685 per_node_disks = {}
3686 for instance in self.wanted_instances:
3687 pnode = instance.primary_node
3688 if pnode not in per_node_disks:
3689 per_node_disks[pnode] = []
3690 for idx, disk in enumerate(instance.disks):
3691 per_node_disks[pnode].append((instance, idx, disk))
3693 assert not (frozenset(per_node_disks.keys()) -
3694 self.owned_locks(locking.LEVEL_NODE_RES)), \
3695 "Not owning correct locks"
3696 assert not self.owned_locks(locking.LEVEL_NODE)
3698 changed = []
3699 for node, dskl in per_node_disks.items():
3700 newl = [v[2].Copy() for v in dskl]
3701 for dsk in newl:
3702   self.cfg.SetDiskID(dsk, node)
3703 result = self.rpc.call_blockdev_getsize(node, newl)
3704 if result.fail_msg:
3705   self.LogWarning("Failure in blockdev_getsize call to node"
3706                   " %s, ignoring", node)
3707   continue
3708 if len(result.payload) != len(dskl):
3709 logging.warning("Invalid result from node %s: len(dksl)=%d,"
3710 " result.payload=%s", node, len(dskl), result.payload)
3711 self.LogWarning("Invalid result from node %s, ignoring node results",
3714 for ((instance, idx, disk), size) in zip(dskl, result.payload):
3716 self.LogWarning("Disk %d of instance %s did not return size"
3717 " information, ignoring", idx, instance.name)
3719 if not isinstance(size, (int, long)):
3720 self.LogWarning("Disk %d of instance %s did not return valid"
3721 " size information, ignoring", idx, instance.name)
3724 if size != disk.size:
3725 self.LogInfo("Disk %d of instance %s has mismatched size,"
3726 " correcting: recorded %d, actual %d", idx,
3727 instance.name, disk.size, size)
3728 disk.size = size
3729 self.cfg.Update(instance, feedback_fn)
3730 changed.append((instance.name, idx, size))
3731 if self._EnsureChildSizes(disk):
3732 self.cfg.Update(instance, feedback_fn)
3733 changed.append((instance.name, idx, disk.size))
3735 return changed
3737 class LUClusterRename(LogicalUnit):
3738 """Rename the cluster.
3741 HPATH = "cluster-rename"
3742 HTYPE = constants.HTYPE_CLUSTER
3744 def BuildHooksEnv(self):
3749 "OP_TARGET": self.cfg.GetClusterName(),
3750 "NEW_NAME": self.op.name,
3753 def BuildHooksNodes(self):
3754 """Build hooks nodes.
3757 return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
3759 def CheckPrereq(self):
3760 """Verify that the passed name is a valid one.
3763 hostname = netutils.GetHostname(name=self.op.name,
3764 family=self.cfg.GetPrimaryIPFamily())
3766 new_name = hostname.name
3767 self.ip = new_ip = hostname.ip
3768 old_name = self.cfg.GetClusterName()
3769 old_ip = self.cfg.GetMasterIP()
3770 if new_name == old_name and new_ip == old_ip:
3771 raise errors.OpPrereqError("Neither the name nor the IP address of the"
3772 " cluster has changed",
3774 if new_ip != old_ip:
3775 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
3776 raise errors.OpPrereqError("The given cluster IP address (%s) is"
3777 " reachable on the network" %
3778 new_ip, errors.ECODE_NOTUNIQUE)
3780 self.op.name = new_name
3782 def Exec(self, feedback_fn):
3783 """Rename the cluster.
3786 clustername = self.op.name
3789 # shutdown the master IP
3790 master_params = self.cfg.GetMasterNetworkParameters()
3791 ems = self.cfg.GetUseExternalMipScript()
3792 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
3794 result.Raise("Could not disable the master role")
3797 cluster = self.cfg.GetClusterInfo()
3798 cluster.cluster_name = clustername
3799 cluster.master_ip = new_ip
3800 self.cfg.Update(cluster, feedback_fn)
3802 # update the known hosts file
3803 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
3804 node_list = self.cfg.GetOnlineNodeList()
3805 try:
3806 node_list.remove(master_params.name)
3807 except ValueError:
3808 pass
3809 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
3811 master_params.ip = new_ip
3812 result = self.rpc.call_node_activate_master_ip(master_params.name,
3813 master_params, ems)
3814 msg = result.fail_msg
3815 if msg:
3816 self.LogWarning("Could not re-enable the master role on"
3817 " the master, please restart manually: %s", msg)
3822 def _ValidateNetmask(cfg, netmask):
3823 """Checks if a netmask is valid.
3825 @type cfg: L{config.ConfigWriter}
3826 @param cfg: The cluster configuration
3828 @param netmask: the netmask to be verified
3829 @raise errors.OpPrereqError: if the validation fails
3832 ip_family = cfg.GetPrimaryIPFamily()
3833 try:
3834 ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
3835 except errors.ProgrammerError:
3836 raise errors.OpPrereqError("Invalid primary ip family: %s." %
3838 if not ipcls.ValidateNetmask(netmask):
3839 raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
3843 class LUClusterSetParams(LogicalUnit):
3844 """Change the parameters of the cluster.
3847 HPATH = "cluster-modify"
3848 HTYPE = constants.HTYPE_CLUSTER
3851 def CheckArguments(self):
3855 if self.op.uid_pool:
3856 uidpool.CheckUidPool(self.op.uid_pool)
3858 if self.op.add_uids:
3859 uidpool.CheckUidPool(self.op.add_uids)
3861 if self.op.remove_uids:
3862 uidpool.CheckUidPool(self.op.remove_uids)
3864 if self.op.master_netmask is not None:
3865 _ValidateNetmask(self.cfg, self.op.master_netmask)
3867 if self.op.diskparams:
3868 for dt_params in self.op.diskparams.values():
3869 utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
3870 try:
3871 utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
3872 except errors.OpPrereqError, err:
3873 raise errors.OpPrereqError("While verify diskparams options: %s" % err,
3876 def ExpandNames(self):
3877 # FIXME: in the future maybe other cluster params won't require checking on
3878 # all nodes to be modified.
3879 self.needed_locks = {
3880 locking.LEVEL_NODE: locking.ALL_SET,
3881 locking.LEVEL_INSTANCE: locking.ALL_SET,
3882 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3884 self.share_locks = {
3885 locking.LEVEL_NODE: 1,
3886 locking.LEVEL_INSTANCE: 1,
3887 locking.LEVEL_NODEGROUP: 1,
3890 def BuildHooksEnv(self):
3894 return {
3895 "OP_TARGET": self.cfg.GetClusterName(),
3896 "NEW_VG_NAME": self.op.vg_name,
3897 }
3899 def BuildHooksNodes(self):
3900 """Build hooks nodes.
3903 mn = self.cfg.GetMasterNode()
3906 def CheckPrereq(self):
3907 """Check prerequisites.
3909 This checks whether the given parameters are consistent and
3910 whether the given volume group is valid.
3913 if self.op.vg_name is not None and not self.op.vg_name:
3914 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
3915 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
3916 " instances exist", errors.ECODE_INVAL)
3918 if self.op.drbd_helper is not None and not self.op.drbd_helper:
3919 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
3920 raise errors.OpPrereqError("Cannot disable drbd helper while"
3921 " drbd-based instances exist",
3924 node_list = self.owned_locks(locking.LEVEL_NODE)
3926 # if vg_name not None, checks given volume group on all nodes
3927 if self.op.vg_name:
3928 vglist = self.rpc.call_vg_list(node_list)
3929 for node in node_list:
3930 msg = vglist[node].fail_msg
3931 if msg:
3932 # ignoring down node
3933 self.LogWarning("Error while gathering data on node %s"
3934 " (ignoring node): %s", node, msg)
3935 continue
3936 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
3937 self.op.vg_name,
3938 constants.MIN_VG_SIZE)
3939 if vgstatus:
3940 raise errors.OpPrereqError("Error on node '%s': %s" %
3941 (node, vgstatus), errors.ECODE_ENVIRON)
3943 if self.op.drbd_helper:
3944 # checks given drbd helper on all nodes
3945 helpers = self.rpc.call_drbd_helper(node_list)
3946 for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
3948 self.LogInfo("Not checking drbd helper on offline node %s", node)
3950 msg = helpers[node].fail_msg
3952 raise errors.OpPrereqError("Error checking drbd helper on node"
3953 " '%s': %s" % (node, msg),
3954 errors.ECODE_ENVIRON)
3955 node_helper = helpers[node].payload
3956 if node_helper != self.op.drbd_helper:
3957 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
3958 (node, node_helper), errors.ECODE_ENVIRON)
3960 self.cluster = cluster = self.cfg.GetClusterInfo()
3961 # validate params changes
3962 if self.op.beparams:
3963 objects.UpgradeBeParams(self.op.beparams)
3964 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
3965 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
3967 if self.op.ndparams:
3968 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
3969 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
3971 # TODO: we need a more general way to handle resetting
3972 # cluster-level parameters to default values
3973 if self.new_ndparams["oob_program"] == "":
3974 self.new_ndparams["oob_program"] = \
3975 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
3977 if self.op.hv_state:
3978 new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
3979 self.cluster.hv_state_static)
3980 self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
3981 for hv, values in new_hv_state.items())
3983 if self.op.disk_state:
3984 new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
3985 self.cluster.disk_state_static)
3986 self.new_disk_state = \
3987 dict((storage, dict((name, cluster.SimpleFillDiskState(values))
3988 for name, values in svalues.items()))
3989 for storage, svalues in new_disk_state.items())
3991 if self.op.ipolicy:
3992 self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
3993 group_policy=False)
3995 all_instances = self.cfg.GetAllInstancesInfo().values()
3996 violations = set()
3997 for group in self.cfg.GetAllNodeGroupsInfo().values():
3998 instances = frozenset([inst for inst in all_instances
3999 if compat.any(node in group.members
4000 for node in inst.all_nodes)])
4001 new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
4002 new = _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
4003 group),
4004 new_ipolicy, instances)
4006 violations.update(new)
4009 self.LogWarning("After the ipolicy change the following instances"
4010 " violate them: %s",
4011 utils.CommaJoin(utils.NiceSort(violations)))
4013 if self.op.nicparams:
4014 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
4015 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
4016 objects.NIC.CheckParameterSyntax(self.new_nicparams)
4018 nic_errors = []
4019 # check all instances for consistency
4020 for instance in self.cfg.GetAllInstancesInfo().values():
4021 for nic_idx, nic in enumerate(instance.nics):
4022 params_copy = copy.deepcopy(nic.nicparams)
4023 params_filled = objects.FillDict(self.new_nicparams, params_copy)
4025 # check parameter syntax
4026 try:
4027 objects.NIC.CheckParameterSyntax(params_filled)
4028 except errors.ConfigurationError, err:
4029 nic_errors.append("Instance %s, nic/%d: %s" %
4030 (instance.name, nic_idx, err))
4032 # if we're moving instances to routed, check that they have an ip
4033 target_mode = params_filled[constants.NIC_MODE]
4034 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
4035 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
4036 " address" % (instance.name, nic_idx))
4038 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
4039 "\n".join(nic_errors))
4041 # hypervisor list/parameters
4042 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
4043 if self.op.hvparams:
4044 for hv_name, hv_dict in self.op.hvparams.items():
4045 if hv_name not in self.new_hvparams:
4046 self.new_hvparams[hv_name] = hv_dict
4047 else:
4048 self.new_hvparams[hv_name].update(hv_dict)
4050 # disk template parameters
4051 self.new_diskparams = objects.FillDict(cluster.diskparams, {})
4052 if self.op.diskparams:
4053 for dt_name, dt_params in self.op.diskparams.items():
4054 if dt_name not in self.new_diskparams:
4055 self.new_diskparams[dt_name] = dt_params
4056 else:
4057 self.new_diskparams[dt_name].update(dt_params)
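# The merge mirrors the hvparams handling above; as an illustrative,
# hypothetical example, an opcode carrying {constants.DT_DRBD8:
# {"resync-rate": 2048}} only overlays that single key on the existing
# per-template dictionary instead of replacing the whole dictionary.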
4059 # os hypervisor parameters
4060 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
4061 if self.op.os_hvp:
4062 for os_name, hvs in self.op.os_hvp.items():
4063 if os_name not in self.new_os_hvp:
4064 self.new_os_hvp[os_name] = hvs
4065 else:
4066 for hv_name, hv_dict in hvs.items():
4067 if hv_name not in self.new_os_hvp[os_name]:
4068 self.new_os_hvp[os_name][hv_name] = hv_dict
4069 else:
4070 self.new_os_hvp[os_name][hv_name].update(hv_dict)
4073 self.new_osp = objects.FillDict(cluster.osparams, {})
4074 if self.op.osparams:
4075 for os_name, osp in self.op.osparams.items():
4076 if os_name not in self.new_osp:
4077 self.new_osp[os_name] = {}
4079 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
4080 use_none=True)
4082 if not self.new_osp[os_name]:
4083 # we removed all parameters
4084 del self.new_osp[os_name]
4086 # check the parameter validity (remote check)
4087 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
4088 os_name, self.new_osp[os_name])
4090 # changes to the hypervisor list
4091 if self.op.enabled_hypervisors is not None:
4092 self.hv_list = self.op.enabled_hypervisors
4093 for hv in self.hv_list:
4094 # if the hypervisor doesn't already exist in the cluster
4095 # hvparams, we initialize it to empty, and then (in both
4096 # cases) we make sure to fill the defaults, as we might not
4097 # have a complete defaults list if the hypervisor wasn't
4098 # enabled before
4099 if hv not in new_hvp:
4100 new_hvp[hv] = {}
4101 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
4102 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
4103 else:
4104 self.hv_list = cluster.enabled_hypervisors
4106 if self.op.hvparams or self.op.enabled_hypervisors is not None:
4107 # either the enabled list has changed, or the parameters have, validate
4108 for hv_name, hv_params in self.new_hvparams.items():
4109 if ((self.op.hvparams and hv_name in self.op.hvparams) or
4110 (self.op.enabled_hypervisors and
4111 hv_name in self.op.enabled_hypervisors)):
4112 # either this is a new hypervisor, or its parameters have changed
4113 hv_class = hypervisor.GetHypervisor(hv_name)
4114 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4115 hv_class.CheckParameterSyntax(hv_params)
4116 _CheckHVParams(self, node_list, hv_name, hv_params)
4118 if self.op.os_hvp:
4119 # no need to check any newly-enabled hypervisors, since the
4120 # defaults have already been checked in the above code-block
4121 for os_name, os_hvp in self.new_os_hvp.items():
4122 for hv_name, hv_params in os_hvp.items():
4123 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4124 # we need to fill in the new os_hvp on top of the actual hv_p
4125 cluster_defaults = self.new_hvparams.get(hv_name, {})
4126 new_osp = objects.FillDict(cluster_defaults, hv_params)
4127 hv_class = hypervisor.GetHypervisor(hv_name)
4128 hv_class.CheckParameterSyntax(new_osp)
4129 _CheckHVParams(self, node_list, hv_name, new_osp)
4131 if self.op.default_iallocator:
4132 alloc_script = utils.FindFile(self.op.default_iallocator,
4133 constants.IALLOCATOR_SEARCH_PATH,
4134 os.path.isfile)
4135 if alloc_script is None:
4136 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
4137 " specified" % self.op.default_iallocator,
4140 def Exec(self, feedback_fn):
4141 """Change the parameters of the cluster.
4144 if self.op.vg_name is not None:
4145 new_volume = self.op.vg_name
4146 if not new_volume:
4147 new_volume = None
4148 if new_volume != self.cfg.GetVGName():
4149 self.cfg.SetVGName(new_volume)
4151 feedback_fn("Cluster LVM configuration already in desired"
4152 " state, not changing")
4153 if self.op.drbd_helper is not None:
4154 new_helper = self.op.drbd_helper
4155 if not new_helper:
4156 new_helper = None
4157 if new_helper != self.cfg.GetDRBDHelper():
4158 self.cfg.SetDRBDHelper(new_helper)
4160 feedback_fn("Cluster DRBD helper already in desired state,"
4162 if self.op.hvparams:
4163 self.cluster.hvparams = self.new_hvparams
4164 if self.op.os_hvp:
4165 self.cluster.os_hvp = self.new_os_hvp
4166 if self.op.enabled_hypervisors is not None:
4167 self.cluster.hvparams = self.new_hvparams
4168 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
4169 if self.op.beparams:
4170 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
4171 if self.op.nicparams:
4172 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
4173 if self.op.ipolicy:
4174 self.cluster.ipolicy = self.new_ipolicy
4175 if self.op.osparams:
4176 self.cluster.osparams = self.new_osp
4177 if self.op.ndparams:
4178 self.cluster.ndparams = self.new_ndparams
4179 if self.op.diskparams:
4180 self.cluster.diskparams = self.new_diskparams
4181 if self.op.hv_state:
4182 self.cluster.hv_state_static = self.new_hv_state
4183 if self.op.disk_state:
4184 self.cluster.disk_state_static = self.new_disk_state
4186 if self.op.candidate_pool_size is not None:
4187 self.cluster.candidate_pool_size = self.op.candidate_pool_size
4188 # we need to update the pool size here, otherwise the save will fail
4189 _AdjustCandidatePool(self, [])
4191 if self.op.maintain_node_health is not None:
4192 if self.op.maintain_node_health and not constants.ENABLE_CONFD:
4193 feedback_fn("Note: CONFD was disabled at build time, node health"
4194 " maintenance is not useful (still enabling it)")
4195 self.cluster.maintain_node_health = self.op.maintain_node_health
4197 if self.op.prealloc_wipe_disks is not None:
4198 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
4200 if self.op.add_uids is not None:
4201 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
4203 if self.op.remove_uids is not None:
4204 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
4206 if self.op.uid_pool is not None:
4207 self.cluster.uid_pool = self.op.uid_pool
4209 if self.op.default_iallocator is not None:
4210 self.cluster.default_iallocator = self.op.default_iallocator
4212 if self.op.reserved_lvs is not None:
4213 self.cluster.reserved_lvs = self.op.reserved_lvs
4215 if self.op.use_external_mip_script is not None:
4216 self.cluster.use_external_mip_script = self.op.use_external_mip_script
4218 def helper_os(aname, mods, desc):
4219 desc += " OS list"
4220 lst = getattr(self.cluster, aname)
4221 for key, val in mods:
4222 if key == constants.DDM_ADD:
4223 if val in lst:
4224 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
4225 else:
4226 lst.append(val)
4227 elif key == constants.DDM_REMOVE:
4228 if val in lst:
4229 lst.remove(val)
4230 else:
4231 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
4232 else:
4233 raise errors.ProgrammerError("Invalid modification '%s'" % key)
4235 if self.op.hidden_os:
4236 helper_os("hidden_os", self.op.hidden_os, "hidden")
4238 if self.op.blacklisted_os:
4239 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
4241 if self.op.master_netdev:
4242 master_params = self.cfg.GetMasterNetworkParameters()
4243 ems = self.cfg.GetUseExternalMipScript()
4244 feedback_fn("Shutting down master ip on the current netdev (%s)" %
4245 self.cluster.master_netdev)
4246 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4248 result.Raise("Could not disable the master ip")
4249 feedback_fn("Changing master_netdev from %s to %s" %
4250 (master_params.netdev, self.op.master_netdev))
4251 self.cluster.master_netdev = self.op.master_netdev
4253 if self.op.master_netmask:
4254 master_params = self.cfg.GetMasterNetworkParameters()
4255 feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
4256 result = self.rpc.call_node_change_master_netmask(master_params.name,
4257 master_params.netmask,
4258 self.op.master_netmask,
4259 master_params.ip,
4260 master_params.netdev)
4261 if result.fail_msg:
4262 msg = "Could not change the master IP netmask: %s" % result.fail_msg
4263 feedback_fn(msg)
4265 self.cluster.master_netmask = self.op.master_netmask
4267 self.cfg.Update(self.cluster, feedback_fn)
4269 if self.op.master_netdev:
4270 master_params = self.cfg.GetMasterNetworkParameters()
4271 feedback_fn("Starting the master ip on the new master netdev (%s)" %
4272 self.op.master_netdev)
4273 ems = self.cfg.GetUseExternalMipScript()
4274 result = self.rpc.call_node_activate_master_ip(master_params.name,
4275 master_params, ems)
4276 if result.fail_msg:
4277 self.LogWarning("Could not re-enable the master ip on"
4278 " the master, please restart manually: %s",
4279 result.fail_msg)
4282 def _UploadHelper(lu, nodes, fname):
4283 """Helper for uploading a file and showing warnings.
4286 if os.path.exists(fname):
4287 result = lu.rpc.call_upload_file(nodes, fname)
4288 for to_node, to_result in result.items():
4289 msg = to_result.fail_msg
4291 msg = ("Copy of file %s to node %s failed: %s" %
4292 (fname, to_node, msg))
4293 lu.proc.LogWarning(msg)
4296 def _ComputeAncillaryFiles(cluster, redist):
4297 """Compute files external to Ganeti which need to be consistent.
4299 @type redist: boolean
4300 @param redist: Whether to include files which need to be redistributed
4303 # Compute files for all nodes
4304 files_all = set([
4305 constants.SSH_KNOWN_HOSTS_FILE,
4306 constants.CONFD_HMAC_KEY,
4307 constants.CLUSTER_DOMAIN_SECRET_FILE,
4308 constants.SPICE_CERT_FILE,
4309 constants.SPICE_CACERT_FILE,
4310 constants.RAPI_USERS_FILE,
4311 ])
4313 if not redist:
4314 files_all.update(constants.ALL_CERT_FILES)
4315 files_all.update(ssconf.SimpleStore().GetFileList())
4316 else:
4317 # we need to ship at least the RAPI certificate
4318 files_all.add(constants.RAPI_CERT_FILE)
4320 if cluster.modify_etc_hosts:
4321 files_all.add(constants.ETC_HOSTS)
4323 if cluster.use_external_mip_script:
4324 files_all.add(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
4326 # Files which are optional, these must:
4327 # - be present in one other category as well
4328 # - either exist or not exist on all nodes of that category (mc, vm all)
4329 files_opt = set([
4330 constants.RAPI_USERS_FILE,
4331 ])
4333 # Files which should only be on master candidates
4334 files_mc = set()
4336 if not redist:
4337 files_mc.add(constants.CLUSTER_CONF_FILE)
4339 # Files which should only be on VM-capable nodes
4340 files_vm = set(filename
4341 for hv_name in cluster.enabled_hypervisors
4342 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
4344 files_opt |= set(filename
4345 for hv_name in cluster.enabled_hypervisors
4346 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
4348 # Filenames in each category must be unique
4349 all_files_set = files_all | files_mc | files_vm
4350 assert (len(all_files_set) ==
4351 sum(map(len, [files_all, files_mc, files_vm]))), \
4352 "Found file listed in more than one file list"
4354 # Optional files must be present in one other category
4355 assert all_files_set.issuperset(files_opt), \
4356 "Optional file not in a different required list"
4358 return (files_all, files_opt, files_mc, files_vm)
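# Illustrative summary of the return value: files_all are needed on every
# node (known_hosts, the confd HMAC key, ...), files_opt may legitimately
# be absent (e.g. the RAPI users file), files_mc are master-candidate-only
# (config.data outside redistribution) and files_vm are the per-hypervisor
# ancillary files; the asserts above keep the three required sets disjoint.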
4361 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
4362 """Distribute additional files which are part of the cluster configuration.
4364 ConfigWriter takes care of distributing the config and ssconf files, but
4365 there are more files which should be distributed to all nodes. This function
4366 makes sure those are copied.
4368 @param lu: calling logical unit
4369 @param additional_nodes: list of nodes not in the config to distribute to
4370 @type additional_vm: boolean
4371 @param additional_vm: whether the additional nodes are vm-capable or not
4374 # Gather target nodes
4375 cluster = lu.cfg.GetClusterInfo()
4376 master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
4378 online_nodes = lu.cfg.GetOnlineNodeList()
4379 online_set = frozenset(online_nodes)
4380 vm_nodes = list(online_set.intersection(lu.cfg.GetVmCapableNodeList()))
4382 if additional_nodes is not None:
4383 online_nodes.extend(additional_nodes)
4384 if additional_vm:
4385 vm_nodes.extend(additional_nodes)
4387 # Never distribute to master node
4388 for nodelist in [online_nodes, vm_nodes]:
4389 if master_info.name in nodelist:
4390 nodelist.remove(master_info.name)
4393 (files_all, _, files_mc, files_vm) = \
4394 _ComputeAncillaryFiles(cluster, True)
4396 # Never re-distribute configuration file from here
4397 assert not (constants.CLUSTER_CONF_FILE in files_all or
4398 constants.CLUSTER_CONF_FILE in files_vm)
4399 assert not files_mc, "Master candidates not handled in this function"
4401 filemap = [
4402 (online_nodes, files_all),
4403 (vm_nodes, files_vm),
4404 ]
4407 for (node_list, files) in filemap:
4408 for fname in files:
4409 _UploadHelper(lu, node_list, fname)
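# Hedged usage sketch: callers in this module invoke this either as
#   _RedistributeAncillaryFiles(self)
# after cluster-wide changes, or with additional_nodes=[...] from the
# node-add path so a new node receives the files before it shows up in the
# node list.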
4412 class LUClusterRedistConf(NoHooksLU):
4413 """Force the redistribution of cluster configuration.
4415 This is a very simple LU.
4420 def ExpandNames(self):
4421 self.needed_locks = {
4422 locking.LEVEL_NODE: locking.ALL_SET,
4424 self.share_locks[locking.LEVEL_NODE] = 1
4426 def Exec(self, feedback_fn):
4427 """Redistribute the configuration.
4430 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
4431 _RedistributeAncillaryFiles(self)
4434 class LUClusterActivateMasterIp(NoHooksLU):
4435 """Activate the master IP on the master node.
4438 def Exec(self, feedback_fn):
4439 """Activate the master IP.
4442 master_params = self.cfg.GetMasterNetworkParameters()
4443 ems = self.cfg.GetUseExternalMipScript()
4444 result = self.rpc.call_node_activate_master_ip(master_params.name,
4446 result.Raise("Could not activate the master IP")
4449 class LUClusterDeactivateMasterIp(NoHooksLU):
4450 """Deactivate the master IP on the master node.
4453 def Exec(self, feedback_fn):
4454 """Deactivate the master IP.
4457 master_params = self.cfg.GetMasterNetworkParameters()
4458 ems = self.cfg.GetUseExternalMipScript()
4459 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4461 result.Raise("Could not deactivate the master IP")
4464 def _WaitForSync(lu, instance, disks=None, oneshot=False):
4465 """Sleep and poll for an instance's disk to sync.
4468 if not instance.disks or disks is not None and not disks:
4469 return True
4471 disks = _ExpandCheckDisks(instance, disks)
4474 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
4476 node = instance.primary_node
4478 for dev in disks:
4479 lu.cfg.SetDiskID(dev, node)
4481 # TODO: Convert to utils.Retry
4484 degr_retries = 10 # in seconds, as we sleep 1 second each time
4488 cumul_degraded = False
4489 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, (disks, instance))
4490 msg = rstats.fail_msg
4492 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
4495 raise errors.RemoteError("Can't contact node %s for mirror data,"
4496 " aborting." % node)
4499 rstats = rstats.payload
4501 for i, mstat in enumerate(rstats):
4503 lu.LogWarning("Can't compute data for node %s/%s",
4504 node, disks[i].iv_name)
4507 cumul_degraded = (cumul_degraded or
4508 (mstat.is_degraded and mstat.sync_percent is None))
4509 if mstat.sync_percent is not None:
4510 done = False
4511 if mstat.estimated_time is not None:
4512 rem_time = ("%s remaining (estimated)" %
4513 utils.FormatSeconds(mstat.estimated_time))
4514 max_time = mstat.estimated_time
4516 rem_time = "no time estimate"
4517 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
4518 (disks[i].iv_name, mstat.sync_percent, rem_time))
4520 # if we're done but degraded, let's do a few small retries, to
4521 # make sure we see a stable and not transient situation; therefore
4522 # we force restart of the loop
4523 if (done or oneshot) and cumul_degraded and degr_retries > 0:
4524 logging.info("Degraded disks found, %d retries left", degr_retries)
4532 time.sleep(min(60, max_time))
4535 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
4536 return not cumul_degraded
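# A typical caller pattern (sketch, not verbatim from this module):
#   if not _WaitForSync(lu, instance):
#     raise errors.OpExecError("disks are still degraded after sync")
# i.e. a False return value means the wait ended while a mirror was still
# degraded.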
4539 def _BlockdevFind(lu, node, dev, instance):
4540 """Wrapper around call_blockdev_find to annotate diskparams.
4542 @param lu: A reference to the lu object
4543 @param node: The node to call out
4544 @param dev: The device to find
4545 @param instance: The instance object the device belongs to
4546 @returns The result of the rpc call
4549 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4550 return lu.rpc.call_blockdev_find(node, disk)
4553 def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
4554 """Wrapper around L{_CheckDiskConsistencyInner}.
4557 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4558 return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
4559 ldisk=ldisk)
4562 def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
4563 ldisk=False):
4564 """Check that mirrors are not degraded.
4566 @attention: The device has to be annotated already.
4568 The ldisk parameter, if True, will change the test from the
4569 is_degraded attribute (which represents overall non-ok status for
4570 the device(s)) to the ldisk (representing the local storage status).
4573 lu.cfg.SetDiskID(dev, node)
4575 result = True
4577 if on_primary or dev.AssembleOnSecondary():
4578 rstats = lu.rpc.call_blockdev_find(node, dev)
4579 msg = rstats.fail_msg
4581 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
4583 elif not rstats.payload:
4584 lu.LogWarning("Can't find disk on node %s", node)
4588 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
4590 result = result and not rstats.payload.is_degraded
4592 if dev.children:
4593 for child in dev.children:
4594 result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
4595 on_primary)
4597 return result
4600 class LUOobCommand(NoHooksLU):
4601 """Logical unit for OOB handling.
4605 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
4607 def ExpandNames(self):
4608 """Gather locks we need.
4611 if self.op.node_names:
4612 self.op.node_names = _GetWantedNodes(self, self.op.node_names)
4613 lock_names = self.op.node_names
4614 else:
4615 lock_names = locking.ALL_SET
4617 self.needed_locks = {
4618 locking.LEVEL_NODE: lock_names,
4621 def CheckPrereq(self):
4622 """Check prerequisites.
4625 - the node exists in the configuration
4628 Any errors are signaled by raising errors.OpPrereqError.
4631 self.nodes = []
4632 self.master_node = self.cfg.GetMasterNode()
4634 assert self.op.power_delay >= 0.0
4636 if self.op.node_names:
4637 if (self.op.command in self._SKIP_MASTER and
4638 self.master_node in self.op.node_names):
4639 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
4640 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
4642 if master_oob_handler:
4643 additional_text = ("run '%s %s %s' if you want to operate on the"
4644 " master regardless") % (master_oob_handler,
4648 additional_text = "it does not support out-of-band operations"
4650 raise errors.OpPrereqError(("Operating on the master node %s is not"
4651 " allowed for %s; %s") %
4652 (self.master_node, self.op.command,
4653 additional_text), errors.ECODE_INVAL)
4654 else:
4655 self.op.node_names = self.cfg.GetNodeList()
4656 if self.op.command in self._SKIP_MASTER:
4657 self.op.node_names.remove(self.master_node)
4659 if self.op.command in self._SKIP_MASTER:
4660 assert self.master_node not in self.op.node_names
4662 for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
4663 if node is None:
4664 raise errors.OpPrereqError("Node %s not found" % node_name,
4665 errors.ECODE_NOENT)
4667 self.nodes.append(node)
4669 if (not self.op.ignore_status and
4670 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
4671 raise errors.OpPrereqError(("Cannot power off node %s because it is"
4672 " not marked offline") % node_name,
4675 def Exec(self, feedback_fn):
4676 """Execute OOB and return result if we expect any.
4679 master_node = self.master_node
4680 ret = []
4682 for idx, node in enumerate(utils.NiceSort(self.nodes,
4683 key=lambda node: node.name)):
4684 node_entry = [(constants.RS_NORMAL, node.name)]
4685 ret.append(node_entry)
4687 oob_program = _SupportsOob(self.cfg, node)
4689 if not oob_program:
4690 node_entry.append((constants.RS_UNAVAIL, None))
4691 continue
4693 logging.info("Executing out-of-band command '%s' using '%s' on %s",
4694 self.op.command, oob_program, node.name)
4695 result = self.rpc.call_run_oob(master_node, oob_program,
4696 self.op.command, node.name,
4700 self.LogWarning("Out-of-band RPC failed on node '%s': %s",
4701 node.name, result.fail_msg)
4702 node_entry.append((constants.RS_NODATA, None))
4703 else:
4704 try:
4705 self._CheckPayload(result)
4706 except errors.OpExecError, err:
4707 self.LogWarning("Payload returned by node '%s' is not valid: %s",
4708 node.name, err)
4709 node_entry.append((constants.RS_NODATA, None))
4710 else:
4711 if self.op.command == constants.OOB_HEALTH:
4712 # For health we should log important events
4713 for item, status in result.payload:
4714 if status in [constants.OOB_STATUS_WARNING,
4715 constants.OOB_STATUS_CRITICAL]:
4716 self.LogWarning("Item '%s' on node '%s' has status '%s'",
4717 item, node.name, status)
4719 if self.op.command == constants.OOB_POWER_ON:
4720 node.powered = True
4721 elif self.op.command == constants.OOB_POWER_OFF:
4722 node.powered = False
4723 elif self.op.command == constants.OOB_POWER_STATUS:
4724 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
4725 if powered != node.powered:
4726 logging.warning(("Recorded power state (%s) of node '%s' does not"
4727 " match actual power state (%s)"), node.powered,
4730 # For configuration changing commands we should update the node
4731 if self.op.command in (constants.OOB_POWER_ON,
4732 constants.OOB_POWER_OFF):
4733 self.cfg.Update(node, feedback_fn)
4735 node_entry.append((constants.RS_NORMAL, result.payload))
4737 if (self.op.command == constants.OOB_POWER_ON and
4738 idx < len(self.nodes) - 1):
4739 time.sleep(self.op.power_delay)
4741 return ret
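# Illustrative shape of the returned list for a two-node power-status query
# (hypothetical node names):
#   [[(constants.RS_NORMAL, "node1"), (constants.RS_NORMAL, {"powered": True})],
#    [(constants.RS_NORMAL, "node2"), (constants.RS_NODATA, None)]]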
4743 def _CheckPayload(self, result):
4744 """Checks if the payload is valid.
4746 @param result: RPC result
4747 @raises errors.OpExecError: If payload is not valid
4750 errs = []
4751 if self.op.command == constants.OOB_HEALTH:
4752 if not isinstance(result.payload, list):
4753 errs.append("command 'health' is expected to return a list but got %s" %
4754 type(result.payload))
4755 else:
4756 for item, status in result.payload:
4757 if status not in constants.OOB_STATUSES:
4758 errs.append("health item '%s' has invalid status '%s'" %
4761 if self.op.command == constants.OOB_POWER_STATUS:
4762 if not isinstance(result.payload, dict):
4763 errs.append("power-status is expected to return a dict but got %s" %
4764 type(result.payload))
4766 if self.op.command in [
4767 constants.OOB_POWER_ON,
4768 constants.OOB_POWER_OFF,
4769 constants.OOB_POWER_CYCLE,
4770 ]:
4771 if result.payload is not None:
4772 errs.append("%s is expected to not return payload but got '%s'" %
4773 (self.op.command, result.payload))
4776 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
4777 utils.CommaJoin(errs))
4780 class _OsQuery(_QueryBase):
4781 FIELDS = query.OS_FIELDS
4783 def ExpandNames(self, lu):
4784 # Lock all nodes in shared mode
4785 # Temporary removal of locks, should be reverted later
4786 # TODO: reintroduce locks when they are lighter-weight
4787 lu.needed_locks = {}
4788 #self.share_locks[locking.LEVEL_NODE] = 1
4789 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4791 # The following variables interact with _QueryBase._GetNames
4792 if self.names:
4793 self.wanted = self.names
4794 else:
4795 self.wanted = locking.ALL_SET
4797 self.do_locking = self.use_locking
4799 def DeclareLocks(self, lu, level):
4803 def _DiagnoseByOS(rlist):
4804 """Remaps a per-node return list into an a per-os per-node dictionary
4806 @param rlist: a map with node names as keys and OS objects as values
4809 @return: a dictionary with osnames as keys and as value another
4810 map, with nodes as keys and tuples of (path, status, diagnose,
4811 variants, parameters, api_versions) as values, eg::
4813 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
4814 (/srv/..., False, "invalid api")],
4815 "node2": [(/srv/..., True, "", [], [])]}
4819 all_os = {}
4820 # we build here the list of nodes that didn't fail the RPC (at RPC
4821 # level), so that nodes with a non-responding node daemon don't
4822 # make all OSes invalid
4823 good_nodes = [node_name for node_name in rlist
4824 if not rlist[node_name].fail_msg]
4825 for node_name, nr in rlist.items():
4826 if nr.fail_msg or not nr.payload:
4827 continue
4828 for (name, path, status, diagnose, variants,
4829 params, api_versions) in nr.payload:
4830 if name not in all_os:
4831 # build a list of nodes for this os containing empty lists
4832 # for each node in node_list
4833 all_os[name] = {}
4834 for nname in good_nodes:
4835 all_os[name][nname] = []
4836 # convert params from [name, help] to (name, help)
4837 params = [tuple(v) for v in params]
4838 all_os[name][node_name].append((path, status, diagnose,
4839 variants, params, api_versions))
4841 return all_os
4842 def _GetQueryData(self, lu):
4843 """Computes the list of nodes and their attributes.
4846 # Locking is not used
4847 assert not (compat.any(lu.glm.is_owned(level)
4848 for level in locking.LEVELS
4849 if level != locking.LEVEL_CLUSTER) or
4850 self.do_locking or self.use_locking)
4852 valid_nodes = [node.name
4853 for node in lu.cfg.GetAllNodesInfo().values()
4854 if not node.offline and node.vm_capable]
4855 pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
4856 cluster = lu.cfg.GetClusterInfo()
4860 for (os_name, os_data) in pol.items():
4861 info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
4862 hidden=(os_name in cluster.hidden_os),
4863 blacklisted=(os_name in cluster.blacklisted_os))
4865 variants = set()
4866 parameters = set()
4867 api_versions = set()
4869 for idx, osl in enumerate(os_data.values()):
4870 info.valid = bool(info.valid and osl and osl[0][1])
4871 if not info.valid:
4872 break
4874 (node_variants, node_params, node_api) = osl[0][3:6]
4875 if idx == 0:
4876 # First entry
4877 variants.update(node_variants)
4878 parameters.update(node_params)
4879 api_versions.update(node_api)
4880 else:
4881 # Filter out inconsistent values
4882 variants.intersection_update(node_variants)
4883 parameters.intersection_update(node_params)
4884 api_versions.intersection_update(node_api)
4886 info.variants = list(variants)
4887 info.parameters = list(parameters)
4888 info.api_versions = list(api_versions)
4890 data[os_name] = info
4892 # Prepare data in requested order
4893 return [data[name] for name in self._GetNames(lu, pol.keys(), None)
4894 if name in data]
4897 class LUOsDiagnose(NoHooksLU):
4898 """Logical unit for OS diagnose/query.
4904 def _BuildFilter(fields, names):
4905 """Builds a filter for querying OSes.
4908 name_filter = qlang.MakeSimpleFilter("name", names)
4910 # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
4911 # respective field is not requested
4912 status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
4913 for fname in ["hidden", "blacklisted"]
4914 if fname not in fields]
4915 if "valid" not in fields:
4916 status_filter.append([qlang.OP_TRUE, "valid"])
4918 if status_filter:
4919 status_filter.insert(0, qlang.OP_AND)
4920 else:
4921 status_filter = None
4923 if name_filter and status_filter:
4924 return [qlang.OP_AND, name_filter, status_filter]
4925 elif name_filter:
4926 return name_filter
4927 else:
4928 return status_filter
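# Illustrative result (hypothetical query): _BuildFilter(["name"], ["lenny"])
# would yield roughly
#   [qlang.OP_AND,
#    [qlang.OP_OR, [qlang.OP_EQUAL, "name", "lenny"]],
#    [qlang.OP_AND,
#     [qlang.OP_NOT, [qlang.OP_TRUE, "hidden"]],
#     [qlang.OP_NOT, [qlang.OP_TRUE, "blacklisted"]],
#     [qlang.OP_TRUE, "valid"]]]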
4930 def CheckArguments(self):
4931 self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
4932 self.op.output_fields, False)
4934 def ExpandNames(self):
4935 self.oq.ExpandNames(self)
4937 def Exec(self, feedback_fn):
4938 return self.oq.OldStyleQuery(self)
4941 class LUNodeRemove(LogicalUnit):
4942 """Logical unit for removing a node.
4945 HPATH = "node-remove"
4946 HTYPE = constants.HTYPE_NODE
4948 def BuildHooksEnv(self):
4953 "OP_TARGET": self.op.node_name,
4954 "NODE_NAME": self.op.node_name,
4957 def BuildHooksNodes(self):
4958 """Build hooks nodes.
4960 This doesn't run on the target node in the pre phase as a failed
4961 node would then be impossible to remove.
4964 all_nodes = self.cfg.GetNodeList()
4965 try:
4966 all_nodes.remove(self.op.node_name)
4967 except ValueError:
4968 pass
4969 return (all_nodes, all_nodes)
4971 def CheckPrereq(self):
4972 """Check prerequisites.
4975 - the node exists in the configuration
4976 - it does not have primary or secondary instances
4977 - it's not the master
4979 Any errors are signaled by raising errors.OpPrereqError.
4982 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4983 node = self.cfg.GetNodeInfo(self.op.node_name)
4984 assert node is not None
4986 masternode = self.cfg.GetMasterNode()
4987 if node.name == masternode:
4988 raise errors.OpPrereqError("Node is the master node, failover to another"
4989 " node is required", errors.ECODE_INVAL)
4991 for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
4992 if node.name in instance.all_nodes:
4993 raise errors.OpPrereqError("Instance %s is still running on the node,"
4994 " please remove first" % instance_name,
4996 self.op.node_name = node.name
4997 self.node = node
4999 def Exec(self, feedback_fn):
5000 """Removes the node from the cluster.
5004 logging.info("Stopping the node daemon and removing configs from node %s",
5007 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
5009 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5012 # Promote nodes to master candidate as needed
5013 _AdjustCandidatePool(self, exceptions=[node.name])
5014 self.context.RemoveNode(node.name)
5016 # Run post hooks on the node before it's removed
5017 _RunPostHook(self, node.name)
5019 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
5020 msg = result.fail_msg
5022 self.LogWarning("Errors encountered on the remote node while leaving"
5023 " the cluster: %s", msg)
5025 # Remove node from our /etc/hosts
5026 if self.cfg.GetClusterInfo().modify_etc_hosts:
5027 master_node = self.cfg.GetMasterNode()
5028 result = self.rpc.call_etc_hosts_modify(master_node,
5029 constants.ETC_HOSTS_REMOVE,
5031 result.Raise("Can't update hosts file with new host data")
5032 _RedistributeAncillaryFiles(self)
5035 class _NodeQuery(_QueryBase):
5036 FIELDS = query.NODE_FIELDS
5038 def ExpandNames(self, lu):
5039 lu.needed_locks = {}
5040 lu.share_locks = _ShareAll()
5042 if self.names:
5043 self.wanted = _GetWantedNodes(lu, self.names)
5044 else:
5045 self.wanted = locking.ALL_SET
5047 self.do_locking = (self.use_locking and
5048 query.NQ_LIVE in self.requested_data)
5050 if self.do_locking:
5051 # If any non-static field is requested we need to lock the nodes
5052 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
5054 def DeclareLocks(self, lu, level):
5057 def _GetQueryData(self, lu):
5058 """Computes the list of nodes and their attributes.
5061 all_info = lu.cfg.GetAllNodesInfo()
5063 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
5065 # Gather data as requested
5066 if query.NQ_LIVE in self.requested_data:
5067 # filter out non-vm_capable nodes
5068 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
5070 node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
5071 [lu.cfg.GetHypervisorType()])
5072 live_data = dict((name, _MakeLegacyNodeInfo(nresult.payload))
5073 for (name, nresult) in node_data.items()
5074 if not nresult.fail_msg and nresult.payload)
5075 else:
5076 live_data = None
5078 if query.NQ_INST in self.requested_data:
5079 node_to_primary = dict([(name, set()) for name in nodenames])
5080 node_to_secondary = dict([(name, set()) for name in nodenames])
5082 inst_data = lu.cfg.GetAllInstancesInfo()
5084 for inst in inst_data.values():
5085 if inst.primary_node in node_to_primary:
5086 node_to_primary[inst.primary_node].add(inst.name)
5087 for secnode in inst.secondary_nodes:
5088 if secnode in node_to_secondary:
5089 node_to_secondary[secnode].add(inst.name)
5090 else:
5091 node_to_primary = None
5092 node_to_secondary = None
5094 if query.NQ_OOB in self.requested_data:
5095 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
5096 for name, node in all_info.iteritems())
5097 else:
5098 oob_support = None
5100 if query.NQ_GROUP in self.requested_data:
5101 groups = lu.cfg.GetAllNodeGroupsInfo()
5102 else:
5103 groups = None
5105 return query.NodeQueryData([all_info[name] for name in nodenames],
5106 live_data, lu.cfg.GetMasterNode(),
5107 node_to_primary, node_to_secondary, groups,
5108 oob_support, lu.cfg.GetClusterInfo())
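# Sketch of the gathering contract: each query.NQ_* flag in requested_data
# enables exactly one optional block above, so a purely static field list
# skips the node RPC entirely and live_data/node_to_primary/oob_support
# arrive at query.NodeQueryData as None.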
5111 class LUNodeQuery(NoHooksLU):
5112 """Logical unit for querying nodes.
5115 # pylint: disable=W0142
5118 def CheckArguments(self):
5119 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
5120 self.op.output_fields, self.op.use_locking)
5122 def ExpandNames(self):
5123 self.nq.ExpandNames(self)
5125 def DeclareLocks(self, level):
5126 self.nq.DeclareLocks(self, level)
5128 def Exec(self, feedback_fn):
5129 return self.nq.OldStyleQuery(self)
5132 class LUNodeQueryvols(NoHooksLU):
5133 """Logical unit for getting volumes on node(s).
5137 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
5138 _FIELDS_STATIC = utils.FieldSet("node")
5140 def CheckArguments(self):
5141 _CheckOutputFields(static=self._FIELDS_STATIC,
5142 dynamic=self._FIELDS_DYNAMIC,
5143 selected=self.op.output_fields)
5145 def ExpandNames(self):
5146 self.share_locks = _ShareAll()
5147 self.needed_locks = {}
5149 if not self.op.nodes:
5150 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5152 self.needed_locks[locking.LEVEL_NODE] = \
5153 _GetWantedNodes(self, self.op.nodes)
5155 def Exec(self, feedback_fn):
5156 """Computes the list of nodes and their attributes.
5159 nodenames = self.owned_locks(locking.LEVEL_NODE)
5160 volumes = self.rpc.call_node_volumes(nodenames)
5162 ilist = self.cfg.GetAllInstancesInfo()
5163 vol2inst = _MapInstanceDisksToNodes(ilist.values())
5165 output = []
5166 for node in nodenames:
5167 nresult = volumes[node]
5168 if nresult.offline:
5169 continue
5170 msg = nresult.fail_msg
5171 if msg:
5172 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
5173 continue
5175 node_vols = sorted(nresult.payload,
5176 key=operator.itemgetter("dev"))
5178 for vol in node_vols:
5179 node_output = []
5180 for field in self.op.output_fields:
5181 if field == "node":
5182 val = node
5183 elif field == "phys":
5184 val = vol["dev"]
5185 elif field == "vg":
5186 val = vol["vg"]
5187 elif field == "name":
5188 val = vol["name"]
5189 elif field == "size":
5190 val = int(float(vol["size"]))
5191 elif field == "instance":
5192 val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
5194 raise errors.ParameterError(field)
5195 node_output.append(str(val))
5197 output.append(node_output)
5199 return output
5202 class LUNodeQueryStorage(NoHooksLU):
5203 """Logical unit for getting information on storage units on node(s).
5206 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
5209 def CheckArguments(self):
5210 _CheckOutputFields(static=self._FIELDS_STATIC,
5211 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
5212 selected=self.op.output_fields)
5214 def ExpandNames(self):
5215 self.share_locks = _ShareAll()
5216 self.needed_locks = {}
5218 if self.op.nodes:
5219 self.needed_locks[locking.LEVEL_NODE] = \
5220 _GetWantedNodes(self, self.op.nodes)
5221 else:
5222 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5224 def Exec(self, feedback_fn):
5225 """Computes the list of nodes and their attributes.
5228 self.nodes = self.owned_locks(locking.LEVEL_NODE)
5230 # Always get name to sort by
5231 if constants.SF_NAME in self.op.output_fields:
5232 fields = self.op.output_fields[:]
5233 else:
5234 fields = [constants.SF_NAME] + self.op.output_fields
5236 # Never ask for node or type as it's only known to the LU
5237 for extra in [constants.SF_NODE, constants.SF_TYPE]:
5238 while extra in fields:
5239 fields.remove(extra)
5241 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
5242 name_idx = field_idx[constants.SF_NAME]
5244 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5245 data = self.rpc.call_storage_list(self.nodes,
5246 self.op.storage_type, st_args,
5247 self.op.name, fields)
5249 result = []
5251 for node in utils.NiceSort(self.nodes):
5252 nresult = data[node]
5253 if nresult.offline:
5254 continue
5256 msg = nresult.fail_msg
5257 if msg:
5258 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
5259 continue
5261 rows = dict([(row[name_idx], row) for row in nresult.payload])
5263 for name in utils.NiceSort(rows.keys()):
5264 row = rows[name]
5266 out = []
5268 for field in self.op.output_fields:
5269 if field == constants.SF_NODE:
5270 val = node
5271 elif field == constants.SF_TYPE:
5272 val = self.op.storage_type
5273 elif field in field_idx:
5274 val = row[field_idx[field]]
5275 else:
5276 raise errors.ParameterError(field)
5278 out.append(val)
5280 result.append(out)
5282 return result
5285 class _InstanceQuery(_QueryBase):
5286 FIELDS = query.INSTANCE_FIELDS
5288 def ExpandNames(self, lu):
5289 lu.needed_locks = {}
5290 lu.share_locks = _ShareAll()
5292 if self.names:
5293 self.wanted = _GetWantedInstances(lu, self.names)
5294 else:
5295 self.wanted = locking.ALL_SET
5297 self.do_locking = (self.use_locking and
5298 query.IQ_LIVE in self.requested_data)
5299 if self.do_locking:
5300 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5301 lu.needed_locks[locking.LEVEL_NODEGROUP] = []
5302 lu.needed_locks[locking.LEVEL_NODE] = []
5303 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5305 self.do_grouplocks = (self.do_locking and
5306 query.IQ_NODES in self.requested_data)
5308 def DeclareLocks(self, lu, level):
5310 if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
5311 assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
5313 # Lock all groups used by instances optimistically; this requires going
5314 # via the node before it's locked, requiring verification later on
5315 lu.needed_locks[locking.LEVEL_NODEGROUP] = \
5316 set(group_uuid
5317 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
5318 for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
5319 elif level == locking.LEVEL_NODE:
5320 lu._LockInstancesNodes() # pylint: disable=W0212
5323 def _CheckGroupLocks(lu):
5324 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
5325 owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
5327 # Check if node groups for locked instances are still correct
5328 for instance_name in owned_instances:
5329 _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
5331 def _GetQueryData(self, lu):
5332 """Computes the list of instances and their attributes.
5335 if self.do_grouplocks:
5336 self._CheckGroupLocks(lu)
5338 cluster = lu.cfg.GetClusterInfo()
5339 all_info = lu.cfg.GetAllInstancesInfo()
5341 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
5343 instance_list = [all_info[name] for name in instance_names]
5344 nodes = frozenset(itertools.chain(*(inst.all_nodes
5345 for inst in instance_list)))
5346 hv_list = list(set([inst.hypervisor for inst in instance_list]))
5347 bad_nodes = []
5348 offline_nodes = []
5349 wrongnode_inst = set()
5351 # Gather data as requested
5352 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
5353 live_data = {}
5354 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
5355 for name in nodes:
5356 result = node_data[name]
5357 if result.offline:
5358 # offline nodes will be in both lists
5359 assert result.fail_msg
5360 offline_nodes.append(name)
5361 if result.fail_msg:
5362 bad_nodes.append(name)
5363 elif result.payload:
5364 for inst in result.payload:
5365 if inst in all_info:
5366 if all_info[inst].primary_node == name:
5367 live_data.update(result.payload)
5368 else:
5369 wrongnode_inst.add(inst)
5370 else:
5371 # orphan instance; we don't list it here as we don't
5372 # handle this case yet in the output of instance listing
5373 logging.warning("Orphan instance '%s' found on node %s",
5375 # else no instance is alive
5379 if query.IQ_DISKUSAGE in self.requested_data:
5380 disk_usage = dict((inst.name,
5381 _ComputeDiskSize(inst.disk_template,
5382 [{constants.IDISK_SIZE: disk.size}
5383 for disk in inst.disks]))
5384 for inst in instance_list)
5385 else:
5386 disk_usage = None
5388 if query.IQ_CONSOLE in self.requested_data:
5389 consinfo = {}
5390 for inst in instance_list:
5391 if inst.name in live_data:
5392 # Instance is running
5393 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
5394 else:
5395 consinfo[inst.name] = None
5396 assert set(consinfo.keys()) == set(instance_names)
5397 else:
5398 consinfo = None
5400 if query.IQ_NODES in self.requested_data:
5401 node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
5402 instance_list)))
5403 nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
5404 groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
5405 for uuid in set(map(operator.attrgetter("group"),
5411 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
5412 disk_usage, offline_nodes, bad_nodes,
5413 live_data, wrongnode_inst, consinfo,
5414 nodes, groups)
5417 class LUQuery(NoHooksLU):
5418 """Query for resources/items of a certain kind.
5421 # pylint: disable=W0142
5424 def CheckArguments(self):
5425 qcls = _GetQueryImplementation(self.op.what)
5427 self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
5429 def ExpandNames(self):
5430 self.impl.ExpandNames(self)
5432 def DeclareLocks(self, level):
5433 self.impl.DeclareLocks(self, level)
5435 def Exec(self, feedback_fn):
5436 return self.impl.NewStyleQuery(self)
5439 class LUQueryFields(NoHooksLU):
5440 """Query for resources/items of a certain kind.
5443 # pylint: disable=W0142
5446 def CheckArguments(self):
5447 self.qcls = _GetQueryImplementation(self.op.what)
5449 def ExpandNames(self):
5450 self.needed_locks = {}
5452 def Exec(self, feedback_fn):
5453 return query.QueryFields(self.qcls.FIELDS, self.op.fields)
5456 class LUNodeModifyStorage(NoHooksLU):
5457 """Logical unit for modifying a storage volume on a node.
5462 def CheckArguments(self):
5463 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5465 storage_type = self.op.storage_type
5467 try:
5468 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
5469 except KeyError:
5470 raise errors.OpPrereqError("Storage units of type '%s' can not be"
5471 " modified" % storage_type,
5474 diff = set(self.op.changes.keys()) - modifiable
5476 raise errors.OpPrereqError("The following fields can not be modified for"
5477 " storage units of type '%s': %r" %
5478 (storage_type, list(diff)),
5481 def ExpandNames(self):
5482 self.needed_locks = {
5483 locking.LEVEL_NODE: self.op.node_name,
5486 def Exec(self, feedback_fn):
5487 """Computes the list of nodes and their attributes.
5490 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5491 result = self.rpc.call_storage_modify(self.op.node_name,
5492 self.op.storage_type, st_args,
5493 self.op.name, self.op.changes)
5494 result.Raise("Failed to modify storage unit '%s' on %s" %
5495 (self.op.name, self.op.node_name))
5498 class LUNodeAdd(LogicalUnit):
5499 """Logical unit for adding node to the cluster.
5502 HPATH = "node-add"
5503 HTYPE = constants.HTYPE_NODE
5504 _NFLAGS = ["master_capable", "vm_capable"]
5506 def CheckArguments(self):
5507 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
5508 # validate/normalize the node name
5509 self.hostname = netutils.GetHostname(name=self.op.node_name,
5510 family=self.primary_ip_family)
5511 self.op.node_name = self.hostname.name
5513 if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
5514 raise errors.OpPrereqError("Cannot readd the master node",
5517 if self.op.readd and self.op.group:
5518 raise errors.OpPrereqError("Cannot pass a node group when a node is"
5519 " being readded", errors.ECODE_INVAL)
5521 def BuildHooksEnv(self):
5524 This will run on all nodes before, and on all nodes + the new node after.
5528 "OP_TARGET": self.op.node_name,
5529 "NODE_NAME": self.op.node_name,
5530 "NODE_PIP": self.op.primary_ip,
5531 "NODE_SIP": self.op.secondary_ip,
5532 "MASTER_CAPABLE": str(self.op.master_capable),
5533 "VM_CAPABLE": str(self.op.vm_capable),
5536 def BuildHooksNodes(self):
5537 """Build hooks nodes.
5540 # Exclude added node
5541 pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
5542 post_nodes = pre_nodes + [self.op.node_name, ]
5544 return (pre_nodes, post_nodes)
5546 def CheckPrereq(self):
5547 """Check prerequisites.
5550 - the new node is not already in the config
5552 - its parameters (single/dual homed) match the cluster
5554 Any errors are signaled by raising errors.OpPrereqError.
5557 cfg = self.cfg
5558 hostname = self.hostname
5559 node = hostname.name
5560 primary_ip = self.op.primary_ip = hostname.ip
5561 if self.op.secondary_ip is None:
5562 if self.primary_ip_family == netutils.IP6Address.family:
5563 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
5564 " IPv4 address must be given as secondary",
5566 self.op.secondary_ip = primary_ip
5568 secondary_ip = self.op.secondary_ip
5569 if not netutils.IP4Address.IsValid(secondary_ip):
5570 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5571 " address" % secondary_ip, errors.ECODE_INVAL)
5573 node_list = cfg.GetNodeList()
5574 if not self.op.readd and node in node_list:
5575 raise errors.OpPrereqError("Node %s is already in the configuration" %
5576 node, errors.ECODE_EXISTS)
5577 elif self.op.readd and node not in node_list:
5578 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
5581 self.changed_primary_ip = False
5583 for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
5584 if self.op.readd and node == existing_node_name:
5585 if existing_node.secondary_ip != secondary_ip:
5586 raise errors.OpPrereqError("Readded node doesn't have the same IP"
5587 " address configuration as before",
5589 if existing_node.primary_ip != primary_ip:
5590 self.changed_primary_ip = True
5592 continue
5594 if (existing_node.primary_ip == primary_ip or
5595 existing_node.secondary_ip == primary_ip or
5596 existing_node.primary_ip == secondary_ip or
5597 existing_node.secondary_ip == secondary_ip):
5598 raise errors.OpPrereqError("New node ip address(es) conflict with"
5599 " existing node %s" % existing_node.name,
5600 errors.ECODE_NOTUNIQUE)
5602 # After this 'if' block, None is no longer a valid value for the
5603 # _capable op attributes
5604 if self.op.readd:
5605 old_node = self.cfg.GetNodeInfo(node)
5606 assert old_node is not None, "Can't retrieve locked node %s" % node
5607 for attr in self._NFLAGS:
5608 if getattr(self.op, attr) is None:
5609 setattr(self.op, attr, getattr(old_node, attr))
5610 else:
5611 for attr in self._NFLAGS:
5612 if getattr(self.op, attr) is None:
5613 setattr(self.op, attr, True)
5615 if self.op.readd and not self.op.vm_capable:
5616 pri, sec = cfg.GetNodeInstances(node)
5618 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
5619 " flag set to false, but it already holds"
5620 " instances" % node,
5623 # check that the type of the node (single versus dual homed) is the
5624 # same as for the master
5625 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
5626 master_singlehomed = myself.secondary_ip == myself.primary_ip
5627 newbie_singlehomed = secondary_ip == primary_ip
5628 if master_singlehomed != newbie_singlehomed:
5629 if master_singlehomed:
5630 raise errors.OpPrereqError("The master has no secondary ip but the"
5631 " new node has one",
5634 raise errors.OpPrereqError("The master has a secondary ip but the"
5635 " new node doesn't have one",
5638 # checks reachability
5639 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
5640 raise errors.OpPrereqError("Node not reachable by ping",
5641 errors.ECODE_ENVIRON)
5643 if not newbie_singlehomed:
5644 # check reachability from my secondary ip to newbie's secondary ip
5645 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
5646 source=myself.secondary_ip):
5647 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5648 " based ping to node daemon port",
5649 errors.ECODE_ENVIRON)
5651 if self.op.readd:
5652 exceptions = [node]
5653 else:
5654 exceptions = []
5656 if self.op.master_capable:
5657 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
5658 else:
5659 self.master_candidate = False
5661 if self.op.readd:
5662 self.new_node = old_node
5663 else:
5664 node_group = cfg.LookupNodeGroup(self.op.group)
5665 self.new_node = objects.Node(name=node,
5666 primary_ip=primary_ip,
5667 secondary_ip=secondary_ip,
5668 master_candidate=self.master_candidate,
5669 offline=False, drained=False,
5670 group=node_group)
5672 if self.op.ndparams:
5673 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
5675 if self.op.hv_state:
5676 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
5678 if self.op.disk_state:
5679 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
5681 # TODO: If we need to have multiple DnsOnlyRunner we probably should make
5682 # it a property on the base class.
5683 result = rpc.DnsOnlyRunner().call_version([node])[node]
5684 result.Raise("Can't get version information from node %s" % node)
5685 if constants.PROTOCOL_VERSION == result.payload:
5686 logging.info("Communication to node %s fine, sw version %s match",
5687 node, result.payload)
5689 raise errors.OpPrereqError("Version mismatch master version %s,"
5690 " node version %s" %
5691 (constants.PROTOCOL_VERSION, result.payload),
5692 errors.ECODE_ENVIRON)
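    # Illustrative note (sketch, not part of the original module):
    # constants.PROTOCOL_VERSION is compared as a plain value, so a node
    # still running an older Ganeti release than the master fails the add
    # here instead of failing on later RPCs; the scenario is an example only.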
5694 def Exec(self, feedback_fn):
5695 """Adds the new node to the cluster.
5698 new_node = self.new_node
5699 node = new_node.name
5701     assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5702       "Not owning BGL"
5704     # We're adding a new node, so we assume it's powered
5705 new_node.powered = True
5707 # for re-adds, reset the offline/drained/master-candidate flags;
5708 # we need to reset here, otherwise offline would prevent RPC calls
5709 # later in the procedure; this also means that if the re-add
5710 # fails, we are left with a non-offlined, broken node
5711     if self.op.readd:
5712       new_node.drained = new_node.offline = False # pylint: disable=W0201
5713 self.LogInfo("Readding a node, the offline/drained flags were reset")
5714 # if we demote the node, we do cleanup later in the procedure
5715 new_node.master_candidate = self.master_candidate
5716 if self.changed_primary_ip:
5717 new_node.primary_ip = self.op.primary_ip
5719 # copy the master/vm_capable flags
5720 for attr in self._NFLAGS:
5721 setattr(new_node, attr, getattr(self.op, attr))
5723 # notify the user about any possible mc promotion
5724 if new_node.master_candidate:
5725 self.LogInfo("Node will be a master candidate")
5727 if self.op.ndparams:
5728 new_node.ndparams = self.op.ndparams
5730 new_node.ndparams = {}
5732 if self.op.hv_state:
5733 new_node.hv_state_static = self.new_hv_state
5735 if self.op.disk_state:
5736 new_node.disk_state_static = self.new_disk_state
5738 # Add node to our /etc/hosts, and add key to known_hosts
5739 if self.cfg.GetClusterInfo().modify_etc_hosts:
5740 master_node = self.cfg.GetMasterNode()
5741 result = self.rpc.call_etc_hosts_modify(master_node,
5742                                               constants.ETC_HOSTS_ADD,
5743                                               new_node.name,
5744                                               new_node.primary_ip)
5745 result.Raise("Can't update hosts file with new host data")
5747 if new_node.secondary_ip != new_node.primary_ip:
5748       _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
5749                                False)
5751 node_verify_list = [self.cfg.GetMasterNode()]
5752 node_verify_param = {
5753 constants.NV_NODELIST: ([node], {}),
5754       # TODO: do a node-net-test as well?
5755       }
5757 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
5758 self.cfg.GetClusterName())
5759 for verifier in node_verify_list:
5760 result[verifier].Raise("Cannot communicate with node %s" % verifier)
5761 nl_payload = result[verifier].payload[constants.NV_NODELIST]
5762       if nl_payload:
5763         for failed in nl_payload:
5764 feedback_fn("ssh/hostname verification failed"
5765 " (checking from %s): %s" %
5766 (verifier, nl_payload[failed]))
5767 raise errors.OpExecError("ssh/hostname verification failed")
5769     if self.op.readd:
5770       _RedistributeAncillaryFiles(self)
5771 self.context.ReaddNode(new_node)
5772 # make sure we redistribute the config
5773 self.cfg.Update(new_node, feedback_fn)
5774 # and make sure the new node will not have old files around
5775 if not new_node.master_candidate:
5776 result = self.rpc.call_node_demote_from_mc(new_node.name)
5777         msg = result.fail_msg
5778         if msg:
5779           self.LogWarning("Node failed to demote itself from master"
5780                           " candidate status: %s" % msg)
5781     else:
5782       _RedistributeAncillaryFiles(self, additional_nodes=[node],
5783 additional_vm=self.op.vm_capable)
5784 self.context.AddNode(new_node, self.proc.GetECId())
5787 class LUNodeSetParams(LogicalUnit):
5788 """Modifies the parameters of a node.
5790 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5791 to the node role (as _ROLE_*)
5792 @cvar _R2F: a dictionary from node role to tuples of flags
5793   @cvar _FLAGS: a list of attribute names corresponding to the flags
5795   """
5796 HPATH = "node-modify"
5797 HTYPE = constants.HTYPE_NODE
5799 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
5800   _F2R = {
5801     (True, False, False): _ROLE_CANDIDATE,
5802 (False, True, False): _ROLE_DRAINED,
5803 (False, False, True): _ROLE_OFFLINE,
5804     (False, False, False): _ROLE_REGULAR,
5805     }
5806 _R2F = dict((v, k) for k, v in _F2R.items())
5807 _FLAGS = ["master_candidate", "drained", "offline"]
5809 def CheckArguments(self):
5810 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5811 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
5812 self.op.master_capable, self.op.vm_capable,
5813                 self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
5814                 self.op.disk_state]
5815 if all_mods.count(None) == len(all_mods):
5816 raise errors.OpPrereqError("Please pass at least one modification",
5818 if all_mods.count(True) > 1:
5819 raise errors.OpPrereqError("Can't set the node into more than one"
5820 " state at the same time",
5823 # Boolean value that tells us whether we might be demoting from MC
5824 self.might_demote = (self.op.master_candidate == False or
5825 self.op.offline == True or
5826 self.op.drained == True or
5827 self.op.master_capable == False)
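    # Illustrative example (sketch, not part of the original module): a
    # request like "gnt-node modify --offline=yes NODE" reaches this LU with
    # self.op.offline == True, so might_demote is True; combined with
    # --auto-promote this sets lock_all below so the candidate pool can be
    # refilled.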
5829 if self.op.secondary_ip:
5830 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
5831 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5832 " address" % self.op.secondary_ip,
5835 self.lock_all = self.op.auto_promote and self.might_demote
5836 self.lock_instances = self.op.secondary_ip is not None
5838 def _InstanceFilter(self, instance):
5839 """Filter for getting affected instances.
5842 return (instance.disk_template in constants.DTS_INT_MIRROR and
5843 self.op.node_name in instance.all_nodes)
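    # Illustrative sketch (not part of the original module): for a drbd8
    # instance mirrored over nodes (A, B), this filter matches when
    # self.op.node_name is either A or B, because constants.DTS_INT_MIRROR
    # contains the internally mirrored disk templates.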
5845 def ExpandNames(self):
5846     if self.lock_all:
5847       self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
5848     else:
5849 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
5851 # Since modifying a node can have severe effects on currently running
5852 # operations the resource lock is at least acquired in shared mode
5853 self.needed_locks[locking.LEVEL_NODE_RES] = \
5854 self.needed_locks[locking.LEVEL_NODE]
5856 # Get node resource and instance locks in shared mode; they are not used
5857 # for anything but read-only access
5858 self.share_locks[locking.LEVEL_NODE_RES] = 1
5859 self.share_locks[locking.LEVEL_INSTANCE] = 1
5861 if self.lock_instances:
5862 self.needed_locks[locking.LEVEL_INSTANCE] = \
5863 frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))
5865   def BuildHooksEnv(self):
5866     """Build hooks env.
5868     This runs on the master node.
5870     """
5871     return {
5872       "OP_TARGET": self.op.node_name,
5873 "MASTER_CANDIDATE": str(self.op.master_candidate),
5874 "OFFLINE": str(self.op.offline),
5875 "DRAINED": str(self.op.drained),
5876 "MASTER_CAPABLE": str(self.op.master_capable),
5877 "VM_CAPABLE": str(self.op.vm_capable),
5880 def BuildHooksNodes(self):
5881 """Build hooks nodes.
5884 nl = [self.cfg.GetMasterNode(), self.op.node_name]
5887 def CheckPrereq(self):
5888 """Check prerequisites.
5890     This only checks the instance list against the existing names.
5892     """
5893 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
5895 if self.lock_instances:
5896 affected_instances = \
5897 self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
5899 # Verify instance locks
5900 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
5901 wanted_instances = frozenset(affected_instances.keys())
5902 if wanted_instances - owned_instances:
5903 raise errors.OpPrereqError("Instances affected by changing node %s's"
5904 " secondary IP address have changed since"
5905 " locks were acquired, wanted '%s', have"
5906 " '%s'; retry the operation" %
5908 utils.CommaJoin(wanted_instances),
5909 utils.CommaJoin(owned_instances)),
5912 affected_instances = None
5914 if (self.op.master_candidate is not None or
5915 self.op.drained is not None or
5916 self.op.offline is not None):
5917 # we can't change the master's node flags
5918 if self.op.node_name == self.cfg.GetMasterNode():
5919 raise errors.OpPrereqError("The master role can be changed"
5920 " only via master-failover",
5923 if self.op.master_candidate and not node.master_capable:
5924 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
5925 " it a master candidate" % node.name,
5928 if self.op.vm_capable == False:
5929 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
5931 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
5932 " the vm_capable flag" % node.name,
5935 if node.master_candidate and self.might_demote and not self.lock_all:
5936 assert not self.op.auto_promote, "auto_promote set but lock_all not"
5937       # check if after removing the current node, we're missing master
5938       # candidates
5939 (mc_remaining, mc_should, _) = \
5940 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
5941 if mc_remaining < mc_should:
5942 raise errors.OpPrereqError("Not enough master candidates, please"
5943 " pass auto promote option to allow"
5944 " promotion (--auto-promote or RAPI"
5945 " auto_promote=True)", errors.ECODE_STATE)
5947 self.old_flags = old_flags = (node.master_candidate,
5948 node.drained, node.offline)
5949 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
5950 self.old_role = old_role = self._F2R[old_flags]
5952 # Check for ineffective changes
5953 for attr in self._FLAGS:
5954 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5955 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5956 setattr(self.op, attr, None)
5958 # Past this point, any flag change to False means a transition
5959 # away from the respective state, as only real changes are kept
5961 # TODO: We might query the real power state if it supports OOB
5962 if _SupportsOob(self.cfg, node):
5963 if self.op.offline is False and not (node.powered or
5964 self.op.powered == True):
5965 raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5966 " offline status can be reset") %
5968 elif self.op.powered is not None:
5969 raise errors.OpPrereqError(("Unable to change powered state for node %s"
5970 " as it does not support out-of-band"
5971 " handling") % self.op.node_name)
5973 # If we're being deofflined/drained, we'll MC ourself if needed
5974 if (self.op.drained == False or self.op.offline == False or
5975 (self.op.master_capable and not node.master_capable)):
5976 if _DecideSelfPromotion(self):
5977 self.op.master_candidate = True
5978 self.LogInfo("Auto-promoting node to master candidate")
5980 # If we're no longer master capable, we'll demote ourselves from MC
5981 if self.op.master_capable == False and node.master_candidate:
5982 self.LogInfo("Demoting from master candidate")
5983 self.op.master_candidate = False
5986 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
5987 if self.op.master_candidate:
5988 new_role = self._ROLE_CANDIDATE
5989 elif self.op.drained:
5990 new_role = self._ROLE_DRAINED
5991 elif self.op.offline:
5992 new_role = self._ROLE_OFFLINE
5993 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
5994       # False is still in new flags, which means we're un-setting (the
5995       # offline/drained/mc) flag
5996 new_role = self._ROLE_REGULAR
5997     else: # no new flags, nothing, keep old role
5998       new_role = old_role
6000 self.new_role = new_role
6002 if old_role == self._ROLE_OFFLINE and new_role != old_role:
6003 # Trying to transition out of offline status
6004       result = self.rpc.call_version([node.name])[node.name]
6005       if result.fail_msg:
6006         raise errors.OpPrereqError("Node %s is being de-offlined but fails"
6007                                    " to report its version: %s" %
6008                                    (node.name, result.fail_msg),
6009                                    errors.ECODE_ENVIRON)
6011 self.LogWarning("Transitioning node from offline to online state"
6012 " without using re-add. Please make sure the node"
6015 if self.op.secondary_ip:
6016 # Ok even without locking, because this can't be changed by any LU
6017 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
6018 master_singlehomed = master.secondary_ip == master.primary_ip
6019 if master_singlehomed and self.op.secondary_ip:
6020 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
6021 " homed cluster", errors.ECODE_INVAL)
6023 assert not (frozenset(affected_instances) -
6024 self.owned_locks(locking.LEVEL_INSTANCE))
6026       if node.offline:
6027         if affected_instances:
6028 raise errors.OpPrereqError("Cannot change secondary IP address:"
6029 " offline node has instances (%s)"
6030 " configured to use it" %
6031 utils.CommaJoin(affected_instances.keys()))
6032       else:
6033         # On online nodes, check that no instances are running, and that
6034 # the node has the new ip and we can reach it.
6035 for instance in affected_instances.values():
6036 _CheckInstanceState(self, instance, INSTANCE_DOWN,
6037 msg="cannot change secondary ip")
6039 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
6040 if master.name != node.name:
6041 # check reachability from master secondary ip to new secondary ip
6042 if not netutils.TcpPing(self.op.secondary_ip,
6043 constants.DEFAULT_NODED_PORT,
6044 source=master.secondary_ip):
6045 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
6046 " based ping to node daemon port",
6047 errors.ECODE_ENVIRON)
6049 if self.op.ndparams:
6050 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
6051 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
6052 self.new_ndparams = new_ndparams
6054 if self.op.hv_state:
6055 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
6056 self.node.hv_state_static)
6058 if self.op.disk_state:
6059 self.new_disk_state = \
6060 _MergeAndVerifyDiskState(self.op.disk_state,
6061 self.node.disk_state_static)
6063   def Exec(self, feedback_fn):
6064     """Modifies a node.
6066     """
6067     node = self.node
6068     old_role = self.old_role
6069     new_role = self.new_role
6071     result = []
6073 if self.op.ndparams:
6074 node.ndparams = self.new_ndparams
6076 if self.op.powered is not None:
6077 node.powered = self.op.powered
6079 if self.op.hv_state:
6080 node.hv_state_static = self.new_hv_state
6082 if self.op.disk_state:
6083 node.disk_state_static = self.new_disk_state
6085 for attr in ["master_capable", "vm_capable"]:
6086       val = getattr(self.op, attr)
6087       if val is not None:
6088 setattr(node, attr, val)
6089 result.append((attr, str(val)))
6091 if new_role != old_role:
6092 # Tell the node to demote itself, if no longer MC and not offline
6093 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
6094 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
6096 self.LogWarning("Node failed to demote itself: %s", msg)
6098 new_flags = self._R2F[new_role]
6099 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
6100         if of != nf:
6101           result.append((desc, str(nf)))
6102 (node.master_candidate, node.drained, node.offline) = new_flags
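      # Illustrative sketch (not part of the original module): demoting a
      # master candidate to offline means
      #   old_flags == (True, False, False) and new_flags == (False, False, True)
      # so the loop above appends ("master_candidate", "False") and
      # ("offline", "True") to result.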
6104 # we locked all nodes, we adjust the CP before updating this node
6105       if self.lock_all:
6106         _AdjustCandidatePool(self, [node.name])
6108 if self.op.secondary_ip:
6109 node.secondary_ip = self.op.secondary_ip
6110 result.append(("secondary_ip", self.op.secondary_ip))
6112 # this will trigger configuration file update, if needed
6113 self.cfg.Update(node, feedback_fn)
6115     # this will trigger job queue propagation or cleanup if the mc
6116     # flag changed
6117 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
6118       self.context.ReaddNode(node)
6120     return result
6123 class LUNodePowercycle(NoHooksLU):
6124 """Powercycles a node.
6129 def CheckArguments(self):
6130 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6131 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
6132 raise errors.OpPrereqError("The node is the master and the force"
6133 " parameter was not set",
6136 def ExpandNames(self):
6137 """Locking for PowercycleNode.
6139 This is a last-resort option and shouldn't block on other
6140     jobs. Therefore, we grab no locks.
6142     """
6143 self.needed_locks = {}
6145   def Exec(self, feedback_fn):
6146     """Reboots a node.
6148     """
6149 result = self.rpc.call_node_powercycle(self.op.node_name,
6150 self.cfg.GetHypervisorType())
6151 result.Raise("Failed to schedule the reboot")
6152 return result.payload
6155 class LUClusterQuery(NoHooksLU):
6156 """Query cluster configuration.
6161 def ExpandNames(self):
6162 self.needed_locks = {}
6164 def Exec(self, feedback_fn):
6165 """Return cluster config.
6168 cluster = self.cfg.GetClusterInfo()
6169     os_hvp = {}
6171     # Filter just for enabled hypervisors
6172 for os_name, hv_dict in cluster.os_hvp.items():
6173 os_hvp[os_name] = {}
6174 for hv_name, hv_params in hv_dict.items():
6175 if hv_name in cluster.enabled_hypervisors:
6176 os_hvp[os_name][hv_name] = hv_params
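    # Illustrative sketch (not part of the original module): with kvm and
    # xen-pvm enabled, os_hvp ends up shaped like
    #   {"debian-image": {"kvm": {...}, "xen-pvm": {...}}}
    # (the OS name is invented), i.e. per-OS hypervisor overrides restricted
    # to enabled hypervisors.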
6178 # Convert ip_family to ip_version
6179 primary_ip_version = constants.IP4_VERSION
6180 if cluster.primary_ip_family == netutils.IP6Address.family:
6181 primary_ip_version = constants.IP6_VERSION
6184 "software_version": constants.RELEASE_VERSION,
6185 "protocol_version": constants.PROTOCOL_VERSION,
6186 "config_version": constants.CONFIG_VERSION,
6187 "os_api_version": max(constants.OS_API_VERSIONS),
6188 "export_version": constants.EXPORT_VERSION,
6189 "architecture": runtime.GetArchInfo(),
6190 "name": cluster.cluster_name,
6191 "master": cluster.master_node,
6192 "default_hypervisor": cluster.primary_hypervisor,
6193 "enabled_hypervisors": cluster.enabled_hypervisors,
6194 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
6195                         for hypervisor_name in cluster.enabled_hypervisors]),
6196       "os_hvp": os_hvp,
6197       "beparams": cluster.beparams,
6198 "osparams": cluster.osparams,
6199 "ipolicy": cluster.ipolicy,
6200 "nicparams": cluster.nicparams,
6201 "ndparams": cluster.ndparams,
6202 "diskparams": cluster.diskparams,
6203 "candidate_pool_size": cluster.candidate_pool_size,
6204 "master_netdev": cluster.master_netdev,
6205 "master_netmask": cluster.master_netmask,
6206 "use_external_mip_script": cluster.use_external_mip_script,
6207 "volume_group_name": cluster.volume_group_name,
6208 "drbd_usermode_helper": cluster.drbd_usermode_helper,
6209 "file_storage_dir": cluster.file_storage_dir,
6210 "shared_file_storage_dir": cluster.shared_file_storage_dir,
6211 "maintain_node_health": cluster.maintain_node_health,
6212 "ctime": cluster.ctime,
6213 "mtime": cluster.mtime,
6214 "uuid": cluster.uuid,
6215 "tags": list(cluster.GetTags()),
6216 "uid_pool": cluster.uid_pool,
6217 "default_iallocator": cluster.default_iallocator,
6218 "reserved_lvs": cluster.reserved_lvs,
6219 "primary_ip_version": primary_ip_version,
6220 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
6221 "hidden_os": cluster.hidden_os,
6222 "blacklisted_os": cluster.blacklisted_os,
6228 class LUClusterConfigQuery(NoHooksLU):
6229 """Return configuration values.
6234 def CheckArguments(self):
6235 self.cq = _ClusterQuery(None, self.op.output_fields, False)
6237 def ExpandNames(self):
6238 self.cq.ExpandNames(self)
6240 def DeclareLocks(self, level):
6241 self.cq.DeclareLocks(self, level)
6243 def Exec(self, feedback_fn):
6244 result = self.cq.OldStyleQuery(self)
6246     assert len(result) == 1
6248     return result[0]
6251 class _ClusterQuery(_QueryBase):
6252 FIELDS = query.CLUSTER_FIELDS
6254   #: Do not sort (there is only one item)
6255   SORT_FIELD = None
6257 def ExpandNames(self, lu):
6258 lu.needed_locks = {}
6260 # The following variables interact with _QueryBase._GetNames
6261 self.wanted = locking.ALL_SET
6262 self.do_locking = self.use_locking
6265 raise errors.OpPrereqError("Can not use locking for cluster queries",
6268   def DeclareLocks(self, lu, level):
6269     pass
6271 def _GetQueryData(self, lu):
6272 """Computes the list of nodes and their attributes.
6275 # Locking is not used
6276 assert not (compat.any(lu.glm.is_owned(level)
6277 for level in locking.LEVELS
6278 if level != locking.LEVEL_CLUSTER) or
6279 self.do_locking or self.use_locking)
6281 if query.CQ_CONFIG in self.requested_data:
6282 cluster = lu.cfg.GetClusterInfo()
6283     else:
6284       cluster = NotImplemented
6286 if query.CQ_QUEUE_DRAINED in self.requested_data:
6287 drain_flag = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
6288     else:
6289       drain_flag = NotImplemented
6291 if query.CQ_WATCHER_PAUSE in self.requested_data:
6292 watcher_pause = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
6293     else:
6294       watcher_pause = NotImplemented
6296 return query.ClusterQueryData(cluster, drain_flag, watcher_pause)
6299 class LUInstanceActivateDisks(NoHooksLU):
6300 """Bring up an instance's disks.
6305 def ExpandNames(self):
6306 self._ExpandAndLockInstance()
6307 self.needed_locks[locking.LEVEL_NODE] = []
6308 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6310 def DeclareLocks(self, level):
6311 if level == locking.LEVEL_NODE:
6312 self._LockInstancesNodes()
6314 def CheckPrereq(self):
6315 """Check prerequisites.
6317     This checks that the instance is in the cluster.
6319     """
6320 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6321 assert self.instance is not None, \
6322 "Cannot retrieve locked instance %s" % self.op.instance_name
6323 _CheckNodeOnline(self, self.instance.primary_node)
6325 def Exec(self, feedback_fn):
6326 """Activate the disks.
6329 disks_ok, disks_info = \
6330 _AssembleInstanceDisks(self, self.instance,
6331 ignore_size=self.op.ignore_size)
6333 raise errors.OpExecError("Cannot activate block devices")
6338 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
6339                            ignore_size=False):
6340 """Prepare the block devices for an instance.
6342 This sets up the block devices on all nodes.
6344 @type lu: L{LogicalUnit}
6345 @param lu: the logical unit on whose behalf we execute
6346 @type instance: L{objects.Instance}
6347 @param instance: the instance for whose disks we assemble
6348 @type disks: list of L{objects.Disk} or None
6349 @param disks: which disks to assemble (or all, if None)
6350 @type ignore_secondaries: boolean
6351 @param ignore_secondaries: if true, errors on secondary nodes
6352 won't result in an error return from the function
6353 @type ignore_size: boolean
6354 @param ignore_size: if true, the current known size of the disk
6355 will not be used during the disk activation, useful for cases
6356 when the size is wrong
6357 @return: False if the operation failed, otherwise a list of
6358 (host, instance_visible_name, node_visible_name)
6359 with the mapping from node devices to instance devices
6361   """
6362   device_info = []
6363   disks_ok = True
6364   iname = instance.name
6365 disks = _ExpandCheckDisks(instance, disks)
6367 # With the two passes mechanism we try to reduce the window of
6368 # opportunity for the race condition of switching DRBD to primary
6369 # before handshaking occured, but we do not eliminate it
6371 # The proper fix would be to wait (with some limits) until the
6372 # connection has been made and drbd transitions from WFConnection
6373   # into any other network-connected state (Connected, SyncTarget,
6374   # SyncSource, etc.)
6376 # 1st pass, assemble on all nodes in secondary mode
6377 for idx, inst_disk in enumerate(disks):
6378 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6379       if ignore_size:
6380         node_disk = node_disk.Copy()
6381 node_disk.UnsetSize()
6382 lu.cfg.SetDiskID(node_disk, node)
6383       result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6384                                              False, idx)
6385       msg = result.fail_msg
6386       if msg:
6387         is_offline_secondary = (node in instance.secondary_nodes and
6388                                 result.offline)
6389         lu.proc.LogWarning("Could not prepare block device %s on node %s"
6390                            " (is_primary=False, pass=1): %s",
6391                            inst_disk.iv_name, node, msg)
6392         if not (ignore_secondaries or is_offline_secondary):
6393           disks_ok = False
6395 # FIXME: race condition on drbd migration to primary
6397 # 2nd pass, do only the primary node
6398   for idx, inst_disk in enumerate(disks):
6399     dev_path = None
6401     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6402       if node != instance.primary_node:
6403         continue
6404       if ignore_size:
6405         node_disk = node_disk.Copy()
6406         node_disk.UnsetSize()
6407       lu.cfg.SetDiskID(node_disk, node)
6408       result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6409                                              True, idx)
6410       msg = result.fail_msg
6411       if msg:
6412         lu.proc.LogWarning("Could not prepare block device %s on node %s"
6413                            " (is_primary=True, pass=2): %s",
6414                            inst_disk.iv_name, node, msg)
6415         disks_ok = False
6416       else:
6417         dev_path = result.payload
6419 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
6421 # leave the disks configured for the primary node
6422 # this is a workaround that would be fixed better by
6423 # improving the logical/physical id handling
6424   for disk in disks:
6425     lu.cfg.SetDiskID(disk, instance.primary_node)
6427 return disks_ok, device_info
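# Illustrative sketch (not part of the original module): a successful call to
# _AssembleInstanceDisks returns something like
#   (True, [("node1.example.com", "disk/0", "/dev/drbd0"), ...])
# where the hostname and device path are invented for the example; disks_ok
# is False as soon as any required assembly failed.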
6430 def _StartInstanceDisks(lu, instance, force):
6431 """Start the disks of an instance.
6434 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
6435 ignore_secondaries=force)
6436   if not disks_ok:
6437     _ShutdownInstanceDisks(lu, instance)
6438 if force is not None and not force:
6439 lu.proc.LogWarning("", hint="If the message above refers to a"
6441 " you can retry the operation using '--force'.")
6442 raise errors.OpExecError("Disk consistency error")
6445 class LUInstanceDeactivateDisks(NoHooksLU):
6446 """Shutdown an instance's disks.
6451 def ExpandNames(self):
6452 self._ExpandAndLockInstance()
6453 self.needed_locks[locking.LEVEL_NODE] = []
6454 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6456 def DeclareLocks(self, level):
6457 if level == locking.LEVEL_NODE:
6458 self._LockInstancesNodes()
6460 def CheckPrereq(self):
6461 """Check prerequisites.
6463     This checks that the instance is in the cluster.
6465     """
6466 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6467 assert self.instance is not None, \
6468 "Cannot retrieve locked instance %s" % self.op.instance_name
6470 def Exec(self, feedback_fn):
6471 """Deactivate the disks
6474 instance = self.instance
6476 _ShutdownInstanceDisks(self, instance)
6478 _SafeShutdownInstanceDisks(self, instance)
6481 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
6482 """Shutdown block devices of an instance.
6484 This function checks if an instance is running, before calling
6485   _ShutdownInstanceDisks.
6487   """
6488 _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
6489 _ShutdownInstanceDisks(lu, instance, disks=disks)
6492 def _ExpandCheckDisks(instance, disks):
6493 """Return the instance disks selected by the disks list
6495 @type disks: list of L{objects.Disk} or None
6496 @param disks: selected disks
6497 @rtype: list of L{objects.Disk}
6498 @return: selected instance disks to act on
6500   """
6501   if disks is None:
6502     return instance.disks
6503   else:
6504     if not set(disks).issubset(instance.disks):
6505       raise errors.ProgrammerError("Can only act on disks belonging to the"
6506                                    " target instance")
6507     return disks
6510 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
6511 """Shutdown block devices of an instance.
6513 This does the shutdown on all nodes of the instance.
6515   If the ignore_primary is false, errors on the primary node are
6516   ignored.
6518   """
6519   all_result = True
6520   disks = _ExpandCheckDisks(instance, disks)
6522   for disk in disks:
6523     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
6524 lu.cfg.SetDiskID(top_disk, node)
6525 result = lu.rpc.call_blockdev_shutdown(node, (top_disk, instance))
6526 msg = result.fail_msg
6528 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
6529 disk.iv_name, node, msg)
6530 if ((node == instance.primary_node and not ignore_primary) or
6531 (node != instance.primary_node and not result.offline)):
6536 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
6537 """Checks if a node has enough free memory.
6539   This function checks if a given node has the needed amount of free
6540   memory. In case the node has less memory or we cannot get the
6541   information from the node, this function raises an OpPrereqError
6542   exception.
6544 @type lu: C{LogicalUnit}
6545 @param lu: a logical unit from which we get configuration data
6546   @type node: C{str}
6547   @param node: the node to check
6548 @type reason: C{str}
6549 @param reason: string to use in the error message
6550 @type requested: C{int}
6551 @param requested: the amount of memory in MiB to check for
6552 @type hypervisor_name: C{str}
6553 @param hypervisor_name: the hypervisor to ask for memory stats
6554   @rtype: C{int}
6555   @return: node current free memory
6556 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
6557 we cannot check the node
6559   """
6560   nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
6561 nodeinfo[node].Raise("Can't get data from node %s" % node,
6562 prereq=True, ecode=errors.ECODE_ENVIRON)
6563 (_, _, (hv_info, )) = nodeinfo[node].payload
6565 free_mem = hv_info.get("memory_free", None)
6566 if not isinstance(free_mem, int):
6567 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
6568 " was '%s'" % (node, free_mem),
6569 errors.ECODE_ENVIRON)
6570 if requested > free_mem:
6571 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
6572 " needed %s MiB, available %s MiB" %
6573                                (node, reason, requested, free_mem),
6574                                errors.ECODE_NORES)
6576   return free_mem
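# Illustrative usage (sketch, not part of the original module), mirroring the
# instance-startup path later in this file:
#   free = _CheckNodeFreeMemory(self, instance.primary_node,
#                               "starting instance %s" % instance.name,
#                               bep[constants.BE_MINMEM], instance.hypervisor)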
6578 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
6579 """Checks if nodes have enough free disk space in the all VGs.
6581   This function checks if all given nodes have the needed amount of
6582   free disk. In case any node has less disk or we cannot get the
6583   information from the node, this function raises an OpPrereqError
6584   exception.
6586 @type lu: C{LogicalUnit}
6587 @param lu: a logical unit from which we get configuration data
6588 @type nodenames: C{list}
6589 @param nodenames: the list of node names to check
6590 @type req_sizes: C{dict}
6591   @param req_sizes: the hash of vg and corresponding amount of disk in
6592       MiB to check for
6593 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6594       or we cannot check the node
6596   """
6597 for vg, req_size in req_sizes.items():
6598 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
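# Illustrative example (sketch, not part of the original module): an instance
# with LVs spread over two volume groups might be checked with
#   _CheckNodesFreeDiskPerVG(self, nodenames, {"xenvg": 10240, "ssdvg": 2048})
# i.e. one aggregated MiB requirement per volume group (names invented).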
6601 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
6602 """Checks if nodes have enough free disk space in the specified VG.
6604   This function checks if all given nodes have the needed amount of
6605   free disk. In case any node has less disk or we cannot get the
6606   information from the node, this function raises an OpPrereqError
6607   exception.
6609 @type lu: C{LogicalUnit}
6610 @param lu: a logical unit from which we get configuration data
6611 @type nodenames: C{list}
6612 @param nodenames: the list of node names to check
6613   @type vg: C{str}
6614   @param vg: the volume group to check
6615 @type requested: C{int}
6616 @param requested: the amount of disk in MiB to check for
6617 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6618       or we cannot check the node
6620   """
6621 nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
6622 for node in nodenames:
6623 info = nodeinfo[node]
6624 info.Raise("Cannot get current information from node %s" % node,
6625 prereq=True, ecode=errors.ECODE_ENVIRON)
6626 (_, (vg_info, ), _) = info.payload
6627 vg_free = vg_info.get("vg_free", None)
6628 if not isinstance(vg_free, int):
6629 raise errors.OpPrereqError("Can't compute free disk space on node"
6630 " %s for vg %s, result was '%s'" %
6631 (node, vg, vg_free), errors.ECODE_ENVIRON)
6632 if requested > vg_free:
6633 raise errors.OpPrereqError("Not enough disk space on target node %s"
6634 " vg %s: required %d MiB, available %d MiB" %
6635                                  (node, vg, requested, vg_free),
6636                                  errors.ECODE_NORES)
6639 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
6640 """Checks if nodes have enough physical CPUs
6642 This function checks if all given nodes have the needed number of
6643   physical CPUs. In case any node has fewer CPUs or we cannot get the
6644   information from the node, this function raises an OpPrereqError
6645   exception.
6647 @type lu: C{LogicalUnit}
6648 @param lu: a logical unit from which we get configuration data
6649 @type nodenames: C{list}
6650 @param nodenames: the list of node names to check
6651 @type requested: C{int}
6652 @param requested: the minimum acceptable number of physical CPUs
6653 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
6654       or we cannot check the node
6656   """
6657 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
6658 for node in nodenames:
6659 info = nodeinfo[node]
6660 info.Raise("Cannot get current information from node %s" % node,
6661 prereq=True, ecode=errors.ECODE_ENVIRON)
6662 (_, _, (hv_info, )) = info.payload
6663 num_cpus = hv_info.get("cpu_total", None)
6664 if not isinstance(num_cpus, int):
6665 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
6666 " on node %s, result was '%s'" %
6667 (node, num_cpus), errors.ECODE_ENVIRON)
6668 if requested > num_cpus:
6669 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
6670 "required" % (node, num_cpus, requested),
6674 class LUInstanceStartup(LogicalUnit):
6675 """Starts an instance.
6678 HPATH = "instance-start"
6679   HTYPE = constants.HTYPE_INSTANCE
6680   REQ_BGL = False
6682   def CheckArguments(self):
6683     # extra beparams
6684 if self.op.beparams:
6685 # fill the beparams dict
6686 objects.UpgradeBeParams(self.op.beparams)
6687 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6689 def ExpandNames(self):
6690 self._ExpandAndLockInstance()
6691 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
6693 def DeclareLocks(self, level):
6694 if level == locking.LEVEL_NODE_RES:
6695 self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
6697   def BuildHooksEnv(self):
6698     """Build hooks env.
6700     This runs on master, primary and secondary nodes of the instance.
6702     """
6703     env = {
6704       "FORCE": self.op.force,
6705       }
6707     env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6709     return env
6711 def BuildHooksNodes(self):
6712 """Build hooks nodes.
6715 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6718 def CheckPrereq(self):
6719 """Check prerequisites.
6721     This checks that the instance is in the cluster.
6723     """
6724 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6725 assert self.instance is not None, \
6726 "Cannot retrieve locked instance %s" % self.op.instance_name
6728     # extra hvparams
6729     if self.op.hvparams:
6730 # check hypervisor parameter syntax (locally)
6731 cluster = self.cfg.GetClusterInfo()
6732 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6733 filled_hvp = cluster.FillHV(instance)
6734 filled_hvp.update(self.op.hvparams)
6735 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
6736 hv_type.CheckParameterSyntax(filled_hvp)
6737 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
6739 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6741 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
6743 if self.primary_offline and self.op.ignore_offline_nodes:
6744 self.proc.LogWarning("Ignoring offline primary node")
6746 if self.op.hvparams or self.op.beparams:
6747 self.proc.LogWarning("Overridden parameters are ignored")
6749 _CheckNodeOnline(self, instance.primary_node)
6751 bep = self.cfg.GetClusterInfo().FillBE(instance)
6752 bep.update(self.op.beparams)
6754 # check bridges existence
6755 _CheckInstanceBridgesExist(self, instance)
6757 remote_info = self.rpc.call_instance_info(instance.primary_node,
6758                                               instance.name,
6759                                               instance.hypervisor)
6760 remote_info.Raise("Error checking node %s" % instance.primary_node,
6761 prereq=True, ecode=errors.ECODE_ENVIRON)
6762 if not remote_info.payload: # not running already
6763 _CheckNodeFreeMemory(self, instance.primary_node,
6764 "starting instance %s" % instance.name,
6765 bep[constants.BE_MINMEM], instance.hypervisor)
6767 def Exec(self, feedback_fn):
6768 """Start the instance.
6771 instance = self.instance
6772 force = self.op.force
6774 if not self.op.no_remember:
6775 self.cfg.MarkInstanceUp(instance.name)
6777 if self.primary_offline:
6778 assert self.op.ignore_offline_nodes
6779 self.proc.LogInfo("Primary node offline, marked instance as started")
6780     else:
6781       node_current = instance.primary_node
6783 _StartInstanceDisks(self, instance, force)
6785       result = \
6786         self.rpc.call_instance_start(node_current,
6787                                      (instance, self.op.hvparams,
6788                                       self.op.beparams),
6789                                      self.op.startup_paused)
6790       msg = result.fail_msg
6791       if msg:
6792         _ShutdownInstanceDisks(self, instance)
6793 raise errors.OpExecError("Could not start instance: %s" % msg)
6796 class LUInstanceReboot(LogicalUnit):
6797 """Reboot an instance.
6800 HPATH = "instance-reboot"
6801   HTYPE = constants.HTYPE_INSTANCE
6802   REQ_BGL = False
6804 def ExpandNames(self):
6805 self._ExpandAndLockInstance()
6807   def BuildHooksEnv(self):
6808     """Build hooks env.
6810     This runs on master, primary and secondary nodes of the instance.
6812     """
6813     env = {
6814       "IGNORE_SECONDARIES": self.op.ignore_secondaries,
6815 "REBOOT_TYPE": self.op.reboot_type,
6816 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6819 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6823 def BuildHooksNodes(self):
6824 """Build hooks nodes.
6827 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6830 def CheckPrereq(self):
6831 """Check prerequisites.
6833     This checks that the instance is in the cluster.
6835     """
6836 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6837 assert self.instance is not None, \
6838 "Cannot retrieve locked instance %s" % self.op.instance_name
6839 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6840 _CheckNodeOnline(self, instance.primary_node)
6842 # check bridges existence
6843 _CheckInstanceBridgesExist(self, instance)
6845 def Exec(self, feedback_fn):
6846 """Reboot the instance.
6849 instance = self.instance
6850 ignore_secondaries = self.op.ignore_secondaries
6851 reboot_type = self.op.reboot_type
6853 remote_info = self.rpc.call_instance_info(instance.primary_node,
6854                                               instance.name,
6855                                               instance.hypervisor)
6856 remote_info.Raise("Error checking node %s" % instance.primary_node)
6857 instance_running = bool(remote_info.payload)
6859 node_current = instance.primary_node
6861 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
6862 constants.INSTANCE_REBOOT_HARD]:
6863 for disk in instance.disks:
6864 self.cfg.SetDiskID(disk, node_current)
6865 result = self.rpc.call_instance_reboot(node_current, instance,
6866                                              reboot_type,
6867                                              self.op.shutdown_timeout)
6868 result.Raise("Could not reboot instance")
6869     else:
6870       if instance_running:
6871 result = self.rpc.call_instance_shutdown(node_current, instance,
6872 self.op.shutdown_timeout)
6873 result.Raise("Could not shutdown instance for full reboot")
6874 _ShutdownInstanceDisks(self, instance)
6876 self.LogInfo("Instance %s was already stopped, starting now",
6878 _StartInstanceDisks(self, instance, ignore_secondaries)
6879 result = self.rpc.call_instance_start(node_current,
6880 (instance, None, None), False)
6881       msg = result.fail_msg
6882       if msg:
6883         _ShutdownInstanceDisks(self, instance)
6884 raise errors.OpExecError("Could not start instance for"
6885 " full reboot: %s" % msg)
6887 self.cfg.MarkInstanceUp(instance.name)
6890 class LUInstanceShutdown(LogicalUnit):
6891 """Shutdown an instance.
6894 HPATH = "instance-stop"
6895   HTYPE = constants.HTYPE_INSTANCE
6896   REQ_BGL = False
6898 def ExpandNames(self):
6899 self._ExpandAndLockInstance()
6901   def BuildHooksEnv(self):
6902     """Build hooks env.
6904     This runs on master, primary and secondary nodes of the instance.
6906     """
6907     env = _BuildInstanceHookEnvByObject(self, self.instance)
6908     env["TIMEOUT"] = self.op.timeout
6909     return env
6911 def BuildHooksNodes(self):
6912 """Build hooks nodes.
6915 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6918 def CheckPrereq(self):
6919 """Check prerequisites.
6921     This checks that the instance is in the cluster.
6923     """
6924 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6925 assert self.instance is not None, \
6926 "Cannot retrieve locked instance %s" % self.op.instance_name
6928 _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
6930 self.primary_offline = \
6931 self.cfg.GetNodeInfo(self.instance.primary_node).offline
6933 if self.primary_offline and self.op.ignore_offline_nodes:
6934 self.proc.LogWarning("Ignoring offline primary node")
6936 _CheckNodeOnline(self, self.instance.primary_node)
6938 def Exec(self, feedback_fn):
6939 """Shutdown the instance.
6942 instance = self.instance
6943 node_current = instance.primary_node
6944 timeout = self.op.timeout
6946 if not self.op.no_remember:
6947 self.cfg.MarkInstanceDown(instance.name)
6949 if self.primary_offline:
6950 assert self.op.ignore_offline_nodes
6951 self.proc.LogInfo("Primary node offline, marked instance as stopped")
6953 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
6954 msg = result.fail_msg
6956 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
6958 _ShutdownInstanceDisks(self, instance)
6961 class LUInstanceReinstall(LogicalUnit):
6962 """Reinstall an instance.
6965 HPATH = "instance-reinstall"
6966   HTYPE = constants.HTYPE_INSTANCE
6967   REQ_BGL = False
6969 def ExpandNames(self):
6970 self._ExpandAndLockInstance()
6972   def BuildHooksEnv(self):
6973     """Build hooks env.
6975     This runs on master, primary and secondary nodes of the instance.
6977     """
6978     return _BuildInstanceHookEnvByObject(self, self.instance)
6980 def BuildHooksNodes(self):
6981 """Build hooks nodes.
6984 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6987 def CheckPrereq(self):
6988 """Check prerequisites.
6990     This checks that the instance is in the cluster and is not running.
6992     """
6993 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6994 assert instance is not None, \
6995 "Cannot retrieve locked instance %s" % self.op.instance_name
6996 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
6997 " offline, cannot reinstall")
6999 if instance.disk_template == constants.DT_DISKLESS:
7000 raise errors.OpPrereqError("Instance '%s' has no disks" %
7001                                  self.op.instance_name,
7002                                  errors.ECODE_INVAL)
7003 _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
7005     if self.op.os_type is not None:
7006       # OS verification
7007 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
7008 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
7009 instance_os = self.op.os_type
7010     else:
7011       instance_os = instance.os
7013 nodelist = list(instance.all_nodes)
7015 if self.op.osparams:
7016 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
7017 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
7018       self.os_inst = i_osdict # the new dict (without defaults)
7019     else:
7020       self.os_inst = {}
7022 self.instance = instance
7024 def Exec(self, feedback_fn):
7025 """Reinstall the instance.
7028 inst = self.instance
7030 if self.op.os_type is not None:
7031 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
7032 inst.os = self.op.os_type
7033 # Write to configuration
7034 self.cfg.Update(inst, feedback_fn)
7036 _StartInstanceDisks(self, inst, None)
7038 feedback_fn("Running the instance OS create scripts...")
7039 # FIXME: pass debug option from opcode to backend
7040 result = self.rpc.call_instance_os_add(inst.primary_node,
7041 (inst, self.os_inst), True,
7042 self.op.debug_level)
7043 result.Raise("Could not install OS for instance %s on node %s" %
7044 (inst.name, inst.primary_node))
7045     finally:
7046       _ShutdownInstanceDisks(self, inst)
7049 class LUInstanceRecreateDisks(LogicalUnit):
7050 """Recreate an instance's missing disks.
7053 HPATH = "instance-recreate-disks"
7054   HTYPE = constants.HTYPE_INSTANCE
7055   REQ_BGL = False
7057 _MODIFYABLE = frozenset([
7058 constants.IDISK_SIZE,
7059     constants.IDISK_MODE,
7060     ])
7062   # New or changed disk parameters may have different semantics
7063 assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
7064 constants.IDISK_ADOPT,
7066     # TODO: Implement support changing VG while recreating
7067     constants.IDISK_VG,
7068     constants.IDISK_METAVG,
7069     ]))
7071 def CheckArguments(self):
7072 if self.op.disks and ht.TPositiveInt(self.op.disks[0]):
7073 # Normalize and convert deprecated list of disk indices
7074 self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
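      # Illustrative example (sketch, not part of the original module): a
      # deprecated opcode value of [2, 0] is normalized above to
      # [(0, {}), (2, {})], i.e. sorted (index, params) pairs.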
7076 duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
7078 raise errors.OpPrereqError("Some disks have been specified more than"
7079 " once: %s" % utils.CommaJoin(duplicates),
7082 for (idx, params) in self.op.disks:
7083 utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
7084 unsupported = frozenset(params.keys()) - self._MODIFYABLE
7086 raise errors.OpPrereqError("Parameters for disk %s try to change"
7087 " unmodifyable parameter(s): %s" %
7088 (idx, utils.CommaJoin(unsupported)),
7091 def ExpandNames(self):
7092 self._ExpandAndLockInstance()
7093 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7094     if self.op.nodes:
7095       self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
7096       self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
7097     else:
7098       self.needed_locks[locking.LEVEL_NODE] = []
7099 self.needed_locks[locking.LEVEL_NODE_RES] = []
7101 def DeclareLocks(self, level):
7102 if level == locking.LEVEL_NODE:
7103 # if we replace the nodes, we only need to lock the old primary,
7104 # otherwise we need to lock all nodes for disk re-creation
7105 primary_only = bool(self.op.nodes)
7106 self._LockInstancesNodes(primary_only=primary_only)
7107 elif level == locking.LEVEL_NODE_RES:
7108       # Copy node locks
7109       self.needed_locks[locking.LEVEL_NODE_RES] = \
7110 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
7112   def BuildHooksEnv(self):
7113     """Build hooks env.
7115     This runs on master, primary and secondary nodes of the instance.
7117     """
7118     return _BuildInstanceHookEnvByObject(self, self.instance)
7120 def BuildHooksNodes(self):
7121 """Build hooks nodes.
7124 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7127 def CheckPrereq(self):
7128 """Check prerequisites.
7130     This checks that the instance is in the cluster and is not running.
7132     """
7133 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7134 assert instance is not None, \
7135 "Cannot retrieve locked instance %s" % self.op.instance_name
7136     if self.op.nodes:
7137       if len(self.op.nodes) != len(instance.all_nodes):
7138 raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
7139 " %d replacement nodes were specified" %
7140 (instance.name, len(instance.all_nodes),
7141                                     len(self.op.nodes)),
7142                                    errors.ECODE_INVAL)
7143 assert instance.disk_template != constants.DT_DRBD8 or \
7144 len(self.op.nodes) == 2
7145 assert instance.disk_template != constants.DT_PLAIN or \
7146 len(self.op.nodes) == 1
7147 primary_node = self.op.nodes[0]
7148     else:
7149       primary_node = instance.primary_node
7150 _CheckNodeOnline(self, primary_node)
7152 if instance.disk_template == constants.DT_DISKLESS:
7153 raise errors.OpPrereqError("Instance '%s' has no disks" %
7154 self.op.instance_name, errors.ECODE_INVAL)
7156     # if we replace nodes *and* the old primary is offline, we don't
7157     # check
7158 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
7159 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE_RES)
7160 old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
7161 if not (self.op.nodes and old_pnode.offline):
7162 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7163 msg="cannot recreate disks")
7165     if self.op.disks:
7166       self.disks = dict(self.op.disks)
7167     else:
7168       self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
7169     if self.disks:
7170       maxidx = max(self.disks.keys())
7171 if maxidx >= len(instance.disks):
7172 raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
7175 if (self.op.nodes and
7176 sorted(self.disks.keys()) != range(len(instance.disks))):
7177 raise errors.OpPrereqError("Can't recreate disks partially and"
7178 " change the nodes at the same time",
7181 self.instance = instance
7183 def Exec(self, feedback_fn):
7184 """Recreate the disks.
7187 instance = self.instance
7189 assert (self.owned_locks(locking.LEVEL_NODE) ==
7190 self.owned_locks(locking.LEVEL_NODE_RES))
7192     to_skip = []
7193     mods = [] # keeps track of needed changes
7195 for idx, disk in enumerate(instance.disks):
7196       try:
7197         changes = self.disks[idx]
7198       except KeyError:
7199         # Disk should not be recreated
7200         to_skip.append(idx)
7201         continue
7203       # update secondaries for disks, if needed
7204 if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
7205 # need to update the nodes and minors
7206 assert len(self.op.nodes) == 2
7207 assert len(disk.logical_id) == 6 # otherwise disk internals
7209 (_, _, old_port, _, _, old_secret) = disk.logical_id
7210 new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
7211 new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
7212 new_minors[0], new_minors[1], old_secret)
7213         assert len(disk.logical_id) == len(new_id)
7214       else:
7215         new_id = None
7217       mods.append((idx, new_id, changes))
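      # Illustrative sketch (not part of the original module): a DRBD8
      # logical_id is the 6-tuple
      #   (node_a, node_b, port, minor_a, minor_b, secret)
      # which is why only the port and secret are preserved above while the
      # nodes and minors are reallocated.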
7219 # now that we have passed all asserts above, we can apply the mods
7220 # in a single run (to avoid partial changes)
7221 for idx, new_id, changes in mods:
7222 disk = instance.disks[idx]
7223 if new_id is not None:
7224 assert disk.dev_type == constants.LD_DRBD8
7225 disk.logical_id = new_id
7226       if changes:
7227         disk.Update(size=changes.get(constants.IDISK_SIZE, None),
7228 mode=changes.get(constants.IDISK_MODE, None))
7230 # change primary node, if needed
7231     if self.op.nodes:
7232       instance.primary_node = self.op.nodes[0]
7233 self.LogWarning("Changing the instance's nodes, you will have to"
7234 " remove any disks left on the older nodes manually")
7236     if self.op.nodes:
7237       self.cfg.Update(instance, feedback_fn)
7239 _CreateDisks(self, instance, to_skip=to_skip)
7242 class LUInstanceRename(LogicalUnit):
7243 """Rename an instance.
7246 HPATH = "instance-rename"
7247 HTYPE = constants.HTYPE_INSTANCE
7249   def CheckArguments(self):
7250     """Check arguments.
7252     """
7253 if self.op.ip_check and not self.op.name_check:
7254 # TODO: make the ip check more flexible and not depend on the name check
7255 raise errors.OpPrereqError("IP address check requires a name check",
7258   def BuildHooksEnv(self):
7259     """Build hooks env.
7261     This runs on master, primary and secondary nodes of the instance.
7263     """
7264     env = _BuildInstanceHookEnvByObject(self, self.instance)
7265     env["INSTANCE_NEW_NAME"] = self.op.new_name
7266     return env
7268 def BuildHooksNodes(self):
7269 """Build hooks nodes.
7272 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7275 def CheckPrereq(self):
7276 """Check prerequisites.
7278     This checks that the instance is in the cluster and is not running.
7280     """
7281 self.op.instance_name = _ExpandInstanceName(self.cfg,
7282 self.op.instance_name)
7283 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7284 assert instance is not None
7285 _CheckNodeOnline(self, instance.primary_node)
7286 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7287 msg="cannot rename")
7288 self.instance = instance
7290 new_name = self.op.new_name
7291 if self.op.name_check:
7292 hostname = netutils.GetHostname(name=new_name)
7293 if hostname.name != new_name:
7294 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
7296 if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
7297 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
7298 " same as given hostname '%s'") %
7299                                      (hostname.name, self.op.new_name),
7300                                      errors.ECODE_INVAL)
7301 new_name = self.op.new_name = hostname.name
7302 if (self.op.ip_check and
7303 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
7304 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7305 (hostname.ip, new_name),
7306 errors.ECODE_NOTUNIQUE)
7308 instance_list = self.cfg.GetInstanceList()
7309 if new_name in instance_list and new_name != instance.name:
7310 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7311 new_name, errors.ECODE_EXISTS)
7313 def Exec(self, feedback_fn):
7314 """Rename the instance.
7317 inst = self.instance
7318 old_name = inst.name
7320 rename_file_storage = False
7321 if (inst.disk_template in constants.DTS_FILEBASED and
7322 self.op.new_name != inst.name):
7323 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7324 rename_file_storage = True
7326 self.cfg.RenameInstance(inst.name, self.op.new_name)
7327 # Change the instance lock. This is definitely safe while we hold the BGL.
7328 # Otherwise the new lock would have to be added in acquired mode.
7329     assert self.REQ_BGL, "Shouldn't hold the BGL otherwise"
7330     self.glm.remove(locking.LEVEL_INSTANCE, old_name)
7331 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
7333 # re-read the instance from the configuration after rename
7334 inst = self.cfg.GetInstanceInfo(self.op.new_name)
7336 if rename_file_storage:
7337 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7338 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
7339 old_file_storage_dir,
7340 new_file_storage_dir)
7341 result.Raise("Could not rename on node %s directory '%s' to '%s'"
7342 " (but the instance has been renamed in Ganeti)" %
7343 (inst.primary_node, old_file_storage_dir,
7344 new_file_storage_dir))
7346 _StartInstanceDisks(self, inst, None)
7347     try:
7348       result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
7349 old_name, self.op.debug_level)
7350       msg = result.fail_msg
7351       if msg:
7352         msg = ("Could not run OS rename script for instance %s on node %s"
7353 " (but the instance has been renamed in Ganeti): %s" %
7354 (inst.name, inst.primary_node, msg))
7355 self.proc.LogWarning(msg)
7356     finally:
7357       _ShutdownInstanceDisks(self, inst)
7359     return inst.name
7362 class LUInstanceRemove(LogicalUnit):
7363 """Remove an instance.
7366 HPATH = "instance-remove"
7367   HTYPE = constants.HTYPE_INSTANCE
7368   REQ_BGL = False
7370 def ExpandNames(self):
7371 self._ExpandAndLockInstance()
7372 self.needed_locks[locking.LEVEL_NODE] = []
7373 self.needed_locks[locking.LEVEL_NODE_RES] = []
7374 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7376 def DeclareLocks(self, level):
7377 if level == locking.LEVEL_NODE:
7378 self._LockInstancesNodes()
7379 elif level == locking.LEVEL_NODE_RES:
7380       # Copy node locks
7381       self.needed_locks[locking.LEVEL_NODE_RES] = \
7382 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
7384   def BuildHooksEnv(self):
7385     """Build hooks env.
7387     This runs on master, primary and secondary nodes of the instance.
7389     """
7390     env = _BuildInstanceHookEnvByObject(self, self.instance)
7391     env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
7392     return env
7394 def BuildHooksNodes(self):
7395 """Build hooks nodes.
7398 nl = [self.cfg.GetMasterNode()]
7399 nl_post = list(self.instance.all_nodes) + nl
7400 return (nl, nl_post)
7402 def CheckPrereq(self):
7403 """Check prerequisites.
7405     This checks that the instance is in the cluster.
7407     """
7408 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7409 assert self.instance is not None, \
7410 "Cannot retrieve locked instance %s" % self.op.instance_name
7412 def Exec(self, feedback_fn):
7413 """Remove the instance.
7416 instance = self.instance
7417 logging.info("Shutting down instance %s on node %s",
7418 instance.name, instance.primary_node)
7420 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
7421 self.op.shutdown_timeout)
7422 msg = result.fail_msg
7423     if msg:
7424       if self.op.ignore_failures:
7425         feedback_fn("Warning: can't shutdown instance: %s" % msg)
7426       else:
7427         raise errors.OpExecError("Could not shutdown instance %s on"
7428                                  " node %s: %s" %
7429                                  (instance.name, instance.primary_node, msg))
7431 assert (self.owned_locks(locking.LEVEL_NODE) ==
7432 self.owned_locks(locking.LEVEL_NODE_RES))
7433 assert not (set(instance.all_nodes) -
7434 self.owned_locks(locking.LEVEL_NODE)), \
7435 "Not owning correct locks"
7437 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
7440 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
7441 """Utility function to remove an instance.
7444 logging.info("Removing block devices for instance %s", instance.name)
7446 if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
7447 if not ignore_failures:
7448 raise errors.OpExecError("Can't remove instance's disks")
7449 feedback_fn("Warning: can't remove instance's disks")
7451 logging.info("Removing instance %s out of cluster config", instance.name)
7453 lu.cfg.RemoveInstance(instance.name)
7455 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
7456 "Instance lock removal conflict"
7458 # Remove lock for the instance
7459 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
7462 class LUInstanceQuery(NoHooksLU):
7463 """Logical unit for querying instances.
7466 # pylint: disable=W0142
7469 def CheckArguments(self):
7470 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
7471 self.op.output_fields, self.op.use_locking)
7473 def ExpandNames(self):
7474 self.iq.ExpandNames(self)
7476 def DeclareLocks(self, level):
7477 self.iq.DeclareLocks(self, level)
7479 def Exec(self, feedback_fn):
7480 return self.iq.OldStyleQuery(self)
7483 class LUInstanceFailover(LogicalUnit):
7484 """Failover an instance.
7487 HPATH = "instance-failover"
7488   HTYPE = constants.HTYPE_INSTANCE
7489   REQ_BGL = False
7491 def CheckArguments(self):
7492 """Check the arguments.
7495 self.iallocator = getattr(self.op, "iallocator", None)
7496 self.target_node = getattr(self.op, "target_node", None)
7498 def ExpandNames(self):
7499 self._ExpandAndLockInstance()
7501 if self.op.target_node is not None:
7502 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7504 self.needed_locks[locking.LEVEL_NODE] = []
7505 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7507 self.needed_locks[locking.LEVEL_NODE_RES] = []
7508 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
7510 ignore_consistency = self.op.ignore_consistency
7511 shutdown_timeout = self.op.shutdown_timeout
7512     self._migrater = TLMigrateInstance(self, self.op.instance_name,
7513                                        cleanup=False,
7514                                        failover=True,
7515 ignore_consistency=ignore_consistency,
7516 shutdown_timeout=shutdown_timeout,
7517 ignore_ipolicy=self.op.ignore_ipolicy)
7518 self.tasklets = [self._migrater]
7520 def DeclareLocks(self, level):
7521 if level == locking.LEVEL_NODE:
7522 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
7523 if instance.disk_template in constants.DTS_EXT_MIRROR:
7524 if self.op.target_node is None:
7525 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7526 else:
7527 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7528 self.op.target_node]
7529 del self.recalculate_locks[locking.LEVEL_NODE]
7530 else:
7531 self._LockInstancesNodes()
7532 elif level == locking.LEVEL_NODE_RES:
7534 self.needed_locks[locking.LEVEL_NODE_RES] = \
7535 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
7537 def BuildHooksEnv(self):
7540 This runs on master, primary and secondary nodes of the instance.
7543 instance = self._migrater.instance
7544 source_node = instance.primary_node
7545 target_node = self.op.target_node
7547 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
7548 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7549 "OLD_PRIMARY": source_node,
7550 "NEW_PRIMARY": target_node,
7553 if instance.disk_template in constants.DTS_INT_MIRROR:
7554 env["OLD_SECONDARY"] = instance.secondary_nodes[0]
7555 env["NEW_SECONDARY"] = source_node
7557 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
7559 env.update(_BuildInstanceHookEnvByObject(self, instance))
7561 return env
7563 def BuildHooksNodes(self):
7564 """Build hooks nodes.
7567 instance = self._migrater.instance
7568 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
7569 return (nl, nl + [instance.primary_node])
7572 class LUInstanceMigrate(LogicalUnit):
7573 """Migrate an instance.
7575 This is migration without shutting down, compared to the failover,
7576 which is done with shutdown.
7579 HPATH = "instance-migrate"
7580 HTYPE = constants.HTYPE_INSTANCE
7583 def ExpandNames(self):
7584 self._ExpandAndLockInstance()
7586 if self.op.target_node is not None:
7587 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7589 self.needed_locks[locking.LEVEL_NODE] = []
7590 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7592 self.needed_locks[locking.LEVEL_NODE_RES] = []
7593 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
7595 self._migrater = \
7596 TLMigrateInstance(self, self.op.instance_name,
7597 cleanup=self.op.cleanup,
7598 failover=False,
7599 fallback=self.op.allow_failover,
7600 allow_runtime_changes=self.op.allow_runtime_changes,
7601 ignore_ipolicy=self.op.ignore_ipolicy)
7602 self.tasklets = [self._migrater]
7604 def DeclareLocks(self, level):
7605 if level == locking.LEVEL_NODE:
7606 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
7607 if instance.disk_template in constants.DTS_EXT_MIRROR:
7608 if self.op.target_node is None:
7609 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7610 else:
7611 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7612 self.op.target_node]
7613 del self.recalculate_locks[locking.LEVEL_NODE]
7614 else:
7615 self._LockInstancesNodes()
7616 elif level == locking.LEVEL_NODE_RES:
7618 self.needed_locks[locking.LEVEL_NODE_RES] = \
7619 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
7621 def BuildHooksEnv(self):
7624 This runs on master, primary and secondary nodes of the instance.
7627 instance = self._migrater.instance
7628 source_node = instance.primary_node
7629 target_node = self.op.target_node
7630 env = _BuildInstanceHookEnvByObject(self, instance)
7632 "MIGRATE_LIVE": self._migrater.live,
7633 "MIGRATE_CLEANUP": self.op.cleanup,
7634 "OLD_PRIMARY": source_node,
7635 "NEW_PRIMARY": target_node,
7636 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7639 if instance.disk_template in constants.DTS_INT_MIRROR:
7640 env["OLD_SECONDARY"] = target_node
7641 env["NEW_SECONDARY"] = source_node
7643 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
7647 def BuildHooksNodes(self):
7648 """Build hooks nodes.
7651 instance = self._migrater.instance
7652 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
7653 return (nl, nl + [instance.primary_node])
7656 class LUInstanceMove(LogicalUnit):
7657 """Move an instance by data-copying.
7660 HPATH = "instance-move"
7661 HTYPE = constants.HTYPE_INSTANCE
7664 def ExpandNames(self):
7665 self._ExpandAndLockInstance()
7666 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7667 self.op.target_node = target_node
7668 self.needed_locks[locking.LEVEL_NODE] = [target_node]
7669 self.needed_locks[locking.LEVEL_NODE_RES] = []
7670 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7672 def DeclareLocks(self, level):
7673 if level == locking.LEVEL_NODE:
7674 self._LockInstancesNodes(primary_only=True)
7675 elif level == locking.LEVEL_NODE_RES:
7677 self.needed_locks[locking.LEVEL_NODE_RES] = \
7678 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
7680 def BuildHooksEnv(self):
7683 This runs on master, primary and secondary nodes of the instance.
7687 "TARGET_NODE": self.op.target_node,
7688 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7690 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7691 return env
7693 def BuildHooksNodes(self):
7694 """Build hooks nodes.
7697 nl = [
7698 self.cfg.GetMasterNode(),
7699 self.instance.primary_node,
7700 self.op.target_node,
7701 ]
7702 return (nl, nl)
7704 def CheckPrereq(self):
7705 """Check prerequisites.
7707 This checks that the instance is in the cluster.
7710 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7711 assert self.instance is not None, \
7712 "Cannot retrieve locked instance %s" % self.op.instance_name
7714 node = self.cfg.GetNodeInfo(self.op.target_node)
7715 assert node is not None, \
7716 "Cannot retrieve locked node %s" % self.op.target_node
7718 self.target_node = target_node = node.name
7720 if target_node == instance.primary_node:
7721 raise errors.OpPrereqError("Instance %s is already on the node %s" %
7722 (instance.name, target_node),
7723 errors.ECODE_STATE)
7725 bep = self.cfg.GetClusterInfo().FillBE(instance)
7727 for idx, dsk in enumerate(instance.disks):
7728 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
7729 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
7730 " cannot copy" % idx, errors.ECODE_STATE)
7732 _CheckNodeOnline(self, target_node)
7733 _CheckNodeNotDrained(self, target_node)
7734 _CheckNodeVmCapable(self, target_node)
7735 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
7736 self.cfg.GetNodeGroup(node.group))
7737 _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
7738 ignore=self.op.ignore_ipolicy)
7740 if instance.admin_state == constants.ADMINST_UP:
7741 # check memory requirements on the secondary node
7742 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
7743 instance.name, bep[constants.BE_MAXMEM],
7744 instance.hypervisor)
7746 self.LogInfo("Not checking memory on the secondary node as"
7747 " instance will not be started")
7749 # check bridge existence
7750 _CheckInstanceBridgesExist(self, instance, node=target_node)
7752 def Exec(self, feedback_fn):
7753 """Move an instance.
7755 The move is done by shutting it down on its present node, copying
7756 the data over (slow) and starting it on the new node.
7759 instance = self.instance
7761 source_node = instance.primary_node
7762 target_node = self.target_node
7764 self.LogInfo("Shutting down instance %s on source node %s",
7765 instance.name, source_node)
7767 assert (self.owned_locks(locking.LEVEL_NODE) ==
7768 self.owned_locks(locking.LEVEL_NODE_RES))
7770 result = self.rpc.call_instance_shutdown(source_node, instance,
7771 self.op.shutdown_timeout)
7772 msg = result.fail_msg
7774 if self.op.ignore_consistency:
7775 self.proc.LogWarning("Could not shutdown instance %s on node %s."
7776 " Proceeding anyway. Please make sure node"
7777 " %s is down. Error details: %s",
7778 instance.name, source_node, source_node, msg)
7780 raise errors.OpExecError("Could not shutdown instance %s on"
7782 (instance.name, source_node, msg))
7784 # create the target disks
7785 try:
7786 _CreateDisks(self, instance, target_node=target_node)
7787 except errors.OpExecError:
7788 self.LogWarning("Device creation failed, reverting...")
7789 try:
7790 _RemoveDisks(self, instance, target_node=target_node)
7791 finally:
7792 self.cfg.ReleaseDRBDMinors(instance.name)
7793 raise
7795 cluster_name = self.cfg.GetClusterInfo().cluster_name
7797 errs = []
7798 # activate, get path, copy the data over
7799 for idx, disk in enumerate(instance.disks):
7800 self.LogInfo("Copying data for disk %d", idx)
7801 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
7802 instance.name, True, idx)
7804 self.LogWarning("Can't assemble newly created disk %d: %s",
7805 idx, result.fail_msg)
7806 errs.append(result.fail_msg)
7808 dev_path = result.payload
7809 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
7810 target_node, dev_path,
7811 cluster_name)
7812 if result.fail_msg:
7813 self.LogWarning("Can't copy data over for disk %d: %s",
7814 idx, result.fail_msg)
7815 errs.append(result.fail_msg)
7816 break
7819 self.LogWarning("Some disks failed to copy, aborting")
7821 _RemoveDisks(self, instance, target_node=target_node)
7823 self.cfg.ReleaseDRBDMinors(instance.name)
7824 raise errors.OpExecError("Errors during disk copy: %s" %
7827 instance.primary_node = target_node
7828 self.cfg.Update(instance, feedback_fn)
7830 self.LogInfo("Removing the disks on the original node")
7831 _RemoveDisks(self, instance, target_node=source_node)
7833 # Only start the instance if it's marked as up
7834 if instance.admin_state == constants.ADMINST_UP:
7835 self.LogInfo("Starting instance %s on node %s",
7836 instance.name, target_node)
7838 disks_ok, _ = _AssembleInstanceDisks(self, instance,
7839 ignore_secondaries=True)
7840 if not disks_ok:
7841 _ShutdownInstanceDisks(self, instance)
7842 raise errors.OpExecError("Can't activate the instance's disks")
7844 result = self.rpc.call_instance_start(target_node,
7845 (instance, None, None), False)
7846 msg = result.fail_msg
7847 if msg:
7848 _ShutdownInstanceDisks(self, instance)
7849 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
7850 (instance.name, target_node, msg))
7853 class LUNodeMigrate(LogicalUnit):
7854 """Migrate all instances from a node.
7857 HPATH = "node-migrate"
7858 HTYPE = constants.HTYPE_NODE
7861 def CheckArguments(self):
7862 pass
7864 def ExpandNames(self):
7865 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7867 self.share_locks = _ShareAll()
7868 self.needed_locks = {
7869 locking.LEVEL_NODE: [self.op.node_name],
7870 }
7872 def BuildHooksEnv(self):
7875 This runs on the master, the primary and all the secondaries.
7879 "NODE_NAME": self.op.node_name,
7880 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7883 def BuildHooksNodes(self):
7884 """Build hooks nodes.
7887 nl = [self.cfg.GetMasterNode()]
7888 return (nl, nl)
7890 def CheckPrereq(self):
7891 pass
7893 def Exec(self, feedback_fn):
7894 # Prepare jobs for migration instances
7895 allow_runtime_changes = self.op.allow_runtime_changes
7896 jobs = [
7897 [opcodes.OpInstanceMigrate(instance_name=inst.name,
7898 mode=self.op.mode,
7899 live=self.op.live,
7900 iallocator=self.op.iallocator,
7901 target_node=self.op.target_node,
7902 allow_runtime_changes=allow_runtime_changes,
7903 ignore_ipolicy=self.op.ignore_ipolicy)]
7904 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
7905 ]
7907 # TODO: Run iallocator in this opcode and pass correct placement options to
7908 # OpInstanceMigrate. Since other jobs can modify the cluster between
7909 # running the iallocator and the actual migration, a good consistency model
7910 # will have to be found.
7912 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
7913 frozenset([self.op.node_name]))
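# Illustrative note (not original code): "jobs" above is a list of
# single-opcode jobs, one per primary instance, e.g.
#
#   [[opcodes.OpInstanceMigrate(instance_name="inst1.example.com", ...)],
#    [opcodes.OpInstanceMigrate(instance_name="inst2.example.com", ...)]]
#
# Each migration therefore runs as an independent job, so one failure does
# not cancel the others; the job IDs are reported back to the caller via
# ResultWithJobs. The instance names here are made up.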
7915 return ResultWithJobs(jobs)
7918 class TLMigrateInstance(Tasklet):
7919 """Tasklet class for instance migration.
7922 @ivar live: whether the migration will be done live or non-live;
7923 this variable is initialized only after CheckPrereq has run
7924 @type cleanup: boolean
7925 @ivar cleanup: Whether we clean up from a failed migration
7926 @type iallocator: string
7927 @ivar iallocator: The iallocator used to determine target_node
7928 @type target_node: string
7929 @ivar target_node: If given, the target_node to reallocate the instance to
7930 @type failover: boolean
7931 @ivar failover: Whether operation results in failover or migration
7932 @type fallback: boolean
7933 @ivar fallback: Whether fallback to failover is allowed if migration is
7934 not possible
7935 @type ignore_consistency: boolean
7936 @ivar ignore_consistency: Whether we should ignore consistency between the
7937 source and target node
7938 @type shutdown_timeout: int
7939 @ivar shutdown_timeout: In case of failover, the timeout of the shutdown
7940 @type ignore_ipolicy: bool
7941 @ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
7946 _MIGRATION_POLL_INTERVAL = 1 # seconds
7947 _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
7949 def __init__(self, lu, instance_name, cleanup=False,
7950 failover=False, fallback=False,
7951 ignore_consistency=False,
7952 allow_runtime_changes=True,
7953 shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
7954 ignore_ipolicy=False):
7955 """Initializes this class.
7958 Tasklet.__init__(self, lu)
7961 self.instance_name = instance_name
7962 self.cleanup = cleanup
7963 self.live = False # will be overridden later
7964 self.failover = failover
7965 self.fallback = fallback
7966 self.ignore_consistency = ignore_consistency
7967 self.shutdown_timeout = shutdown_timeout
7968 self.ignore_ipolicy = ignore_ipolicy
7969 self.allow_runtime_changes = allow_runtime_changes
7971 def CheckPrereq(self):
7972 """Check prerequisites.
7974 This checks that the instance is in the cluster.
7977 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
7978 instance = self.cfg.GetInstanceInfo(instance_name)
7979 assert instance is not None
7980 self.instance = instance
7981 cluster = self.cfg.GetClusterInfo()
7983 if (not self.cleanup and
7984 not instance.admin_state == constants.ADMINST_UP and
7985 not self.failover and self.fallback):
7986 self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
7987 " switching to failover")
7988 self.failover = True
7990 if instance.disk_template not in constants.DTS_MIRRORED:
7991 if self.failover:
7992 text = "failovers"
7993 else:
7994 text = "migrations"
7995 raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
7996 " %s" % (instance.disk_template, text),
7997 errors.ECODE_STATE)
7999 if instance.disk_template in constants.DTS_EXT_MIRROR:
8000 _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
8002 if self.lu.op.iallocator:
8003 self._RunAllocator()
8004 else:
8005 # We set self.target_node, as it is required by
8006 # BuildHooksEnv
8007 self.target_node = self.lu.op.target_node
8009 # Check that the target node is correct in terms of instance policy
8010 nodeinfo = self.cfg.GetNodeInfo(self.target_node)
8011 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8012 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
8013 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8014 ignore=self.ignore_ipolicy)
8016 # self.target_node is already populated, either directly or by the
8018 target_node = self.target_node
8019 if self.target_node == instance.primary_node:
8020 raise errors.OpPrereqError("Cannot migrate instance %s"
8021 " to its primary (%s)" %
8022 (instance.name, instance.primary_node))
8024 if len(self.lu.tasklets) == 1:
8025 # It is safe to release locks only when we're the only tasklet
8027 _ReleaseLocks(self.lu, locking.LEVEL_NODE,
8028 keep=[instance.primary_node, self.target_node])
8030 else:
8031 secondary_nodes = instance.secondary_nodes
8032 if not secondary_nodes:
8033 raise errors.ConfigurationError("No secondary node but using"
8034 " %s disk template" %
8035 instance.disk_template)
8036 target_node = secondary_nodes[0]
8037 if self.lu.op.iallocator or (self.lu.op.target_node and
8038 self.lu.op.target_node != target_node):
8040 text = "failed over"
8043 raise errors.OpPrereqError("Instances with disk template %s cannot"
8044 " be %s to arbitrary nodes"
8045 " (neither an iallocator nor a target"
8046 " node can be passed)" %
8047 (instance.disk_template, text),
8049 nodeinfo = self.cfg.GetNodeInfo(target_node)
8050 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8051 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
8052 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8053 ignore=self.ignore_ipolicy)
8055 i_be = cluster.FillBE(instance)
8057 # check memory requirements on the secondary node
8058 if (not self.cleanup and
8059 (not self.failover or instance.admin_state == constants.ADMINST_UP)):
8060 self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
8061 "migrating instance %s" %
8063 i_be[constants.BE_MINMEM],
8064 instance.hypervisor)
8066 self.lu.LogInfo("Not checking memory on the secondary node as"
8067 " instance will not be started")
8069 # check if failover must be forced instead of migration
8070 if (not self.cleanup and not self.failover and
8071 i_be[constants.BE_ALWAYS_FAILOVER]):
8072 self.lu.LogInfo("Instance configured to always failover; fallback"
8074 self.failover = True
8076 # check bridge existence
8077 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
8079 if not self.cleanup:
8080 _CheckNodeNotDrained(self.lu, target_node)
8081 if not self.failover:
8082 result = self.rpc.call_instance_migratable(instance.primary_node,
8083 instance)
8084 if result.fail_msg and self.fallback:
8085 self.lu.LogInfo("Can't migrate, instance offline, fallback to"
8086 " failover")
8087 self.failover = True
8088 else:
8089 result.Raise("Can't migrate, please use failover",
8090 prereq=True, ecode=errors.ECODE_STATE)
8092 assert not (self.failover and self.cleanup)
8094 if not self.failover:
8095 if self.lu.op.live is not None and self.lu.op.mode is not None:
8096 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
8097 " parameters are accepted",
8099 if self.lu.op.live is not None:
8101 self.lu.op.mode = constants.HT_MIGRATION_LIVE
8103 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
8104 # reset the 'live' parameter to None so that repeated
8105 # invocations of CheckPrereq do not raise an exception
8106 self.lu.op.live = None
8107 elif self.lu.op.mode is None:
8108 # read the default value from the hypervisor
8109 i_hv = cluster.FillHV(self.instance, skip_globals=False)
8110 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
8112 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
8113 else:
8114 # Failover is never live
8115 self.live = False
8117 if not (self.failover or self.cleanup):
8118 remote_info = self.rpc.call_instance_info(instance.primary_node,
8119 instance.name,
8120 instance.hypervisor)
8121 remote_info.Raise("Error checking instance on node %s" %
8122 instance.primary_node)
8123 instance_running = bool(remote_info.payload)
8124 if instance_running:
8125 self.current_mem = int(remote_info.payload["memory"])
8127 def _RunAllocator(self):
8128 """Run the allocator based on input opcode.
8131 # FIXME: add a self.ignore_ipolicy option
8132 ial = IAllocator(self.cfg, self.rpc,
8133 mode=constants.IALLOCATOR_MODE_RELOC,
8134 name=self.instance_name,
8135 relocate_from=[self.instance.primary_node],
8138 ial.Run(self.lu.op.iallocator)
8141 raise errors.OpPrereqError("Can't compute nodes using"
8142 " iallocator '%s': %s" %
8143 (self.lu.op.iallocator, ial.info),
8145 if len(ial.result) != ial.required_nodes:
8146 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8147 " of nodes (%s), required %s" %
8148 (self.lu.op.iallocator, len(ial.result),
8149 ial.required_nodes), errors.ECODE_FAULT)
8150 self.target_node = ial.result[0]
8151 self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8152 self.instance_name, self.lu.op.iallocator,
8153 utils.CommaJoin(ial.result))
8155 def _WaitUntilSync(self):
8156 """Poll with custom rpc for disk sync.
8158 This uses our own step-based rpc call.
8161 self.feedback_fn("* wait until resync is done")
8165 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
8167 (self.instance.disks,
8170 for node, nres in result.items():
8171 nres.Raise("Cannot resync disks on node %s" % node)
8172 node_done, node_percent = nres.payload
8173 all_done = all_done and node_done
8174 if node_percent is not None:
8175 min_percent = min(min_percent, node_percent)
8177 if min_percent < 100:
8178 self.feedback_fn(" - progress: %.1f%%" % min_percent)
8181 def _EnsureSecondary(self, node):
8182 """Demote a node to secondary.
8185 self.feedback_fn("* switching node %s to secondary mode" % node)
8187 for dev in self.instance.disks:
8188 self.cfg.SetDiskID(dev, node)
8190 result = self.rpc.call_blockdev_close(node, self.instance.name,
8191 self.instance.disks)
8192 result.Raise("Cannot change disk to secondary on node %s" % node)
8194 def _GoStandalone(self):
8195 """Disconnect from the network.
8198 self.feedback_fn("* changing into standalone mode")
8199 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
8200 self.instance.disks)
8201 for node, nres in result.items():
8202 nres.Raise("Cannot disconnect disks node %s" % node)
8204 def _GoReconnect(self, multimaster):
8205 """Reconnect to the network.
8211 msg = "single-master"
8212 self.feedback_fn("* changing disks into %s mode" % msg)
8213 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
8214 (self.instance.disks, self.instance),
8215 self.instance.name, multimaster)
8216 for node, nres in result.items():
8217 nres.Raise("Cannot change disks config on node %s" % node)
8219 def _ExecCleanup(self):
8220 """Try to cleanup after a failed migration.
8222 The cleanup is done by:
8223 - check that the instance is running only on one node
8224 (and update the config if needed)
8225 - change disks on its secondary node to secondary
8226 - wait until disks are fully synchronized
8227 - disconnect from the network
8228 - change disks into single-master mode
8229 - wait again until disks are fully synchronized
8232 instance = self.instance
8233 target_node = self.target_node
8234 source_node = self.source_node
8236 # check running on only one node
8237 self.feedback_fn("* checking where the instance actually runs"
8238 " (if this hangs, the hypervisor might be in"
8240 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
8241 for node, result in ins_l.items():
8242 result.Raise("Can't contact node %s" % node)
8244 runningon_source = instance.name in ins_l[source_node].payload
8245 runningon_target = instance.name in ins_l[target_node].payload
8247 if runningon_source and runningon_target:
8248 raise errors.OpExecError("Instance seems to be running on two nodes,"
8249 " or the hypervisor is confused; you will have"
8250 " to ensure manually that it runs only on one"
8251 " and restart this operation")
8253 if not (runningon_source or runningon_target):
8254 raise errors.OpExecError("Instance does not seem to be running at all;"
8255 " in this case it's safer to repair by"
8256 " running 'gnt-instance stop' to ensure disk"
8257 " shutdown, and then restarting it")
8259 if runningon_target:
8260 # the migration has actually succeeded, we need to update the config
8261 self.feedback_fn("* instance running on secondary node (%s),"
8262 " updating config" % target_node)
8263 instance.primary_node = target_node
8264 self.cfg.Update(instance, self.feedback_fn)
8265 demoted_node = source_node
8267 self.feedback_fn("* instance confirmed to be running on its"
8268 " primary node (%s)" % source_node)
8269 demoted_node = target_node
8271 if instance.disk_template in constants.DTS_INT_MIRROR:
8272 self._EnsureSecondary(demoted_node)
8274 self._WaitUntilSync()
8275 except errors.OpExecError:
8276 # we ignore here errors, since if the device is standalone, it
8277 # won't be able to sync
8279 self._GoStandalone()
8280 self._GoReconnect(False)
8281 self._WaitUntilSync()
8283 self.feedback_fn("* done")
8285 def _RevertDiskStatus(self):
8286 """Try to revert the disk status after a failed migration.
8289 target_node = self.target_node
8290 if self.instance.disk_template in constants.DTS_EXT_MIRROR:
8291 return
8293 try:
8294 self._EnsureSecondary(target_node)
8295 self._GoStandalone()
8296 self._GoReconnect(False)
8297 self._WaitUntilSync()
8298 except errors.OpExecError, err:
8299 self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
8300 " please try to recover the instance manually;"
8301 " error '%s'" % str(err))
8303 def _AbortMigration(self):
8304 """Call the hypervisor code to abort a started migration.
8307 instance = self.instance
8308 target_node = self.target_node
8309 source_node = self.source_node
8310 migration_info = self.migration_info
8312 abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
8313 instance,
8314 migration_info,
8315 False)
8316 abort_msg = abort_result.fail_msg
8318 logging.error("Aborting migration failed on target node %s: %s",
8319 target_node, abort_msg)
8320 # Don't raise an exception here, as we still have to try to revert the
8321 # disk status, even if this step failed.
8323 abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
8324 instance, False, self.live)
8325 abort_msg = abort_result.fail_msg
8327 logging.error("Aborting migration failed on source node %s: %s",
8328 source_node, abort_msg)
8330 def _ExecMigration(self):
8331 """Migrate an instance.
8333 The migrate is done by:
8334 - change the disks into dual-master mode
8335 - wait until disks are fully synchronized again
8336 - migrate the instance
8337 - change disks on the new secondary node (the old primary) to secondary
8338 - wait until disks are fully synchronized
8339 - change disks into single-master mode
8342 instance = self.instance
8343 target_node = self.target_node
8344 source_node = self.source_node
8346 # Check for hypervisor version mismatch and warn the user.
8347 nodeinfo = self.rpc.call_node_info([source_node, target_node],
8348 None, [self.instance.hypervisor])
8349 for ninfo in nodeinfo.values():
8350 ninfo.Raise("Unable to retrieve node information from node '%s'" %
8352 (_, _, (src_info, )) = nodeinfo[source_node].payload
8353 (_, _, (dst_info, )) = nodeinfo[target_node].payload
8355 if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
8356 (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
8357 src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
8358 dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
8359 if src_version != dst_version:
8360 self.feedback_fn("* warning: hypervisor version mismatch between"
8361 " source (%s) and target (%s) node" %
8362 (src_version, dst_version))
8364 self.feedback_fn("* checking disk consistency between source and target")
8365 for (idx, dev) in enumerate(instance.disks):
8366 if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
8367 raise errors.OpExecError("Disk %s is degraded or not fully"
8368 " synchronized on target node,"
8369 " aborting migration" % idx)
8371 if self.current_mem > self.tgt_free_mem:
8372 if not self.allow_runtime_changes:
8373 raise errors.OpExecError("Memory ballooning not allowed and not enough"
8374 " free memory to fit instance %s on target"
8375 " node %s (have %dMB, need %dMB)" %
8376 (instance.name, target_node,
8377 self.tgt_free_mem, self.current_mem))
8378 self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
8379 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
8380 instance,
8381 self.tgt_free_mem)
8382 rpcres.Raise("Cannot modify instance runtime memory")
8384 # First get the migration information from the remote node
8385 result = self.rpc.call_migration_info(source_node, instance)
8386 msg = result.fail_msg
8388 log_err = ("Failed fetching source migration information from %s: %s" %
8390 logging.error(log_err)
8391 raise errors.OpExecError(log_err)
8393 self.migration_info = migration_info = result.payload
8395 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8396 # Then switch the disks to master/master mode
8397 self._EnsureSecondary(target_node)
8398 self._GoStandalone()
8399 self._GoReconnect(True)
8400 self._WaitUntilSync()
8402 self.feedback_fn("* preparing %s to accept the instance" % target_node)
8403 result = self.rpc.call_accept_instance(target_node,
8404 instance,
8405 migration_info,
8406 self.nodes_ip[target_node])
8408 msg = result.fail_msg
8409 if msg:
8410 logging.error("Instance pre-migration failed, trying to revert"
8411 " disk status: %s", msg)
8412 self.feedback_fn("Pre-migration failed, aborting")
8413 self._AbortMigration()
8414 self._RevertDiskStatus()
8415 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
8416 (instance.name, msg))
8418 self.feedback_fn("* migrating instance to %s" % target_node)
8419 result = self.rpc.call_instance_migrate(source_node, instance,
8420 self.nodes_ip[target_node],
8421 self.live)
8422 msg = result.fail_msg
8423 if msg:
8424 logging.error("Instance migration failed, trying to revert"
8425 " disk status: %s", msg)
8426 self.feedback_fn("Migration failed, aborting")
8427 self._AbortMigration()
8428 self._RevertDiskStatus()
8429 raise errors.OpExecError("Could not migrate instance %s: %s" %
8430 (instance.name, msg))
8432 self.feedback_fn("* starting memory transfer")
8433 last_feedback = time.time()
8434 while True:
8435 result = self.rpc.call_instance_get_migration_status(source_node,
8436 instance)
8437 msg = result.fail_msg
8438 ms = result.payload # MigrationStatus instance
8439 if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
8440 logging.error("Instance migration failed, trying to revert"
8441 " disk status: %s", msg)
8442 self.feedback_fn("Migration failed, aborting")
8443 self._AbortMigration()
8444 self._RevertDiskStatus()
8445 raise errors.OpExecError("Could not migrate instance %s: %s" %
8446 (instance.name, msg))
8448 if result.payload.status != constants.HV_MIGRATION_ACTIVE:
8449 self.feedback_fn("* memory transfer complete")
8452 if (utils.TimeoutExpired(last_feedback,
8453 self._MIGRATION_FEEDBACK_INTERVAL) and
8454 ms.transferred_ram is not None):
8455 mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
8456 self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
8457 last_feedback = time.time()
8459 time.sleep(self._MIGRATION_POLL_INTERVAL)
8461 result = self.rpc.call_instance_finalize_migration_src(source_node,
8462 instance,
8463 True,
8464 self.live)
8465 msg = result.fail_msg
8466 if msg:
8467 logging.error("Instance migration succeeded, but finalization failed"
8468 " on the source node: %s", msg)
8469 raise errors.OpExecError("Could not finalize instance migration: %s" %
8472 instance.primary_node = target_node
8474 # distribute new instance config to the other nodes
8475 self.cfg.Update(instance, self.feedback_fn)
8477 result = self.rpc.call_instance_finalize_migration_dst(target_node,
8478 instance,
8479 migration_info,
8480 True)
8481 msg = result.fail_msg
8482 if msg:
8483 logging.error("Instance migration succeeded, but finalization failed"
8484 " on the target node: %s", msg)
8485 raise errors.OpExecError("Could not finalize instance migration: %s" %
8486 msg)
8488 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8489 self._EnsureSecondary(source_node)
8490 self._WaitUntilSync()
8491 self._GoStandalone()
8492 self._GoReconnect(False)
8493 self._WaitUntilSync()
8495 # If the instance's disk template is `rbd' and there was a successful
8496 # migration, unmap the device from the source node.
8497 if self.instance.disk_template == constants.DT_RBD:
8498 disks = _ExpandCheckDisks(instance, instance.disks)
8499 self.feedback_fn("* unmapping instance's disks from %s" % source_node)
8500 for disk in disks:
8501 result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
8502 msg = result.fail_msg
8503 if msg:
8504 logging.error("Migration was successful, but couldn't unmap the"
8505 " block device %s on source node %s: %s",
8506 disk.iv_name, source_node, msg)
8507 logging.error("You need to unmap the device %s manually on %s",
8508 disk.iv_name, source_node)
8510 self.feedback_fn("* done")
8512 def _ExecFailover(self):
8513 """Failover an instance.
8515 The failover is done by shutting it down on its present node and
8516 starting it on the secondary.
8519 instance = self.instance
8520 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
8522 source_node = instance.primary_node
8523 target_node = self.target_node
8525 if instance.admin_state == constants.ADMINST_UP:
8526 self.feedback_fn("* checking disk consistency between source and target")
8527 for (idx, dev) in enumerate(instance.disks):
8528 # for drbd, these are drbd over lvm
8529 if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
8530 False):
8531 if primary_node.offline:
8532 self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
8533 " target node %s" %
8534 (primary_node.name, idx, target_node))
8535 elif not self.ignore_consistency:
8536 raise errors.OpExecError("Disk %s is degraded on target node,"
8537 " aborting failover" % idx)
8539 self.feedback_fn("* not checking disk consistency as instance is not"
8542 self.feedback_fn("* shutting down instance on source node")
8543 logging.info("Shutting down instance %s on node %s",
8544 instance.name, source_node)
8546 result = self.rpc.call_instance_shutdown(source_node, instance,
8547 self.shutdown_timeout)
8548 msg = result.fail_msg
8549 if msg:
8550 if self.ignore_consistency or primary_node.offline:
8551 self.lu.LogWarning("Could not shutdown instance %s on node %s,"
8552 " proceeding anyway; please make sure node"
8553 " %s is down; error details: %s",
8554 instance.name, source_node, source_node, msg)
8555 else:
8556 raise errors.OpExecError("Could not shutdown instance %s on"
8557 " node %s: %s" %
8558 (instance.name, source_node, msg))
8560 self.feedback_fn("* deactivating the instance's disks on source node")
8561 if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
8562 raise errors.OpExecError("Can't shut down the instance's disks")
8564 instance.primary_node = target_node
8565 # distribute new instance config to the other nodes
8566 self.cfg.Update(instance, self.feedback_fn)
8568 # Only start the instance if it's marked as up
8569 if instance.admin_state == constants.ADMINST_UP:
8570 self.feedback_fn("* activating the instance's disks on target node %s" %
8572 logging.info("Starting instance %s on node %s",
8573 instance.name, target_node)
8575 disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
8576 ignore_secondaries=True)
8577 if not disks_ok:
8578 _ShutdownInstanceDisks(self.lu, instance)
8579 raise errors.OpExecError("Can't activate the instance's disks")
8581 self.feedback_fn("* starting the instance on the target node %s" %
8583 result = self.rpc.call_instance_start(target_node, (instance, None, None),
8585 msg = result.fail_msg
8586 if msg:
8587 _ShutdownInstanceDisks(self.lu, instance)
8588 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
8589 (instance.name, target_node, msg))
8591 def Exec(self, feedback_fn):
8592 """Perform the migration.
8595 self.feedback_fn = feedback_fn
8596 self.source_node = self.instance.primary_node
8598 # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
8599 if self.instance.disk_template in constants.DTS_INT_MIRROR:
8600 self.target_node = self.instance.secondary_nodes[0]
8601 # Otherwise self.target_node has been populated either
8602 # directly, or through an iallocator.
8604 self.all_nodes = [self.source_node, self.target_node]
8605 self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
8606 in self.cfg.GetMultiNodeInfo(self.all_nodes))
8609 feedback_fn("Failover instance %s" % self.instance.name)
8610 self._ExecFailover()
8612 feedback_fn("Migrating instance %s" % self.instance.name)
8615 return self._ExecCleanup()
8617 return self._ExecMigration()
8620 def _CreateBlockDev(lu, node, instance, device, force_create, info,
8621 force_open):
8622 """Wrapper around L{_CreateBlockDevInner}.
8624 This method annotates the root device first.
8627 (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
8628 return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
8629 force_open)
8632 def _CreateBlockDevInner(lu, node, instance, device, force_create,
8633 info, force_open):
8634 """Create a tree of block devices on a given node.
8636 If this device type has to be created on secondaries, create it and
8639 If not, just recurse to children keeping the same 'force' value.
8641 @attention: The device has to be annotated already.
8643 @param lu: the lu on whose behalf we execute
8644 @param node: the node on which to create the device
8645 @type instance: L{objects.Instance}
8646 @param instance: the instance which owns the device
8647 @type device: L{objects.Disk}
8648 @param device: the device to create
8649 @type force_create: boolean
8650 @param force_create: whether to force creation of this device; this
8651 will be changed to True whenever we find a device which has the
8652 CreateOnSecondary() attribute
8653 @param info: the extra 'metadata' we should attach to the device
8654 (this will be represented as a LVM tag)
8655 @type force_open: boolean
8656 @param force_open: this parameter will be passed to the
8657 L{backend.BlockdevCreate} function where it specifies
8658 whether we run on primary or not, and it affects both
8659 the child assembly and the device's own Open() execution
8662 if device.CreateOnSecondary():
8663 force_create = True
8665 if device.children:
8666 for child in device.children:
8667 _CreateBlockDevInner(lu, node, instance, child, force_create,
8668 info, force_open)
8670 if not force_create:
8671 return
8673 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
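# Illustrative walk-through of _CreateBlockDevInner (not original code):
# for a DRBD8 device on a secondary node, CreateOnSecondary() returns True,
# so force_create is flipped on before recursing; both LV children are then
# created even though plain LVs would normally be skipped on a secondary,
# and finally the DRBD8 device itself is created on top of them.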
8676 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
8677 """Create a single block device on a given node.
8679 This will not recurse over children of the device, so they must be
8682 @param lu: the lu on whose behalf we execute
8683 @param node: the node on which to create the device
8684 @type instance: L{objects.Instance}
8685 @param instance: the instance which owns the device
8686 @type device: L{objects.Disk}
8687 @param device: the device to create
8688 @param info: the extra 'metadata' we should attach to the device
8689 (this will be represented as a LVM tag)
8690 @type force_open: boolean
8691 @param force_open: this parameter will be passed to the
8692 L{backend.BlockdevCreate} function where it specifies
8693 whether we run on primary or not, and it affects both
8694 the child assembly and the device's own Open() execution
8697 lu.cfg.SetDiskID(device, node)
8698 result = lu.rpc.call_blockdev_create(node, device, device.size,
8699 instance.name, force_open, info)
8700 result.Raise("Can't create block device %s on"
8701 " node %s for instance %s" % (device, node, instance.name))
8702 if device.physical_id is None:
8703 device.physical_id = result.payload
8706 def _GenerateUniqueNames(lu, exts):
8707 """Generate a suitable LV name.
8709 This will generate a logical volume name for the given instance.
8712 results = []
8713 for val in exts:
8714 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
8715 results.append("%s%s" % (new_id, val))
8716 return results
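# Example of the names produced above (illustrative, with shortened IDs):
# passing [".disk0_data", ".disk0_meta"] returns something like
# ["3d1a9ec4.disk0_data", "58f7b021.disk0_meta"], i.e. a fresh
# cluster-unique ID prefixed to each requested extension.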
8719 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
8720 iv_name, p_minor, s_minor):
8721 """Generate a drbd8 device complete with its children.
8724 assert len(vgnames) == len(names) == 2
8725 port = lu.cfg.AllocatePort()
8726 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
8728 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
8729 logical_id=(vgnames[0], names[0]),
8730 params={})
8731 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
8732 logical_id=(vgnames[1], names[1]),
8733 params={})
8734 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
8735 logical_id=(primary, secondary, port,
8736 p_minor, s_minor,
8737 shared_secret),
8738 children=[dev_data, dev_meta],
8739 iv_name=iv_name, params={})
8740 return drbd_dev
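# Sketch of the tree returned by _GenerateDRBD8Branch (illustrative):
#
#   drbd_dev (LD_DRBD8, size=size,
#             logical_id=(primary, secondary, port, p_minor, s_minor,
#                         shared_secret))
#     |- dev_data (LD_LV, size=size, logical_id=(vgnames[0], names[0]))
#     `- dev_meta (LD_LV, size=DRBD_META_SIZE,
#                  logical_id=(vgnames[1], names[1]))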
8743 _DISK_TEMPLATE_NAME_PREFIX = {
8744 constants.DT_PLAIN: "",
8745 constants.DT_RBD: ".rbd",
8749 _DISK_TEMPLATE_DEVICE_TYPE = {
8750 constants.DT_PLAIN: constants.LD_LV,
8751 constants.DT_FILE: constants.LD_FILE,
8752 constants.DT_SHARED_FILE: constants.LD_FILE,
8753 constants.DT_BLOCK: constants.LD_BLOCKDEV,
8754 constants.DT_RBD: constants.LD_RBD,
8755 }
8758 def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
8759 secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
8760 feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
8761 _req_shr_file_storage=opcodes.RequireSharedFileStorage):
8762 """Generate the entire disk layout for a given template type.
8765 #TODO: compute space requirements
8767 vgname = lu.cfg.GetVGName()
8768 disk_count = len(disk_info)
8771 if template_name == constants.DT_DISKLESS:
8773 elif template_name == constants.DT_DRBD8:
8774 if len(secondary_nodes) != 1:
8775 raise errors.ProgrammerError("Wrong template configuration")
8776 remote_node = secondary_nodes[0]
8777 minors = lu.cfg.AllocateDRBDMinor(
8778 [primary_node, remote_node] * len(disk_info), instance_name)
8780 (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
8782 drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
8785 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
8786 for i in range(disk_count)]):
8787 names.append(lv_prefix + "_data")
8788 names.append(lv_prefix + "_meta")
8789 for idx, disk in enumerate(disk_info):
8790 disk_index = idx + base_index
8791 data_vg = disk.get(constants.IDISK_VG, vgname)
8792 meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
8793 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
8794 disk[constants.IDISK_SIZE],
8796 names[idx * 2:idx * 2 + 2],
8797 "disk/%d" % disk_index,
8798 minors[idx * 2], minors[idx * 2 + 1])
8799 disk_dev.mode = disk[constants.IDISK_MODE]
8800 disks.append(disk_dev)
8803 raise errors.ProgrammerError("Wrong template configuration")
8805 if template_name == constants.DT_FILE:
8807 elif template_name == constants.DT_SHARED_FILE:
8808 _req_shr_file_storage()
8810 name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
8811 if name_prefix is None:
8814 names = _GenerateUniqueNames(lu, ["%s.disk%s" %
8815 (name_prefix, base_index + i)
8816 for i in range(disk_count)])
8818 if template_name == constants.DT_PLAIN:
8819 def logical_id_fn(idx, _, disk):
8820 vg = disk.get(constants.IDISK_VG, vgname)
8821 return (vg, names[idx])
8822 elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
8824 lambda _, disk_index, disk: (file_driver,
8825 "%s/disk%d" % (file_storage_dir,
8827 elif template_name == constants.DT_BLOCK:
8829 lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
8830 disk[constants.IDISK_ADOPT])
8831 elif template_name == constants.DT_RBD:
8832 logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
8834 raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
8836 dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
8838 for idx, disk in enumerate(disk_info):
8839 disk_index = idx + base_index
8840 size = disk[constants.IDISK_SIZE]
8841 feedback_fn("* disk %s, size %s" %
8842 (disk_index, utils.FormatUnit(size, "h")))
8843 disks.append(objects.Disk(dev_type=dev_type, size=size,
8844 logical_id=logical_id_fn(idx, disk_index, disk),
8845 iv_name="disk/%d" % disk_index,
8846 mode=disk[constants.IDISK_MODE],
8852 def _GetInstanceInfoText(instance):
8853 """Compute that text that should be added to the disk's metadata.
8856 return "originstname+%s" % instance.name
8859 def _CalcEta(time_taken, written, total_size):
8860 """Calculates the ETA based on size written and total size.
8862 @param time_taken: The time taken so far
8863 @param written: amount written so far
8864 @param total_size: The total size of data to be written
8865 @return: The remaining time in seconds
8868 avg_time = time_taken / float(written)
8869 return (total_size - written) * avg_time
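# Illustrative self-check for _CalcEta, not part of the original module;
# it only runs when called explicitly.
def _ExampleCalcEta(): # pragma: no cover
  """Worked example: 1024 MiB of 4096 MiB written in 30 seconds.

  The average speed is 30 / 1024.0 seconds per MiB, so the remaining
  3072 MiB should take 3072 * (30 / 1024.0) = 90 seconds.

  """
  assert _CalcEta(30.0, 1024, 4096) == 90.0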
8872 def _WipeDisks(lu, instance):
8873 """Wipes instance disks.
8875 @type lu: L{LogicalUnit}
8876 @param lu: the logical unit on whose behalf we execute
8877 @type instance: L{objects.Instance}
8878 @param instance: the instance whose disks we should create
8879 @return: the success of the wipe
8882 node = instance.primary_node
8884 for device in instance.disks:
8885 lu.cfg.SetDiskID(device, node)
8887 logging.info("Pause sync of instance %s disks", instance.name)
8888 result = lu.rpc.call_blockdev_pause_resume_sync(node,
8889 (instance.disks, instance),
8890 True)
8891 result.Raise("Failed RPC to node %s for pausing the disk syncing" % node)
8893 for idx, success in enumerate(result.payload):
8895 logging.warn("pause-sync of instance %s for disks %d failed",
8898 try:
8899 for idx, device in enumerate(instance.disks):
8900 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
8901 # MAX_WIPE_CHUNK at max
8902 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
8903 constants.MIN_WIPE_CHUNK_PERCENT)
8904 # we _must_ make this an int, otherwise rounding errors will
8905 # occur
8906 wipe_chunk_size = int(wipe_chunk_size)
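# Worked example for the chunk size above (illustrative, assuming the
# stock constants MAX_WIPE_CHUNK = 1024 and MIN_WIPE_CHUNK_PERCENT = 10):
# a 20 GiB disk yields min(1024, 20480 / 100.0 * 10) = 1024 MiB chunks,
# while a 5 GiB disk yields min(1024, 5120 / 100.0 * 10) = 512 MiB chunks.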
8908 lu.LogInfo("* Wiping disk %d", idx)
8909 logging.info("Wiping disk %d for instance %s, node %s using"
8910 " chunk size %s", idx, instance.name, node, wipe_chunk_size)
8912 offset = 0
8913 size = device.size
8914 last_output = 0
8915 start_time = time.time()
8917 while offset < size:
8918 wipe_size = min(wipe_chunk_size, size - offset)
8919 logging.debug("Wiping disk %d, offset %s, chunk %s",
8920 idx, offset, wipe_size)
8921 result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
8922 wipe_size)
8923 result.Raise("Could not wipe disk %d at offset %d for size %d" %
8924 (idx, offset, wipe_size))
8925 offset += wipe_size
8926 now = time.time()
8927 if now - last_output >= 60:
8928 eta = _CalcEta(now - start_time, offset, size)
8929 lu.LogInfo(" - done: %.1f%% ETA: %s" %
8930 (offset / float(size) * 100, utils.FormatSeconds(eta)))
8931 last_output = now
8932 finally:
8933 logging.info("Resume sync of instance %s disks", instance.name)
8935 result = lu.rpc.call_blockdev_pause_resume_sync(node,
8936 (instance.disks, instance),
8937 False)
8940 lu.LogWarning("RPC call to %s for resuming disk syncing failed,"
8941 " please have a look at the status and troubleshoot"
8942 " the issue: %s", node, result.fail_msg)
8944 for idx, success in enumerate(result.payload):
8945 if not success:
8946 lu.LogWarning("Resume sync of disk %d failed, please have a"
8947 " look at the status and troubleshoot the issue", idx)
8948 logging.warn("resume-sync of instance %s for disk %d failed",
8949 instance.name, idx)
8952 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
8953 """Create all disks for an instance.
8955 This abstracts away some work from AddInstance.
8957 @type lu: L{LogicalUnit}
8958 @param lu: the logical unit on whose behalf we execute
8959 @type instance: L{objects.Instance}
8960 @param instance: the instance whose disks we should create
8962 @param to_skip: list of indices to skip
8963 @type target_node: string
8964 @param target_node: if passed, overrides the target node for creation
8966 @return: the success of the creation
8969 info = _GetInstanceInfoText(instance)
8970 if target_node is None:
8971 pnode = instance.primary_node
8972 all_nodes = instance.all_nodes
8973 else:
8974 pnode = target_node
8975 all_nodes = [pnode]
8977 if instance.disk_template in constants.DTS_FILEBASED:
8978 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
8979 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
8981 result.Raise("Failed to create directory '%s' on"
8982 " node %s" % (file_storage_dir, pnode))
8984 # Note: this needs to be kept in sync with adding of disks in
8985 # LUInstanceSetParams
8986 for idx, device in enumerate(instance.disks):
8987 if to_skip and idx in to_skip:
8988 continue
8989 logging.info("Creating disk %s for instance '%s'", idx, instance.name)
8991 for node in all_nodes:
8992 f_create = node == pnode
8993 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
8996 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
8997 """Remove all disks for an instance.
8999 This abstracts away some work from `AddInstance()` and
9000 `RemoveInstance()`. Note that in case some of the devices couldn't
9001 be removed, the removal will continue with the other ones (compare
9002 with `_CreateDisks()`).
9004 @type lu: L{LogicalUnit}
9005 @param lu: the logical unit on whose behalf we execute
9006 @type instance: L{objects.Instance}
9007 @param instance: the instance whose disks we should remove
9008 @type target_node: string
9009 @param target_node: used to override the node on which to remove the disks
9011 @return: the success of the removal
9014 logging.info("Removing block devices for instance %s", instance.name)
9016 all_result = True
9017 ports_to_release = set()
9018 anno_disks = _AnnotateDiskParams(instance, instance.disks, lu.cfg)
9019 for (idx, device) in enumerate(anno_disks):
9020 if target_node:
9021 edata = [(target_node, device)]
9022 else:
9023 edata = device.ComputeNodeTree(instance.primary_node)
9024 for node, disk in edata:
9025 lu.cfg.SetDiskID(disk, node)
9026 result = lu.rpc.call_blockdev_remove(node, disk)
9027 if result.fail_msg:
9028 lu.LogWarning("Could not remove disk %s on node %s,"
9029 " continuing anyway: %s", idx, node, result.fail_msg)
9030 if not (result.offline and node != instance.primary_node):
9031 all_result = False
9033 # if this is a DRBD disk, return its port to the pool
9034 if device.dev_type in constants.LDS_DRBD:
9035 ports_to_release.add(device.logical_id[2])
9037 if all_result or ignore_failures:
9038 for port in ports_to_release:
9039 lu.cfg.AddTcpUdpPort(port)
9041 if instance.disk_template in constants.DTS_FILEBASED:
9042 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
9043 if target_node:
9044 tgt = target_node
9045 else:
9046 tgt = instance.primary_node
9047 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
9049 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
9050 file_storage_dir, instance.primary_node, result.fail_msg)
9056 def _ComputeDiskSizePerVG(disk_template, disks):
9057 """Compute disk size requirements in the volume group
9060 def _compute(disks, payload):
9061 """Universal algorithm.
9066 vgs[disk[constants.IDISK_VG]] = \
9067 vgs.get(disk[constants.IDISK_VG], 0) + disk[constants.IDISK_SIZE] + payload
9068 return vgs
9071 # Required free disk space as a function of disk and swap space
9072 req_size_dict = {
9073 constants.DT_DISKLESS: {},
9074 constants.DT_PLAIN: _compute(disks, 0),
9075 # 128 MB are added for drbd metadata for each disk
9076 constants.DT_DRBD8: _compute(disks, DRBD_META_SIZE),
9077 constants.DT_FILE: {},
9078 constants.DT_SHARED_FILE: {},
9079 }
9081 if disk_template not in req_size_dict:
9082 raise errors.ProgrammerError("Disk template '%s' size requirement"
9083 " is unknown" % disk_template)
9085 return req_size_dict[disk_template]
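# Illustrative usage of _ComputeDiskSizePerVG, not part of the original
# module; the volume group names are made up.
def _ExampleComputeDiskSizePerVG(): # pragma: no cover
  disks = [
    {constants.IDISK_SIZE: 1024, constants.IDISK_VG: "xenvg"},
    {constants.IDISK_SIZE: 2048, constants.IDISK_VG: "xenvg"},
    ]
  # plain LVs: sizes are summed per volume group, with no overhead
  assert _ComputeDiskSizePerVG(constants.DT_PLAIN, disks) == {"xenvg": 3072}
  # drbd8: DRBD_META_SIZE of metadata is added for each disk
  expected = {"xenvg": 3072 + 2 * DRBD_META_SIZE}
  assert _ComputeDiskSizePerVG(constants.DT_DRBD8, disks) == expected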
9088 def _ComputeDiskSize(disk_template, disks):
9089 """Compute disk size requirements according to disk template
9092 # Required free disk space as a function of disk and swap space
9093 req_size_dict = {
9094 constants.DT_DISKLESS: None,
9095 constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
9096 # 128 MB are added for drbd metadata for each disk
9097 constants.DT_DRBD8:
9098 sum(d[constants.IDISK_SIZE] + DRBD_META_SIZE for d in disks),
9099 constants.DT_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
9100 constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
9101 constants.DT_BLOCK: 0,
9102 constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
9103 }
9105 if disk_template not in req_size_dict:
9106 raise errors.ProgrammerError("Disk template '%s' size requirement"
9107 " is unknown" % disk_template)
9109 return req_size_dict[disk_template]
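# Illustrative usage of _ComputeDiskSize, not part of the original module.
def _ExampleComputeDiskSize(): # pragma: no cover
  disks = [{constants.IDISK_SIZE: 1024}, {constants.IDISK_SIZE: 2048}]
  # plain disks: a plain sum of the sizes
  assert _ComputeDiskSize(constants.DT_PLAIN, disks) == 3072
  # drbd8: DRBD_META_SIZE of metadata is added for every disk
  assert _ComputeDiskSize(constants.DT_DRBD8, disks) == \
    3072 + 2 * DRBD_META_SIZE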
9112 def _FilterVmNodes(lu, nodenames):
9113 """Filters out non-vm_capable nodes from a list.
9115 @type lu: L{LogicalUnit}
9116 @param lu: the logical unit for which we check
9117 @type nodenames: list
9118 @param nodenames: the list of nodes on which we should check
9120 @return: the list of vm-capable nodes
9123 non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
9124 return [name for name in nodenames if name not in non_vm_nodes]
9127 def _CheckHVParams(lu, nodenames, hvname, hvparams):
9128 """Hypervisor parameter validation.
9130 This function abstracts the hypervisor parameter validation to be
9131 used in both instance create and instance modify.
9133 @type lu: L{LogicalUnit}
9134 @param lu: the logical unit for which we check
9135 @type nodenames: list
9136 @param nodenames: the list of nodes on which we should check
9137 @type hvname: string
9138 @param hvname: the name of the hypervisor we should use
9139 @type hvparams: dict
9140 @param hvparams: the parameters which we need to check
9141 @raise errors.OpPrereqError: if the parameters are not valid
9144 nodenames = _FilterVmNodes(lu, nodenames)
9146 cluster = lu.cfg.GetClusterInfo()
9147 hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
9149 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvfull)
9150 for node in nodenames:
9154 info.Raise("Hypervisor parameter validation failed on node %s" % node)
9157 def _CheckOSParams(lu, required, nodenames, osname, osparams):
9158 """OS parameters validation.
9160 @type lu: L{LogicalUnit}
9161 @param lu: the logical unit for which we check
9162 @type required: boolean
9163 @param required: whether the validation should fail if the OS is not
9165 @type nodenames: list
9166 @param nodenames: the list of nodes on which we should check
9167 @type osname: string
9168 @param osname: the name of the OS we should use
9169 @type osparams: dict
9170 @param osparams: the parameters which we need to check
9171 @raise errors.OpPrereqError: if the parameters are not valid
9174 nodenames = _FilterVmNodes(lu, nodenames)
9175 result = lu.rpc.call_os_validate(nodenames, required, osname,
9176 [constants.OS_VALIDATE_PARAMETERS],
9177 osparams)
9178 for node, nres in result.items():
9179 # we don't check for offline cases since this should be run only
9180 # against the master node and/or an instance's nodes
9181 nres.Raise("OS Parameters validation failed on node %s" % node)
9182 if not nres.payload:
9183 lu.LogInfo("OS %s not found on node %s, validation skipped",
9187 class LUInstanceCreate(LogicalUnit):
9188 """Create an instance.
9191 HPATH = "instance-add"
9192 HTYPE = constants.HTYPE_INSTANCE
9195 def CheckArguments(self):
9199 # do not require name_check to ease forward/backward compatibility
9201 if self.op.no_install and self.op.start:
9202 self.LogInfo("No-installation mode selected, disabling startup")
9203 self.op.start = False
9204 # validate/normalize the instance name
9205 self.op.instance_name = \
9206 netutils.Hostname.GetNormalizedName(self.op.instance_name)
9208 if self.op.ip_check and not self.op.name_check:
9209 # TODO: make the ip check more flexible and not depend on the name check
9210 raise errors.OpPrereqError("Cannot do IP address check without a name"
9211 " check", errors.ECODE_INVAL)
9213 # check nics' parameter names
9214 for nic in self.op.nics:
9215 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
9217 # check disks. parameter names and consistent adopt/no-adopt strategy
9218 has_adopt = has_no_adopt = False
9219 for disk in self.op.disks:
9220 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
9221 if constants.IDISK_ADOPT in disk:
9222 has_adopt = True
9223 else:
9224 has_no_adopt = True
9225 if has_adopt and has_no_adopt:
9226 raise errors.OpPrereqError("Either all disks are adopted or none is",
9229 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
9230 raise errors.OpPrereqError("Disk adoption is not supported for the"
9231 " '%s' disk template" %
9232 self.op.disk_template,
9233 errors.ECODE_INVAL)
9234 if self.op.iallocator is not None:
9235 raise errors.OpPrereqError("Disk adoption not allowed with an"
9236 " iallocator script", errors.ECODE_INVAL)
9237 if self.op.mode == constants.INSTANCE_IMPORT:
9238 raise errors.OpPrereqError("Disk adoption not allowed for"
9239 " instance import", errors.ECODE_INVAL)
9241 if self.op.disk_template in constants.DTS_MUST_ADOPT:
9242 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
9243 " but no 'adopt' parameter given" %
9244 self.op.disk_template,
9245 errors.ECODE_INVAL)
9247 self.adopt_disks = has_adopt
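# Illustrative examples for the adoption rule above (made-up disk specs):
# [{size: 1024}, {size: 2048}] means no adoption, [{adopt: "lv1"},
# {adopt: "lv2"}] means full adoption, and the mixed case
# [{size: 1024}, {adopt: "lv1"}] is rejected by the check above.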
9249 # instance name verification
9250 if self.op.name_check:
9251 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
9252 self.op.instance_name = self.hostname1.name
9253 # used in CheckPrereq for ip ping check
9254 self.check_ip = self.hostname1.ip
9256 self.check_ip = None
9258 # file storage checks
9259 if (self.op.file_driver and
9260 not self.op.file_driver in constants.FILE_DRIVER):
9261 raise errors.OpPrereqError("Invalid file driver name '%s'" %
9262 self.op.file_driver, errors.ECODE_INVAL)
9264 if self.op.disk_template == constants.DT_FILE:
9265 opcodes.RequireFileStorage()
9266 elif self.op.disk_template == constants.DT_SHARED_FILE:
9267 opcodes.RequireSharedFileStorage()
9269 ### Node/iallocator related checks
9270 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
9272 if self.op.pnode is not None:
9273 if self.op.disk_template in constants.DTS_INT_MIRROR:
9274 if self.op.snode is None:
9275 raise errors.OpPrereqError("The networked disk templates need"
9276 " a mirror node", errors.ECODE_INVAL)
9278 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
9280 self.op.snode = None
9282 self._cds = _GetClusterDomainSecret()
9284 if self.op.mode == constants.INSTANCE_IMPORT:
9285 # On import force_variant must be True, because if we forced it at
9286 # initial install, our only chance when importing it back is that it
9287 # works again!
9288 self.op.force_variant = True
9290 if self.op.no_install:
9291 self.LogInfo("No-installation mode has no effect during import")
9293 elif self.op.mode == constants.INSTANCE_CREATE:
9294 if self.op.os_type is None:
9295 raise errors.OpPrereqError("No guest OS specified",
9297 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
9298 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
9299 " installation" % self.op.os_type,
9301 if self.op.disk_template is None:
9302 raise errors.OpPrereqError("No disk template specified",
9305 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9306 # Check handshake to ensure both clusters have the same domain secret
9307 src_handshake = self.op.source_handshake
9308 if not src_handshake:
9309 raise errors.OpPrereqError("Missing source handshake",
9310 errors.ECODE_INVAL)
9312 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
9313 src_handshake)
9314 if errmsg:
9315 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
9316 errors.ECODE_INVAL)
9318 # Load and check source CA
9319 self.source_x509_ca_pem = self.op.source_x509_ca
9320 if not self.source_x509_ca_pem:
9321 raise errors.OpPrereqError("Missing source X509 CA",
9322 errors.ECODE_INVAL)
9324 try:
9325 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
9326 self._cds)
9327 except OpenSSL.crypto.Error, err:
9328 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
9329 (err, ), errors.ECODE_INVAL)
9331 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9332 if errcode is not None:
9333 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
9334 errors.ECODE_INVAL)
9336 self.source_x509_ca = cert
9338 src_instance_name = self.op.source_instance_name
9339 if not src_instance_name:
9340 raise errors.OpPrereqError("Missing source instance name",
9341 errors.ECODE_INVAL)
9343 self.source_instance_name = \
9344 netutils.GetHostname(name=src_instance_name).name
9346 else:
9347 raise errors.OpPrereqError("Invalid instance creation mode %r" %
9348 self.op.mode, errors.ECODE_INVAL)
9350 def ExpandNames(self):
9351 """ExpandNames for CreateInstance.
9353 Figure out the right locks for instance creation.
9355 """
9356 self.needed_locks = {}
9358 instance_name = self.op.instance_name
9359 # this is just a preventive check, but someone might still add this
9360 # instance in the meantime, and creation will fail at lock-add time
9361 if instance_name in self.cfg.GetInstanceList():
9362 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
9363 instance_name, errors.ECODE_EXISTS)
9365 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
9367 if self.op.iallocator:
9368 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
9369 # specifying a group on instance creation and then selecting nodes from
9370 # that group
9371 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9372 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
9373 else:
9374 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
9375 nodelist = [self.op.pnode]
9376 if self.op.snode is not None:
9377 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
9378 nodelist.append(self.op.snode)
9379 self.needed_locks[locking.LEVEL_NODE] = nodelist
9380 # Lock resources of instance's primary and secondary nodes (copy to
9381 # prevent accidental modification)
9382 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodelist)
9384 # in case of import lock the source node too
9385 if self.op.mode == constants.INSTANCE_IMPORT:
9386 src_node = self.op.src_node
9387 src_path = self.op.src_path
9389 if src_path is None:
9390 self.op.src_path = src_path = self.op.instance_name
9392 if src_node is None:
9393 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9394 self.op.src_node = None
9395 if os.path.isabs(src_path):
9396 raise errors.OpPrereqError("Importing an instance from a path"
9397 " requires a source node option",
9400 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
9401 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
9402 self.needed_locks[locking.LEVEL_NODE].append(src_node)
9403 if not os.path.isabs(src_path):
9404 self.op.src_path = src_path = \
9405 utils.PathJoin(constants.EXPORT_DIR, src_path)
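# Illustrative examples (assumptions, not from the original source) of how
# the source node/path pair is normalized above:
#   src_node=None, src_path="inst1.example.com"
#     -> all nodes locked, export located by relative path in CheckPrereq
#   src_node="node1", src_path="inst1.example.com"
#     -> becomes utils.PathJoin(constants.EXPORT_DIR, "inst1.example.com")
#   src_node=None, src_path="/abs/path" -> rejected (needs a source node)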
9407 def _RunAllocator(self):
9408 """Run the allocator based on input opcode.
9411 nics = [n.ToDict() for n in self.nics]
9412 ial = IAllocator(self.cfg, self.rpc,
9413 mode=constants.IALLOCATOR_MODE_ALLOC,
9414 name=self.op.instance_name,
9415 disk_template=self.op.disk_template,
9416 tags=self.op.tags,
9417 os=self.op.os_type,
9418 vcpus=self.be_full[constants.BE_VCPUS],
9419 memory=self.be_full[constants.BE_MAXMEM],
9420 spindle_use=self.be_full[constants.BE_SPINDLE_USE],
9421 disks=self.disks,
9422 nics=nics,
9423 hypervisor=self.op.hypervisor,
9424 )
9426 ial.Run(self.op.iallocator)
9428 if not ial.success:
9429 raise errors.OpPrereqError("Can't compute nodes using"
9430 " iallocator '%s': %s" %
9431 (self.op.iallocator, ial.info),
9432 errors.ECODE_NORES)
9433 if len(ial.result) != ial.required_nodes:
9434 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
9435 " of nodes (%s), required %s" %
9436 (self.op.iallocator, len(ial.result),
9437 ial.required_nodes), errors.ECODE_FAULT)
9438 self.op.pnode = ial.result[0]
9439 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
9440 self.op.instance_name, self.op.iallocator,
9441 utils.CommaJoin(ial.result))
9442 if ial.required_nodes == 2:
9443 self.op.snode = ial.result[1]
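# Hedged sketch, based only on the checks above: ial.result is a list of
# node names whose length must equal ial.required_nodes, e.g.
#   ["node2.example.com", "node3.example.com"]
# for a DRBD instance (primary first, then the secondary). The node names
# are invented for illustration.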
9445 def BuildHooksEnv(self):
9446 """Build hooks env.
9448 This runs on master, primary and secondary nodes of the instance.
9450 """
9451 env = {
9452 "ADD_MODE": self.op.mode,
9453 }
9454 if self.op.mode == constants.INSTANCE_IMPORT:
9455 env["SRC_NODE"] = self.op.src_node
9456 env["SRC_PATH"] = self.op.src_path
9457 env["SRC_IMAGES"] = self.src_images
9459 env.update(_BuildInstanceHookEnv(
9460 name=self.op.instance_name,
9461 primary_node=self.op.pnode,
9462 secondary_nodes=self.secondaries,
9463 status=self.op.start,
9464 os_type=self.op.os_type,
9465 minmem=self.be_full[constants.BE_MINMEM],
9466 maxmem=self.be_full[constants.BE_MAXMEM],
9467 vcpus=self.be_full[constants.BE_VCPUS],
9468 nics=_NICListToTuple(self, self.nics),
9469 disk_template=self.op.disk_template,
9470 disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
9471 for d in self.disks],
9472 bep=self.be_full,
9473 hvp=self.hv_full,
9474 hypervisor_name=self.op.hypervisor,
9475 tags=self.op.tags,
9476 ))
9478 return env
9480 def BuildHooksNodes(self):
9481 """Build hooks nodes.
9484 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
9487 def _ReadExportInfo(self):
9488 """Reads the export information from disk.
9490 It will override the opcode source node and path with the actual
9491 information, if these two were not specified before.
9493 @return: the export information
9495 """
9496 assert self.op.mode == constants.INSTANCE_IMPORT
9498 src_node = self.op.src_node
9499 src_path = self.op.src_path
9501 if src_node is None:
9502 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
9503 exp_list = self.rpc.call_export_list(locked_nodes)
9504 found = False
9505 for node in exp_list:
9506 if exp_list[node].fail_msg:
9507 continue
9508 if src_path in exp_list[node].payload:
9509 found = True
9510 self.op.src_node = src_node = node
9511 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
9512 src_path)
9513 break
9514 if not found:
9515 raise errors.OpPrereqError("No export found for relative path %s" %
9516 src_path, errors.ECODE_INVAL)
9518 _CheckNodeOnline(self, src_node)
9519 result = self.rpc.call_export_info(src_node, src_path)
9520 result.Raise("No export or invalid export found in dir %s" % src_path)
9522 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
9523 if not export_info.has_section(constants.INISECT_EXP):
9524 raise errors.ProgrammerError("Corrupted export config",
9525 errors.ECODE_ENVIRON)
9527 ei_version = export_info.get(constants.INISECT_EXP, "version")
9528 if (int(ei_version) != constants.EXPORT_VERSION):
9529 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
9530 (ei_version, constants.EXPORT_VERSION),
9531 errors.ECODE_ENVIRON)
9533 return export_info
9534 def _ReadExportParams(self, einfo):
9535 """Use export parameters as defaults.
9537 In case the opcode doesn't specify (as in override) some instance
9538 parameters, then try to use them from the export information, if
9539 available.
9541 """
9542 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
9544 if self.op.disk_template is None:
9545 if einfo.has_option(constants.INISECT_INS, "disk_template"):
9546 self.op.disk_template = einfo.get(constants.INISECT_INS,
9547 "disk_template")
9548 if self.op.disk_template not in constants.DISK_TEMPLATES:
9549 raise errors.OpPrereqError("Disk template specified in configuration"
9550 " file is not one of the allowed values:"
9551 " %s" % " ".join(constants.DISK_TEMPLATES))
9553 raise errors.OpPrereqError("No disk template specified and the export"
9554 " is missing the disk_template information",
9557 if not self.op.disks:
9559 # TODO: import the disk iv_name too
9560 for idx in range(constants.MAX_DISKS):
9561 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
9562 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
9563 disks.append({constants.IDISK_SIZE: disk_sz})
9564 self.op.disks = disks
9565 if not disks and self.op.disk_template != constants.DT_DISKLESS:
9566 raise errors.OpPrereqError("No disk info specified and the export"
9567 " is missing the disk information",
9570 if not self.op.nics:
9572 for idx in range(constants.MAX_NICS):
9573 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
9575 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
9576 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
9583 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
9584 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
9586 if (self.op.hypervisor is None and
9587 einfo.has_option(constants.INISECT_INS, "hypervisor")):
9588 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
9590 if einfo.has_section(constants.INISECT_HYP):
9591 # use the export parameters but do not override the ones
9592 # specified by the user
9593 for name, value in einfo.items(constants.INISECT_HYP):
9594 if name not in self.op.hvparams:
9595 self.op.hvparams[name] = value
9597 if einfo.has_section(constants.INISECT_BEP):
9598 # use the parameters, without overriding
9599 for name, value in einfo.items(constants.INISECT_BEP):
9600 if name not in self.op.beparams:
9601 self.op.beparams[name] = value
9602 # Compatibility for the old "memory" be param
9603 if name == constants.BE_MEMORY:
9604 if constants.BE_MAXMEM not in self.op.beparams:
9605 self.op.beparams[constants.BE_MAXMEM] = value
9606 if constants.BE_MINMEM not in self.op.beparams:
9607 self.op.beparams[constants.BE_MINMEM] = value
9609 # try to read the parameters old style, from the main section
9610 for name in constants.BES_PARAMETERS:
9611 if (name not in self.op.beparams and
9612 einfo.has_option(constants.INISECT_INS, name)):
9613 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
9615 if einfo.has_section(constants.INISECT_OSP):
9616 # use the parameters, without overriding
9617 for name, value in einfo.items(constants.INISECT_OSP):
9618 if name not in self.op.osparams:
9619 self.op.osparams[name] = value
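# Illustrative sketch of the export info layout consumed above. The section
# names are assumed to follow the usual values of constants.INISECT_EXP,
# INISECT_INS, INISECT_HYP and INISECT_BEP; all values are invented:
#
#   [export]
#   version = 0
#   os = debootstrap
#
#   [instance]
#   disk_template = drbd
#   disk0_size = 10240
#   nic0_mac = aa:00:00:fa:3a:3f
#   tags = foo bar
#
#   [hypervisor]
#   kernel_path = /boot/vmlinuz
#
#   [backend]
#   memory = 1024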
9621 def _RevertToDefaults(self, cluster):
9622 """Revert the instance parameters to the default values.
9626 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
9627 for name in self.op.hvparams.keys():
9628 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
9629 del self.op.hvparams[name]
9630 # beparams
9631 be_defs = cluster.SimpleFillBE({})
9632 for name in self.op.beparams.keys():
9633 if name in be_defs and be_defs[name] == self.op.beparams[name]:
9634 del self.op.beparams[name]
9635 # nic params
9636 nic_defs = cluster.SimpleFillNIC({})
9637 for nic in self.op.nics:
9638 for name in constants.NICS_PARAMETERS:
9639 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
9640 del nic[name]
9641 # osparams
9642 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
9643 for name in self.op.osparams.keys():
9644 if name in os_defs and os_defs[name] == self.op.osparams[name]:
9645 del self.op.osparams[name]
9647 def _CalculateFileStorageDir(self):
9648 """Calculate final instance file storage dir.
9651 # file storage dir calculation/check
9652 self.instance_file_storage_dir = None
9653 if self.op.disk_template in constants.DTS_FILEBASED:
9654 # build the full file storage dir path
9655 joinargs = []
9657 if self.op.disk_template == constants.DT_SHARED_FILE:
9658 get_fsd_fn = self.cfg.GetSharedFileStorageDir
9659 else:
9660 get_fsd_fn = self.cfg.GetFileStorageDir
9662 cfg_storagedir = get_fsd_fn()
9663 if not cfg_storagedir:
9664 raise errors.OpPrereqError("Cluster file storage dir not defined")
9665 joinargs.append(cfg_storagedir)
9667 if self.op.file_storage_dir is not None:
9668 joinargs.append(self.op.file_storage_dir)
9670 joinargs.append(self.op.instance_name)
9672 # pylint: disable=W0142
9673 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
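# Example (values invented): with a cluster file storage dir of
# "/srv/ganeti/file-storage", op.file_storage_dir "mysubdir" and instance
# name "inst1.example.com", the result would be
# utils.PathJoin("/srv/ganeti/file-storage", "mysubdir", "inst1.example.com")
# == "/srv/ganeti/file-storage/mysubdir/inst1.example.com".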
9675 def CheckPrereq(self): # pylint: disable=R0914
9676 """Check prerequisites.
9679 self._CalculateFileStorageDir()
9681 if self.op.mode == constants.INSTANCE_IMPORT:
9682 export_info = self._ReadExportInfo()
9683 self._ReadExportParams(export_info)
9684 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
9685 else:
9686 self._old_instance_name = None
9688 if (not self.cfg.GetVGName() and
9689 self.op.disk_template not in constants.DTS_NOT_LVM):
9690 raise errors.OpPrereqError("Cluster does not support lvm-based"
9691 " instances", errors.ECODE_STATE)
9693 if (self.op.hypervisor is None or
9694 self.op.hypervisor == constants.VALUE_AUTO):
9695 self.op.hypervisor = self.cfg.GetHypervisorType()
9697 cluster = self.cfg.GetClusterInfo()
9698 enabled_hvs = cluster.enabled_hypervisors
9699 if self.op.hypervisor not in enabled_hvs:
9700 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
9701 " cluster (%s)" % (self.op.hypervisor,
9702 ",".join(enabled_hvs)),
9705 # Check tag validity
9706 for tag in self.op.tags:
9707 objects.TaggableObject.ValidateTag(tag)
9709 # check hypervisor parameter syntax (locally)
9710 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
9711 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
9712 self.op.hvparams)
9713 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
9714 hv_type.CheckParameterSyntax(filled_hvp)
9715 self.hv_full = filled_hvp
9716 # check that we don't specify global parameters on an instance
9717 _CheckGlobalHvParams(self.op.hvparams)
9719 # fill and remember the beparams dict
9720 default_beparams = cluster.beparams[constants.PP_DEFAULT]
9721 for param, value in self.op.beparams.iteritems():
9722 if value == constants.VALUE_AUTO:
9723 self.op.beparams[param] = default_beparams[param]
9724 objects.UpgradeBeParams(self.op.beparams)
9725 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
9726 self.be_full = cluster.SimpleFillBE(self.op.beparams)
9728 # build os parameters
9729 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
9731 # now that hvp/bep are in final format, let's reset to defaults,
9732 # if told to do so
9733 if self.op.identify_defaults:
9734 self._RevertToDefaults(cluster)
9736 # NIC buildup
9737 self.nics = []
9738 for idx, nic in enumerate(self.op.nics):
9739 nic_mode_req = nic.get(constants.INIC_MODE, None)
9740 nic_mode = nic_mode_req
9741 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
9742 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
9744 # in routed mode, for the first nic, the default ip is 'auto'
9745 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
9746 default_ip_mode = constants.VALUE_AUTO
9747 else:
9748 default_ip_mode = constants.VALUE_NONE
9750 # ip validity checks
9751 ip = nic.get(constants.INIC_IP, default_ip_mode)
9752 if ip is None or ip.lower() == constants.VALUE_NONE:
9753 nic_ip = None
9754 elif ip.lower() == constants.VALUE_AUTO:
9755 if not self.op.name_check:
9756 raise errors.OpPrereqError("IP address set to auto but name checks"
9757 " have been skipped",
9758 errors.ECODE_INVAL)
9759 nic_ip = self.hostname1.ip
9760 else:
9761 if not netutils.IPAddress.IsValid(ip):
9762 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
9763 errors.ECODE_INVAL)
9764 nic_ip = ip
9766 # TODO: check the ip address for uniqueness
9767 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
9768 raise errors.OpPrereqError("Routed nic mode requires an ip address",
9769 errors.ECODE_INVAL)
9771 # MAC address verification
9772 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
9773 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9774 mac = utils.NormalizeAndValidateMac(mac)
9776 try:
9777 self.cfg.ReserveMAC(mac, self.proc.GetECId())
9778 except errors.ReservationError:
9779 raise errors.OpPrereqError("MAC address %s already in use"
9780 " in cluster" % mac,
9781 errors.ECODE_NOTUNIQUE)
9783 # Build nic parameters
9784 link = nic.get(constants.INIC_LINK, None)
9785 if link == constants.VALUE_AUTO:
9786 link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
9787 nicparams = {}
9788 if nic_mode_req:
9789 nicparams[constants.NIC_MODE] = nic_mode
9790 if link:
9791 nicparams[constants.NIC_LINK] = link
9793 check_params = cluster.SimpleFillNIC(nicparams)
9794 objects.NIC.CheckParameterSyntax(check_params)
9795 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
9797 # disk checks/pre-build
9798 default_vg = self.cfg.GetVGName()
9799 self.disks = []
9800 for disk in self.op.disks:
9801 mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
9802 if mode not in constants.DISK_ACCESS_SET:
9803 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
9804 mode, errors.ECODE_INVAL)
9805 size = disk.get(constants.IDISK_SIZE, None)
9806 if size is None:
9807 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
9808 try:
9809 size = int(size)
9810 except (TypeError, ValueError):
9811 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
9812 errors.ECODE_INVAL)
9814 data_vg = disk.get(constants.IDISK_VG, default_vg)
9815 new_disk = {
9816 constants.IDISK_SIZE: size,
9817 constants.IDISK_MODE: mode,
9818 constants.IDISK_VG: data_vg,
9819 }
9820 if constants.IDISK_METAVG in disk:
9821 new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
9822 if constants.IDISK_ADOPT in disk:
9823 new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
9824 self.disks.append(new_disk)
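# Illustrative sketch (invented values): an opcode disk spec such as
#   {constants.IDISK_SIZE: "10240", constants.IDISK_MODE: "rw"}
# is normalized by the loop above into
#   {constants.IDISK_SIZE: 10240, constants.IDISK_MODE: "rw",
#    constants.IDISK_VG: "xenvg"}
# with the volume group defaulting to the cluster VG ("xenvg" is just an
# example name).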
9826 if self.op.mode == constants.INSTANCE_IMPORT:
9827 disk_images = []
9828 for idx in range(len(self.disks)):
9829 option = "disk%d_dump" % idx
9830 if export_info.has_option(constants.INISECT_INS, option):
9831 # FIXME: are the old OSes, disk sizes, etc. useful?
9832 export_name = export_info.get(constants.INISECT_INS, option)
9833 image = utils.PathJoin(self.op.src_path, export_name)
9834 disk_images.append(image)
9835 else:
9836 disk_images.append(False)
9838 self.src_images = disk_images
9840 if self.op.instance_name == self._old_instance_name:
9841 for idx, nic in enumerate(self.nics):
9842 if nic.mac == constants.VALUE_AUTO:
9843 nic_mac_ini = "nic%d_mac" % idx
9844 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
9846 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
9848 # ip ping checks (we use the same ip that was resolved in ExpandNames)
9849 if self.op.ip_check:
9850 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
9851 raise errors.OpPrereqError("IP %s of instance %s already in use" %
9852 (self.check_ip, self.op.instance_name),
9853 errors.ECODE_NOTUNIQUE)
9855 #### mac address generation
9856 # By generating here the mac address both the allocator and the hooks get
9857 # the real final mac address rather than the 'auto' or 'generate' value.
9858 # There is a race condition between the generation and the instance object
9859 # creation, which means that we know the mac is valid now, but we're not
9860 # sure it will be when we actually add the instance. If things go bad
9861 # adding the instance will abort because of a duplicate mac, and the
9862 # creation job will fail.
9863 for nic in self.nics:
9864 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9865 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
9867 #### allocator run
9869 if self.op.iallocator is not None:
9870 self._RunAllocator()
9872 # Release all unneeded node locks
9873 _ReleaseLocks(self, locking.LEVEL_NODE,
9874 keep=filter(None, [self.op.pnode, self.op.snode,
9875 self.op.src_node]))
9876 _ReleaseLocks(self, locking.LEVEL_NODE_RES,
9877 keep=filter(None, [self.op.pnode, self.op.snode,
9878 self.op.src_node]))
9880 #### node related checks
9882 # check primary node
9883 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
9884 assert self.pnode is not None, \
9885 "Cannot retrieve locked node %s" % self.op.pnode
9887 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
9888 pnode.name, errors.ECODE_STATE)
9890 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
9891 pnode.name, errors.ECODE_STATE)
9892 if not pnode.vm_capable:
9893 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
9894 " '%s'" % pnode.name, errors.ECODE_STATE)
9896 self.secondaries = []
9898 # mirror node verification
9899 if self.op.disk_template in constants.DTS_INT_MIRROR:
9900 if self.op.snode == pnode.name:
9901 raise errors.OpPrereqError("The secondary node cannot be the"
9902 " primary node", errors.ECODE_INVAL)
9903 _CheckNodeOnline(self, self.op.snode)
9904 _CheckNodeNotDrained(self, self.op.snode)
9905 _CheckNodeVmCapable(self, self.op.snode)
9906 self.secondaries.append(self.op.snode)
9908 snode = self.cfg.GetNodeInfo(self.op.snode)
9909 if pnode.group != snode.group:
9910 self.LogWarning("The primary and secondary nodes are in two"
9911 " different node groups; the disk parameters"
9912 " from the first disk's node group will be"
9915 nodenames = [pnode.name] + self.secondaries
9917 if not self.adopt_disks:
9918 if self.op.disk_template == constants.DT_RBD:
9919 # _CheckRADOSFreeSpace() is just a placeholder.
9920 # Any function that checks prerequisites can be placed here.
9921 # Check if there is enough space on the RADOS cluster.
9922 _CheckRADOSFreeSpace()
9924 # Check lv size requirements, if not adopting
9925 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
9926 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
9928 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
9929 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
9930 disk[constants.IDISK_ADOPT])
9931 for disk in self.disks])
9932 if len(all_lvs) != len(self.disks):
9933 raise errors.OpPrereqError("Duplicate volume names given for adoption",
9934 errors.ECODE_INVAL)
9935 for lv_name in all_lvs:
9936 try:
9937 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
9938 # to ReserveLV uses the same syntax
9939 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
9940 except errors.ReservationError:
9941 raise errors.OpPrereqError("LV named %s used by another instance" %
9942 lv_name, errors.ECODE_NOTUNIQUE)
9944 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
9945 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
9947 node_lvs = self.rpc.call_lv_list([pnode.name],
9948 vg_names.payload.keys())[pnode.name]
9949 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
9950 node_lvs = node_lvs.payload
9952 delta = all_lvs.difference(node_lvs.keys())
9953 if delta:
9954 raise errors.OpPrereqError("Missing logical volume(s): %s" %
9955 utils.CommaJoin(delta),
9956 errors.ECODE_INVAL)
9957 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
9958 if online_lvs:
9959 raise errors.OpPrereqError("Online logical volumes found, cannot"
9960 " adopt: %s" % utils.CommaJoin(online_lvs),
9961 errors.ECODE_STATE)
9962 # update the size of disk based on what is found
9963 for dsk in self.disks:
9964 dsk[constants.IDISK_SIZE] = \
9965 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
9966 dsk[constants.IDISK_ADOPT])][0]))
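# Assumed payload shape for illustration: call_lv_list maps "vg/lv" names to
# tuples whose first field is the size in mebibytes and whose third field is
# the "online" flag checked above, e.g.
#   {"xenvg/lv1": ("10240.00", False, False)}
# which would set the adopted disk's size to 10240.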
9968 elif self.op.disk_template == constants.DT_BLOCK:
9969 # Normalize and de-duplicate device paths
9970 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
9971 for disk in self.disks])
9972 if len(all_disks) != len(self.disks):
9973 raise errors.OpPrereqError("Duplicate disk names given for adoption",
9974 errors.ECODE_INVAL)
9975 baddisks = [d for d in all_disks
9976 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
9977 if baddisks:
9978 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
9979 " cannot be adopted" %
9980 (", ".join(baddisks),
9981 constants.ADOPTABLE_BLOCKDEV_ROOT),
9982 errors.ECODE_INVAL)
9984 node_disks = self.rpc.call_bdev_sizes([pnode.name],
9985 list(all_disks))[pnode.name]
9986 node_disks.Raise("Cannot get block device information from node %s" %
9987 pnode.name)
9988 node_disks = node_disks.payload
9989 delta = all_disks.difference(node_disks.keys())
9990 if delta:
9991 raise errors.OpPrereqError("Missing block device(s): %s" %
9992 utils.CommaJoin(delta),
9993 errors.ECODE_INVAL)
9994 for dsk in self.disks:
9995 dsk[constants.IDISK_SIZE] = \
9996 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
9998 # Verify instance specs
9999 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
10000 ispec = {
10001 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
10002 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
10003 constants.ISPEC_DISK_COUNT: len(self.disks),
10004 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
10005 for disk in self.disks],
10006 constants.ISPEC_NIC_COUNT: len(self.nics),
10007 constants.ISPEC_SPINDLE_USE: spindle_use,
10008 }
10010 group_info = self.cfg.GetNodeGroup(pnode.group)
10011 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
10012 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
10013 if not self.op.ignore_ipolicy and res:
10014 raise errors.OpPrereqError(("Instance allocation to group %s violates"
10015 " policy: %s") % (pnode.group,
10016 utils.CommaJoin(res)),
10017 errors.ECODE_INVAL)
10019 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
10021 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
10022 # check OS parameters (remotely)
10023 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
10025 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
10027 # memory check on primary node
10028 #TODO(dynmem): use MINMEM for checking
10029 if self.op.start:
10030 _CheckNodeFreeMemory(self, self.pnode.name,
10031 "creating instance %s" % self.op.instance_name,
10032 self.be_full[constants.BE_MAXMEM],
10033 self.op.hypervisor)
10035 self.dry_run_result = list(nodenames)
10037 def Exec(self, feedback_fn):
10038 """Create and add the instance to the cluster.
10041 instance = self.op.instance_name
10042 pnode_name = self.pnode.name
10044 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
10045 self.owned_locks(locking.LEVEL_NODE)), \
10046 "Node locks differ from node resource locks"
10048 ht_kind = self.op.hypervisor
10049 if ht_kind in constants.HTS_REQ_PORT:
10050 network_port = self.cfg.AllocatePort()
10051 else:
10052 network_port = None
10054 # This is ugly but we got a chicken-egg problem here
10055 # We can only take the group disk parameters, as the instance
10056 # has no disks yet (we are generating them right here).
10057 node = self.cfg.GetNodeInfo(pnode_name)
10058 nodegroup = self.cfg.GetNodeGroup(node.group)
10059 disks = _GenerateDiskTemplate(self,
10060 self.op.disk_template,
10061 instance, pnode_name,
10062 self.secondaries,
10063 self.disks,
10064 self.instance_file_storage_dir,
10065 self.op.file_driver,
10066 0,
10067 feedback_fn,
10068 self.cfg.GetGroupDiskParams(nodegroup))
10070 iobj = objects.Instance(name=instance, os=self.op.os_type,
10071 primary_node=pnode_name,
10072 nics=self.nics, disks=disks,
10073 disk_template=self.op.disk_template,
10074 admin_state=constants.ADMINST_DOWN,
10075 network_port=network_port,
10076 beparams=self.op.beparams,
10077 hvparams=self.op.hvparams,
10078 hypervisor=self.op.hypervisor,
10079 osparams=self.op.osparams,
10080 )
10082 if self.op.tags:
10083 for tag in self.op.tags:
10084 iobj.AddTag(tag)
10086 if self.adopt_disks:
10087 if self.op.disk_template == constants.DT_PLAIN:
10088 # rename LVs to the newly-generated names; we need to construct
10089 # 'fake' LV disks with the old data, plus the new unique_id
10090 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
10091 rename_to = []
10092 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
10093 rename_to.append(t_dsk.logical_id)
10094 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
10095 self.cfg.SetDiskID(t_dsk, pnode_name)
10096 result = self.rpc.call_blockdev_rename(pnode_name,
10097 zip(tmp_disks, rename_to))
10098 result.Raise("Failed to rename adopted LVs")
10099 else:
10100 feedback_fn("* creating instance disks...")
10101 try:
10102 _CreateDisks(self, iobj)
10103 except errors.OpExecError:
10104 self.LogWarning("Device creation failed, reverting...")
10105 try:
10106 _RemoveDisks(self, iobj)
10107 finally:
10108 self.cfg.ReleaseDRBDMinors(instance)
10109 raise
10111 feedback_fn("adding instance %s to cluster config" % instance)
10113 self.cfg.AddInstance(iobj, self.proc.GetECId())
10115 # Declare that we don't want to remove the instance lock anymore, as we've
10116 # added the instance to the config
10117 del self.remove_locks[locking.LEVEL_INSTANCE]
10119 if self.op.mode == constants.INSTANCE_IMPORT:
10120 # Release unused nodes
10121 _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
10122 else:
10123 # Release all nodes
10124 _ReleaseLocks(self, locking.LEVEL_NODE)
10126 disk_abort = False
10127 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
10128 feedback_fn("* wiping instance disks...")
10129 try:
10130 _WipeDisks(self, iobj)
10131 except errors.OpExecError, err:
10132 logging.exception("Wiping disks failed")
10133 self.LogWarning("Wiping instance disks failed (%s)", err)
10134 disk_abort = True
10136 if disk_abort:
10137 # Something is already wrong with the disks, don't do anything else
10138 pass
10139 elif self.op.wait_for_sync:
10140 disk_abort = not _WaitForSync(self, iobj)
10141 elif iobj.disk_template in constants.DTS_INT_MIRROR:
10142 # make sure the disks are not degraded (still sync-ing is ok)
10143 feedback_fn("* checking mirrors status")
10144 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
10145 else:
10146 disk_abort = False
10148 if disk_abort:
10149 _RemoveDisks(self, iobj)
10150 self.cfg.RemoveInstance(iobj.name)
10151 # Make sure the instance lock gets removed
10152 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
10153 raise errors.OpExecError("There are some degraded disks for"
10154 " this instance")
10156 # Release all node resource locks
10157 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
10159 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
10160 # we need to set the disks ID to the primary node, since the
10161 # preceding code might or might not have done it, depending on
10162 # disk template and other options
10163 for disk in iobj.disks:
10164 self.cfg.SetDiskID(disk, pnode_name)
10165 if self.op.mode == constants.INSTANCE_CREATE:
10166 if not self.op.no_install:
10167 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
10168 not self.op.wait_for_sync)
10169 if pause_sync:
10170 feedback_fn("* pausing disk sync to install instance OS")
10171 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10172 (iobj.disks,
10173 iobj), True)
10174 for idx, success in enumerate(result.payload):
10175 if not success:
10176 logging.warn("pause-sync of instance %s for disk %d failed",
10177 instance, idx)
10179 feedback_fn("* running the instance OS create scripts...")
10180 # FIXME: pass debug option from opcode to backend
10181 os_add_result = \
10182 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
10183 self.op.debug_level)
10184 if pause_sync:
10185 feedback_fn("* resuming disk sync")
10186 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10187 (iobj.disks,
10188 iobj), False)
10189 for idx, success in enumerate(result.payload):
10190 if not success:
10191 logging.warn("resume-sync of instance %s for disk %d failed",
10192 instance, idx)
10194 os_add_result.Raise("Could not add os for instance %s"
10195 " on node %s" % (instance, pnode_name))
10198 if self.op.mode == constants.INSTANCE_IMPORT:
10199 feedback_fn("* running the instance OS import scripts...")
10201 transfers = []
10203 for idx, image in enumerate(self.src_images):
10204 if not image:
10205 continue
10207 # FIXME: pass debug option from opcode to backend
10208 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
10209 constants.IEIO_FILE, (image, ),
10210 constants.IEIO_SCRIPT,
10211 (iobj.disks[idx], idx),
10212 None)
10213 transfers.append(dt)
10215 import_result = \
10216 masterd.instance.TransferInstanceData(self, feedback_fn,
10217 self.op.src_node, pnode_name,
10218 self.pnode.secondary_ip,
10219 iobj, transfers)
10220 if not compat.all(import_result):
10221 self.LogWarning("Some disks for instance %s on node %s were not"
10222 " imported successfully" % (instance, pnode_name))
10224 rename_from = self._old_instance_name
10226 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
10227 feedback_fn("* preparing remote import...")
10228 # The source cluster will stop the instance before attempting to make
10229 # a connection. In some cases stopping an instance can take a long
10230 # time, hence the shutdown timeout is added to the connection
10231 # timeout.
10232 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
10233 self.op.source_shutdown_timeout)
10234 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10236 assert iobj.primary_node == self.pnode.name
10237 disk_results = \
10238 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
10239 self.source_x509_ca,
10240 self._cds, timeouts)
10241 if not compat.all(disk_results):
10242 # TODO: Should the instance still be started, even if some disks
10243 # failed to import (valid for local imports, too)?
10244 self.LogWarning("Some disks for instance %s on node %s were not"
10245 " imported successfully" % (instance, pnode_name))
10247 rename_from = self.source_instance_name
10249 else:
10250 # also checked in the prereq part
10251 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
10252 % self.op.mode)
10254 # Run rename script on newly imported instance
10255 assert iobj.name == instance
10256 feedback_fn("Running rename script for %s" % instance)
10257 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
10258 rename_from,
10259 self.op.debug_level)
10260 if result.fail_msg:
10261 self.LogWarning("Failed to run rename script for %s on node"
10262 " %s: %s" % (instance, pnode_name, result.fail_msg))
10264 assert not self.owned_locks(locking.LEVEL_NODE_RES)
10266 if self.op.start:
10267 iobj.admin_state = constants.ADMINST_UP
10268 self.cfg.Update(iobj, feedback_fn)
10269 logging.info("Starting instance %s on node %s", instance, pnode_name)
10270 feedback_fn("* starting instance...")
10271 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
10272 False)
10273 result.Raise("Could not start instance")
10275 return list(iobj.all_nodes)
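# Hedged usage example (command-line flags assumed from standard Ganeti
# usage, values invented): this LU backs invocations such as
#   gnt-instance add -t drbd -n node1:node2 -o debootstrap -s 10G \
#     inst1.example.com
# which would allocate the DRBD disks, install the OS and, unless --no-start
# was given, start the new instance.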
10278 def _CheckRADOSFreeSpace():
10279 """Compute disk size requirements inside the RADOS cluster.
10282 # For the RADOS cluster we assume there is always enough space.
10286 class LUInstanceConsole(NoHooksLU):
10287 """Connect to an instance's console.
10289 This is somewhat special in that it returns the command line that
10290 you need to run on the master node in order to connect to the
10291 console.
10293 """
10294 REQ_BGL = False
10296 def ExpandNames(self):
10297 self.share_locks = _ShareAll()
10298 self._ExpandAndLockInstance()
10300 def CheckPrereq(self):
10301 """Check prerequisites.
10303 This checks that the instance is in the cluster.
10306 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10307 assert self.instance is not None, \
10308 "Cannot retrieve locked instance %s" % self.op.instance_name
10309 _CheckNodeOnline(self, self.instance.primary_node)
10311 def Exec(self, feedback_fn):
10312 """Connect to the console of an instance
10315 instance = self.instance
10316 node = instance.primary_node
10318 node_insts = self.rpc.call_instance_list([node],
10319 [instance.hypervisor])[node]
10320 node_insts.Raise("Can't get node information from %s" % node)
10322 if instance.name not in node_insts.payload:
10323 if instance.admin_state == constants.ADMINST_UP:
10324 state = constants.INSTST_ERRORDOWN
10325 elif instance.admin_state == constants.ADMINST_DOWN:
10326 state = constants.INSTST_ADMINDOWN
10327 else:
10328 state = constants.INSTST_ADMINOFFLINE
10329 raise errors.OpExecError("Instance %s is not running (state %s)" %
10330 (instance.name, state))
10332 logging.debug("Connecting to console of %s on %s", instance.name, node)
10334 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
10337 def _GetInstanceConsole(cluster, instance):
10338 """Returns console information for an instance.
10340 @type cluster: L{objects.Cluster}
10341 @type instance: L{objects.Instance}
10342 @rtype: dict
10344 """
10345 hyper = hypervisor.GetHypervisor(instance.hypervisor)
10346 # beparams and hvparams are passed separately, to avoid editing the
10347 # instance and then saving the defaults in the instance itself.
10348 hvparams = cluster.FillHV(instance)
10349 beparams = cluster.FillBE(instance)
10350 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
10352 assert console.instance == instance.name
10353 assert console.Validate()
10355 return console.ToDict()
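# Illustrative sketch (field names assumed, not taken from this source): the
# returned dict might resemble
#   {"instance": "inst1.example.com", "kind": "ssh", "host": "node1",
#    "user": "root", "command": ["xm", "console", "inst1.example.com"]}
# for a Xen instance; the exact fields depend on the hypervisor's
# GetInstanceConsole implementation.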
10358 class LUInstanceReplaceDisks(LogicalUnit):
10359 """Replace the disks of an instance.
10362 HPATH = "mirrors-replace"
10363 HTYPE = constants.HTYPE_INSTANCE
10366 def CheckArguments(self):
10367 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
10368 self.op.iallocator)
10370 def ExpandNames(self):
10371 self._ExpandAndLockInstance()
10373 assert locking.LEVEL_NODE not in self.needed_locks
10374 assert locking.LEVEL_NODE_RES not in self.needed_locks
10375 assert locking.LEVEL_NODEGROUP not in self.needed_locks
10377 assert self.op.iallocator is None or self.op.remote_node is None, \
10378 "Conflicting options"
10380 if self.op.remote_node is not None:
10381 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10383 # Warning: do not remove the locking of the new secondary here
10384 # unless DRBD8.AddChildren is changed to work in parallel;
10385 # currently it doesn't since parallel invocations of
10386 # FindUnusedMinor will conflict
10387 self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
10388 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
10389 else:
10390 self.needed_locks[locking.LEVEL_NODE] = []
10391 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10393 if self.op.iallocator is not None:
10394 # iallocator will select a new node in the same group
10395 self.needed_locks[locking.LEVEL_NODEGROUP] = []
10397 self.needed_locks[locking.LEVEL_NODE_RES] = []
10399 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
10400 self.op.iallocator, self.op.remote_node,
10401 self.op.disks, False, self.op.early_release,
10402 self.op.ignore_ipolicy)
10404 self.tasklets = [self.replacer]
10406 def DeclareLocks(self, level):
10407 if level == locking.LEVEL_NODEGROUP:
10408 assert self.op.remote_node is None
10409 assert self.op.iallocator is not None
10410 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
10412 self.share_locks[locking.LEVEL_NODEGROUP] = 1
10413 # Lock all groups used by instance optimistically; this requires going
10414 # via the node before it's locked, requiring verification later on
10415 self.needed_locks[locking.LEVEL_NODEGROUP] = \
10416 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
10418 elif level == locking.LEVEL_NODE:
10419 if self.op.iallocator is not None:
10420 assert self.op.remote_node is None
10421 assert not self.needed_locks[locking.LEVEL_NODE]
10423 # Lock member nodes of all locked groups
10424 self.needed_locks[locking.LEVEL_NODE] = [node_name
10425 for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
10426 for node_name in self.cfg.GetNodeGroup(group_uuid).members]
10427 else:
10428 self._LockInstancesNodes()
10429 elif level == locking.LEVEL_NODE_RES:
10430 # Reuse node locks
10431 self.needed_locks[locking.LEVEL_NODE_RES] = \
10432 self.needed_locks[locking.LEVEL_NODE]
10434 def BuildHooksEnv(self):
10435 """Build hooks env.
10437 This runs on the master, the primary and all the secondaries.
10440 instance = self.replacer.instance
10442 "MODE": self.op.mode,
10443 "NEW_SECONDARY": self.op.remote_node,
10444 "OLD_SECONDARY": instance.secondary_nodes[0],
10446 env.update(_BuildInstanceHookEnvByObject(self, instance))
10449 def BuildHooksNodes(self):
10450 """Build hooks nodes.
10453 instance = self.replacer.instance
10455 self.cfg.GetMasterNode(),
10456 instance.primary_node,
10458 if self.op.remote_node is not None:
10459 nl.append(self.op.remote_node)
10462 def CheckPrereq(self):
10463 """Check prerequisites.
10466 assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
10467 self.op.iallocator is None)
10469 # Verify if node group locks are still correct
10470 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
10471 if owned_groups:
10472 _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
10474 return LogicalUnit.CheckPrereq(self)
10477 class TLReplaceDisks(Tasklet):
10478 """Replaces disks for an instance.
10480 Note: Locking is not within the scope of this class.
10482 """
10483 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
10484 disks, delay_iallocator, early_release, ignore_ipolicy):
10485 """Initializes this class.
10488 Tasklet.__init__(self, lu)
10491 self.instance_name = instance_name
10493 self.iallocator_name = iallocator_name
10494 self.remote_node = remote_node
10496 self.delay_iallocator = delay_iallocator
10497 self.early_release = early_release
10498 self.ignore_ipolicy = ignore_ipolicy
10501 self.instance = None
10502 self.new_node = None
10503 self.target_node = None
10504 self.other_node = None
10505 self.remote_node_info = None
10506 self.node_secondary_ip = None
10508 @staticmethod
10509 def CheckArguments(mode, remote_node, iallocator):
10510 """Helper function for users of this class.
10512 """
10513 # check for valid parameter combination
10514 if mode == constants.REPLACE_DISK_CHG:
10515 if remote_node is None and iallocator is None:
10516 raise errors.OpPrereqError("When changing the secondary either an"
10517 " iallocator script must be used or the"
10518 " new node given", errors.ECODE_INVAL)
10520 if remote_node is not None and iallocator is not None:
10521 raise errors.OpPrereqError("Give either the iallocator or the new"
10522 " secondary, not both", errors.ECODE_INVAL)
10524 elif remote_node is not None or iallocator is not None:
10525 # Not replacing the secondary
10526 raise errors.OpPrereqError("The iallocator and new node options can"
10527 " only be used when changing the"
10528 " secondary node", errors.ECODE_INVAL)
10530 @staticmethod
10531 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
10532 """Compute a new secondary node using an IAllocator.
10534 """
10535 ial = IAllocator(lu.cfg, lu.rpc,
10536 mode=constants.IALLOCATOR_MODE_RELOC,
10537 name=instance_name,
10538 relocate_from=list(relocate_from))
10540 ial.Run(iallocator_name)
10542 if not ial.success:
10543 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
10544 " %s" % (iallocator_name, ial.info),
10545 errors.ECODE_NORES)
10547 if len(ial.result) != ial.required_nodes:
10548 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
10549 " of nodes (%s), required %s" %
10551 len(ial.result), ial.required_nodes),
10552 errors.ECODE_FAULT)
10554 remote_node_name = ial.result[0]
10556 lu.LogInfo("Selected new secondary for instance '%s': %s",
10557 instance_name, remote_node_name)
10559 return remote_node_name
10561 def _FindFaultyDisks(self, node_name):
10562 """Wrapper for L{_FindFaultyInstanceDisks}.
10565 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
10568 def _CheckDisksActivated(self, instance):
10569 """Checks if the instance disks are activated.
10571 @param instance: The instance to check disks
10572 @return: True if they are activated, False otherwise
10574 """
10575 nodes = instance.all_nodes
10577 for idx, dev in enumerate(instance.disks):
10578 for node in nodes:
10579 self.lu.LogInfo("Checking disk/%d on %s", idx, node)
10580 self.cfg.SetDiskID(dev, node)
10582 result = _BlockdevFind(self, node, dev, instance)
10584 if result.offline:
10585 continue
10586 elif result.fail_msg or not result.payload:
10587 return False
10589 return True
10591 def CheckPrereq(self):
10592 """Check prerequisites.
10594 This checks that the instance is in the cluster.
10596 """
10597 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
10598 assert instance is not None, \
10599 "Cannot retrieve locked instance %s" % self.instance_name
10601 if instance.disk_template != constants.DT_DRBD8:
10602 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
10603 " instances", errors.ECODE_INVAL)
10605 if len(instance.secondary_nodes) != 1:
10606 raise errors.OpPrereqError("The instance has a strange layout,"
10607 " expected one secondary but found %d" %
10608 len(instance.secondary_nodes),
10609 errors.ECODE_FAULT)
10611 if not self.delay_iallocator:
10612 self._CheckPrereq2()
10614 def _CheckPrereq2(self):
10615 """Check prerequisites, second part.
10617 This function should always be part of CheckPrereq. It was separated and is
10618 now called from Exec because during node evacuation iallocator was only
10619 called with an unmodified cluster model, not taking planned changes into
10620 account.
10622 """
10623 instance = self.instance
10624 secondary_node = instance.secondary_nodes[0]
10626 if self.iallocator_name is None:
10627 remote_node = self.remote_node
10628 else:
10629 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
10630 instance.name, instance.secondary_nodes)
10632 if remote_node is None:
10633 self.remote_node_info = None
10634 else:
10635 assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
10636 "Remote node '%s' is not locked" % remote_node
10638 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
10639 assert self.remote_node_info is not None, \
10640 "Cannot retrieve locked node %s" % remote_node
10642 if remote_node == self.instance.primary_node:
10643 raise errors.OpPrereqError("The specified node is the primary node of"
10644 " the instance", errors.ECODE_INVAL)
10646 if remote_node == secondary_node:
10647 raise errors.OpPrereqError("The specified node is already the"
10648 " secondary node of the instance",
10649 errors.ECODE_INVAL)
10651 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
10652 constants.REPLACE_DISK_CHG):
10653 raise errors.OpPrereqError("Cannot specify disks to be replaced",
10654 errors.ECODE_INVAL)
10656 if self.mode == constants.REPLACE_DISK_AUTO:
10657 if not self._CheckDisksActivated(instance):
10658 raise errors.OpPrereqError("Please run activate-disks on instance %s"
10659 " first" % self.instance_name,
10660 errors.ECODE_STATE)
10661 faulty_primary = self._FindFaultyDisks(instance.primary_node)
10662 faulty_secondary = self._FindFaultyDisks(secondary_node)
10664 if faulty_primary and faulty_secondary:
10665 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
10666 " one node and can not be repaired"
10667 " automatically" % self.instance_name,
10668 errors.ECODE_STATE)
10670 if faulty_primary:
10671 self.disks = faulty_primary
10672 self.target_node = instance.primary_node
10673 self.other_node = secondary_node
10674 check_nodes = [self.target_node, self.other_node]
10675 elif faulty_secondary:
10676 self.disks = faulty_secondary
10677 self.target_node = secondary_node
10678 self.other_node = instance.primary_node
10679 check_nodes = [self.target_node, self.other_node]
10680 else:
10681 self.disks = []
10682 check_nodes = []
10684 else:
10685 # Non-automatic modes
10686 if self.mode == constants.REPLACE_DISK_PRI:
10687 self.target_node = instance.primary_node
10688 self.other_node = secondary_node
10689 check_nodes = [self.target_node, self.other_node]
10691 elif self.mode == constants.REPLACE_DISK_SEC:
10692 self.target_node = secondary_node
10693 self.other_node = instance.primary_node
10694 check_nodes = [self.target_node, self.other_node]
10696 elif self.mode == constants.REPLACE_DISK_CHG:
10697 self.new_node = remote_node
10698 self.other_node = instance.primary_node
10699 self.target_node = secondary_node
10700 check_nodes = [self.new_node, self.other_node]
10702 _CheckNodeNotDrained(self.lu, remote_node)
10703 _CheckNodeVmCapable(self.lu, remote_node)
10705 old_node_info = self.cfg.GetNodeInfo(secondary_node)
10706 assert old_node_info is not None
10707 if old_node_info.offline and not self.early_release:
10708 # doesn't make sense to delay the release
10709 self.early_release = True
10710 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
10711 " early-release mode", secondary_node)
10714 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
10717 # If not specified all disks should be replaced
10719 self.disks = range(len(self.instance.disks))
10721 # TODO: This is ugly, but right now we can't distinguish between internal
10722 # submitted opcode and external one. We should fix that.
10723 if self.remote_node_info:
10724 # We change the node, lets verify it still meets instance policy
10725 new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
10726 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
10727 new_group_info)
10728 _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
10729 ignore=self.ignore_ipolicy)
10731 for node in check_nodes:
10732 _CheckNodeOnline(self.lu, node)
10734 touched_nodes = frozenset(node_name for node_name in [self.new_node,
10735 self.other_node,
10736 self.target_node]
10737 if node_name is not None)
10739 # Release unneeded node and node resource locks
10740 _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
10741 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
10743 # Release any owned node group
10744 if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
10745 _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
10747 # Check whether disks are valid
10748 for disk_idx in self.disks:
10749 instance.FindDisk(disk_idx)
10751 # Get secondary node IP addresses
10752 self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
10753 in self.cfg.GetMultiNodeInfo(touched_nodes))
10755 def Exec(self, feedback_fn):
10756 """Execute disk replacement.
10758 This dispatches the disk replacement to the appropriate handler.
10761 if self.delay_iallocator:
10762 self._CheckPrereq2()
10764 if __debug__:
10765 # Verify owned locks before starting operation
10766 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
10767 assert set(owned_nodes) == set(self.node_secondary_ip), \
10768 ("Incorrect node locks, owning %s, expected %s" %
10769 (owned_nodes, self.node_secondary_ip.keys()))
10770 assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
10771 self.lu.owned_locks(locking.LEVEL_NODE_RES))
10773 owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
10774 assert list(owned_instances) == [self.instance_name], \
10775 "Instance '%s' not locked" % self.instance_name
10777 assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
10778 "Should not own any node group lock at this point"
10781 feedback_fn("No disks need replacement for instance '%s'" %
10782 self.instance.name)
10785 feedback_fn("Replacing disk(s) %s for instance '%s'" %
10786 (utils.CommaJoin(self.disks), self.instance.name))
10787 feedback_fn("Current primary node: %s" % self.instance.primary_node)
10788 feedback_fn("Current secondary node: %s" %
10789 utils.CommaJoin(self.instance.secondary_nodes))
10791 activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
10793 # Activate the instance disks if we're replacing them on a down instance
10794 if activate_disks:
10795 _StartInstanceDisks(self.lu, self.instance, True)
10797 try:
10798 # Should we replace the secondary node?
10799 if self.new_node is not None:
10800 fn = self._ExecDrbd8Secondary
10801 else:
10802 fn = self._ExecDrbd8DiskOnly
10804 result = fn(feedback_fn)
10805 finally:
10806 # Deactivate the instance disks if we're replacing them on a
10807 # down instance
10808 if activate_disks:
10809 _SafeShutdownInstanceDisks(self.lu, self.instance)
10811 assert not self.lu.owned_locks(locking.LEVEL_NODE)
10813 if __debug__:
10814 # Verify owned locks
10815 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
10816 nodes = frozenset(self.node_secondary_ip)
10817 assert ((self.early_release and not owned_nodes) or
10818 (not self.early_release and not (set(owned_nodes) - nodes))), \
10819 ("Not owning the correct locks, early_release=%s, owned=%r,"
10820 " nodes=%r" % (self.early_release, owned_nodes, nodes))
10824 def _CheckVolumeGroup(self, nodes):
10825 self.lu.LogInfo("Checking volume groups")
10827 vgname = self.cfg.GetVGName()
10829 # Make sure volume group exists on all involved nodes
10830 results = self.rpc.call_vg_list(nodes)
10831 if not results:
10832 raise errors.OpExecError("Can't list volume groups on the nodes")
10834 for node in nodes:
10835 res = results[node]
10836 res.Raise("Error checking node %s" % node)
10837 if vgname not in res.payload:
10838 raise errors.OpExecError("Volume group '%s' not found on node %s" %
10839 (vgname, node))
10841 def _CheckDisksExistence(self, nodes):
10842 # Check disk existence
10843 for idx, dev in enumerate(self.instance.disks):
10844 if idx not in self.disks:
10845 continue
10847 for node in nodes:
10848 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
10849 self.cfg.SetDiskID(dev, node)
10851 result = _BlockdevFind(self, node, dev, self.instance)
10853 msg = result.fail_msg
10854 if msg or not result.payload:
10855 if not msg:
10856 msg = "disk not found"
10857 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
10858 (idx, node, msg))
10860 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
10861 for idx, dev in enumerate(self.instance.disks):
10862 if idx not in self.disks:
10863 continue
10865 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
10866 (idx, node_name))
10868 if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
10869 on_primary, ldisk=ldisk):
10870 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
10871 " replace disks for instance %s" %
10872 (node_name, self.instance.name))
10874 def _CreateNewStorage(self, node_name):
10875 """Create new storage on the primary or secondary node.
10877 This is only used for same-node replaces, not for changing the
10878 secondary node, hence we don't want to modify the existing disk.
10880 """
10881 iv_names = {}
10883 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
10884 for idx, dev in enumerate(disks):
10885 if idx not in self.disks:
10886 continue
10888 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
10890 self.cfg.SetDiskID(dev, node_name)
10892 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
10893 names = _GenerateUniqueNames(self.lu, lv_names)
10895 (data_disk, meta_disk) = dev.children
10896 vg_data = data_disk.logical_id[0]
10897 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
10898 logical_id=(vg_data, names[0]),
10899 params=data_disk.params)
10900 vg_meta = meta_disk.logical_id[0]
10901 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
10902 logical_id=(vg_meta, names[1]),
10903 params=meta_disk.params)
10905 new_lvs = [lv_data, lv_meta]
10906 old_lvs = [child.Copy() for child in dev.children]
10907 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
10909 # we pass force_create=True to force the LVM creation
10910 for new_lv in new_lvs:
10911 _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
10912 _GetInstanceInfoText(self.instance), False)
10914 return iv_names
10916 def _CheckDevices(self, node_name, iv_names):
10917 for name, (dev, _, _) in iv_names.iteritems():
10918 self.cfg.SetDiskID(dev, node_name)
10920 result = _BlockdevFind(self, node_name, dev, self.instance)
10922 msg = result.fail_msg
10923 if msg or not result.payload:
10924 if not msg:
10925 msg = "disk not found"
10926 raise errors.OpExecError("Can't find DRBD device %s: %s" %
10927 (name, msg))
10929 if result.payload.is_degraded:
10930 raise errors.OpExecError("DRBD device %s is degraded!" % name)
10932 def _RemoveOldStorage(self, node_name, iv_names):
10933 for name, (_, old_lvs, _) in iv_names.iteritems():
10934 self.lu.LogInfo("Remove logical volumes for %s" % name)
10936 for lv in old_lvs:
10937 self.cfg.SetDiskID(lv, node_name)
10939 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
10940 if msg:
10941 self.lu.LogWarning("Can't remove old LV: %s" % msg,
10942 hint="remove unused LVs manually")
10944 def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
10945 """Replace a disk on the primary or secondary for DRBD 8.
10947 The algorithm for replace is quite complicated:
10949 1. for each disk to be replaced:
10951 1. create new LVs on the target node with unique names
10952 1. detach old LVs from the drbd device
10953 1. rename old LVs to name_replaced.<time_t>
10954 1. rename new LVs to old LVs
10955 1. attach the new LVs (with the old names now) to the drbd device
10957 1. wait for sync across all devices
10959 1. for each modified disk:
10961 1. remove old LVs (which have the name name_replaced.<time_t>)
10963 Failures are not very well handled.
10965 """
10966 steps_total = 6
10968 # Step: check device activation
10969 self.lu.LogStep(1, steps_total, "Check device existence")
10970 self._CheckDisksExistence([self.other_node, self.target_node])
10971 self._CheckVolumeGroup([self.target_node, self.other_node])
10973 # Step: check other node consistency
10974 self.lu.LogStep(2, steps_total, "Check peer consistency")
10975 self._CheckDisksConsistency(self.other_node,
10976 self.other_node == self.instance.primary_node,
10977 False)
10979 # Step: create new storage
10980 self.lu.LogStep(3, steps_total, "Allocate new storage")
10981 iv_names = self._CreateNewStorage(self.target_node)
10983 # Step: for each lv, detach+rename*2+attach
10984 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
10985 for dev, old_lvs, new_lvs in iv_names.itervalues():
10986 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
10988 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
10989 old_lvs)
10990 result.Raise("Can't detach drbd from local storage on node"
10991 " %s for device %s" % (self.target_node, dev.iv_name))
10993 #cfg.Update(instance)
10995 # ok, we created the new LVs, so now we know we have the needed
10996 # storage; as such, we proceed on the target node to rename
10997 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
10998 # using the assumption that logical_id == physical_id (which in
10999 # turn is the unique_id on that node)
11001 # FIXME(iustin): use a better name for the replaced LVs
11002 temp_suffix = int(time.time())
11003 ren_fn = lambda d, suff: (d.physical_id[0],
11004 d.physical_id[1] + "_replaced-%s" % suff)
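# Example of ren_fn (suffix value invented): a physical_id of
# ("xenvg", "4df23a1b.disk0_data") becomes
# ("xenvg", "4df23a1b.disk0_data_replaced-1391234567").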
11006 # Build the rename list based on what LVs exist on the node
11007 rename_old_to_new = []
11008 for to_ren in old_lvs:
11009 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
11010 if not result.fail_msg and result.payload:
11011 # device exists
11012 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
11014 self.lu.LogInfo("Renaming the old LVs on the target node")
11015 result = self.rpc.call_blockdev_rename(self.target_node,
11016 rename_old_to_new)
11017 result.Raise("Can't rename old LVs on node %s" % self.target_node)
11019 # Now we rename the new LVs to the old LVs
11020 self.lu.LogInfo("Renaming the new LVs on the target node")
11021 rename_new_to_old = [(new, old.physical_id)
11022 for old, new in zip(old_lvs, new_lvs)]
11023 result = self.rpc.call_blockdev_rename(self.target_node,
11024 rename_new_to_old)
11025 result.Raise("Can't rename new LVs on node %s" % self.target_node)
11027 # Intermediate steps of in memory modifications
11028 for old, new in zip(old_lvs, new_lvs):
11029 new.logical_id = old.logical_id
11030 self.cfg.SetDiskID(new, self.target_node)
11032 # We need to modify old_lvs so that removal later removes the
11033 # right LVs, not the newly added ones; note that old_lvs is a
11034 # copy here
11035 for disk in old_lvs:
11036 disk.logical_id = ren_fn(disk, temp_suffix)
11037 self.cfg.SetDiskID(disk, self.target_node)
11039 # Now that the new lvs have the old name, we can add them to the device
11040 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
11041 result = self.rpc.call_blockdev_addchildren(self.target_node,
11042 (dev, self.instance), new_lvs)
11043 msg = result.fail_msg
11044 if msg:
11045 for new_lv in new_lvs:
11046 msg2 = self.rpc.call_blockdev_remove(self.target_node,
11047 new_lv).fail_msg
11048 if msg2:
11049 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
11050 hint=("cleanup manually the unused logical"
11051 " volumes"))
11052 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
11054 cstep = itertools.count(5)
11056 if self.early_release:
11057 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11058 self._RemoveOldStorage(self.target_node, iv_names)
11059 # TODO: Check if releasing locks early still makes sense
11060 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11061 else:
11062 # Release all resource locks except those used by the instance
11063 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11064 keep=self.node_secondary_ip.keys())
11066 # Release all node locks while waiting for sync
11067 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11069 # TODO: Can the instance lock be downgraded here? Take the optional disk
11070 # shutdown in the caller into consideration.
11072 # Wait for sync
11073 # This can fail as the old devices are degraded and _WaitForSync
11074 # does a combined result over all disks, so we don't check its return value
11075 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11076 _WaitForSync(self.lu, self.instance)
11078 # Check all devices manually
11079 self._CheckDevices(self.instance.primary_node, iv_names)
11081 # Step: remove old storage
11082 if not self.early_release:
11083 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11084 self._RemoveOldStorage(self.target_node, iv_names)
11086 def _ExecDrbd8Secondary(self, feedback_fn):
11087 """Replace the secondary node for DRBD 8.
11089 The algorithm for replace is quite complicated:
11090 - for all disks of the instance:
11091 - create new LVs on the new node with same names
11092 - shutdown the drbd device on the old secondary
11093 - disconnect the drbd network on the primary
11094 - create the drbd device on the new secondary
11095 - network attach the drbd on the primary, using an artifice:
11096 the drbd code for Attach() will connect to the network if it
11097 finds a device which is connected to the good local disks but
11098 not network enabled
11099 - wait for sync across all devices
11100 - remove all disks from the old secondary
11102 Failures are not very well handled.
11104 """
11106 steps_total = 6
11107 pnode = self.instance.primary_node
11109 # Step: check device activation
11110 self.lu.LogStep(1, steps_total, "Check device existence")
11111 self._CheckDisksExistence([self.instance.primary_node])
11112 self._CheckVolumeGroup([self.instance.primary_node])
11114 # Step: check other node consistency
11115 self.lu.LogStep(2, steps_total, "Check peer consistency")
11116 self._CheckDisksConsistency(self.instance.primary_node, True, True)
11118 # Step: create new storage
11119 self.lu.LogStep(3, steps_total, "Allocate new storage")
11120 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
11121 for idx, dev in enumerate(disks):
11122 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
11123 (self.new_node, idx))
11124 # we pass force_create=True to force LVM creation
11125 for new_lv in dev.children:
11126 _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
11127 True, _GetInstanceInfoText(self.instance), False)
11129 # Step 4: drbd minors and drbd setup changes
11130 # after this, we must manually remove the drbd minors on both the
11131 # error and the success paths
11132 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
11133 minors = self.cfg.AllocateDRBDMinor([self.new_node
11134 for dev in self.instance.disks],
11135 self.instance.name)
11136 logging.debug("Allocated minors %r", minors)
11138 iv_names = {}
11139 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
11140 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
11141 (self.new_node, idx))
11142 # create new devices on new_node; note that we create two IDs:
11143 # one without port, so the drbd will be activated without
11144 # networking information on the new node at this stage, and one
11145 # with network, for the later activation in step 4
11146 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
11147 if self.instance.primary_node == o_node1:
11148 p_minor = o_minor1
11149 else:
11150 assert self.instance.primary_node == o_node2, "Three-node instance?"
11151 p_minor = o_minor2
11153 new_alone_id = (self.instance.primary_node, self.new_node, None,
11154 p_minor, new_minor, o_secret)
11155 new_net_id = (self.instance.primary_node, self.new_node, o_port,
11156 p_minor, new_minor, o_secret)
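# Annotation (not part of the original source): a DRBD8 logical_id is the
# 6-tuple (nodeA, nodeB, port, minorA, minorB, secret) unpacked above.
# Purely illustrative values:
#
#   new_alone_id = ("node1.example.com", "node3.example.com", None,
#                   0, new_minor, "secret")
#   new_net_id = ("node1.example.com", "node3.example.com", 11000,
#                 0, new_minor, "secret")
#
# The None port in new_alone_id is what makes the device come up in
# standalone (non-networked) mode on the new node.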
11158 iv_names[idx] = (dev, dev.children, new_net_id)
11159 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
11160 new_net_id)
11161 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
11162 logical_id=new_alone_id,
11163 children=dev.children,
11164 size=dev.size,
11165 params={})
11166 (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
11167 self.cfg)
11168 try:
11169 _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
11170 anno_new_drbd,
11171 _GetInstanceInfoText(self.instance), False)
11172 except errors.GenericError:
11173 self.cfg.ReleaseDRBDMinors(self.instance.name)
11174 raise
11176 # We have new devices, shutdown the drbd on the old secondary
11177 for idx, dev in enumerate(self.instance.disks):
11178 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
11179 self.cfg.SetDiskID(dev, self.target_node)
11180 msg = self.rpc.call_blockdev_shutdown(self.target_node,
11181 (dev, self.instance)).fail_msg
11182 if msg:
11183 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
11184 " node: %s" % (idx, msg),
11185 hint=("Please cleanup this device manually as"
11186 " soon as possible"))
11188 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
11189 result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
11190 self.instance.disks)[pnode]
11192 msg = result.fail_msg
11193 if msg:
11194 # detaches didn't succeed (unlikely)
11195 self.cfg.ReleaseDRBDMinors(self.instance.name)
11196 raise errors.OpExecError("Can't detach the disks from the network on"
11197 " old node: %s" % (msg,))
11199 # if we managed to detach at least one, we update all the disks of
11200 # the instance to point to the new secondary
11201 self.lu.LogInfo("Updating instance configuration")
11202 for dev, _, new_logical_id in iv_names.itervalues():
11203 dev.logical_id = new_logical_id
11204 self.cfg.SetDiskID(dev, self.instance.primary_node)
11206 self.cfg.Update(self.instance, feedback_fn)
11208 # Release all node locks (the configuration has been updated)
11209 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11211 # and now perform the drbd attach
11212 self.lu.LogInfo("Attaching primary drbds to new secondary"
11213 " (standalone => connected)")
11214 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
11215 self.new_node],
11216 self.node_secondary_ip,
11217 (self.instance.disks, self.instance),
11218 self.instance.name,
11219 False)
11220 for to_node, to_result in result.items():
11221 msg = to_result.fail_msg
11222 if msg:
11223 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
11224 to_node, msg,
11225 hint=("please do a gnt-instance info to see the"
11226 " status of disks"))
11228 cstep = itertools.count(5)
11230 if self.early_release:
11231 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11232 self._RemoveOldStorage(self.target_node, iv_names)
11233 # TODO: Check if releasing locks early still makes sense
11234 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11235 else:
11236 # Release all resource locks except those used by the instance
11237 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11238 keep=self.node_secondary_ip.keys())
11240 # TODO: Can the instance lock be downgraded here? Take the optional disk
11241 # shutdown in the caller into consideration.
11243 # Wait for sync
11244 # This can fail as the old devices are degraded and _WaitForSync
11245 # does a combined result over all disks, so we don't check its return value
11246 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11247 _WaitForSync(self.lu, self.instance)
11249 # Check all devices manually
11250 self._CheckDevices(self.instance.primary_node, iv_names)
11252 # Step: remove old storage
11253 if not self.early_release:
11254 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11255 self._RemoveOldStorage(self.target_node, iv_names)
11258 class LURepairNodeStorage(NoHooksLU):
11259 """Repairs the volume group on a node.
11261 """
11262 REQ_BGL = False
11264 def CheckArguments(self):
11265 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11267 storage_type = self.op.storage_type
11269 if (constants.SO_FIX_CONSISTENCY not in
11270 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
11271 raise errors.OpPrereqError("Storage units of type '%s' can not be"
11272 " repaired" % storage_type,
11273 errors.ECODE_INVAL)
11275 def ExpandNames(self):
11276 self.needed_locks = {
11277 locking.LEVEL_NODE: [self.op.node_name],
11278 }
11280 def _CheckFaultyDisks(self, instance, node_name):
11281 """Ensure faulty disks abort the opcode or at least warn."""
11282 try:
11283 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
11284 node_name, True):
11285 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
11286 " node '%s'" % (instance.name, node_name),
11287 errors.ECODE_STATE)
11288 except errors.OpPrereqError, err:
11289 if self.op.ignore_consistency:
11290 self.proc.LogWarning(str(err.args[0]))
11291 else:
11292 raise
11294 def CheckPrereq(self):
11295 """Check prerequisites.
11297 """
11298 # Check whether any instance on this node has faulty disks
11299 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
11300 if inst.admin_state != constants.ADMINST_UP:
11301 continue
11302 check_nodes = set(inst.all_nodes)
11303 check_nodes.discard(self.op.node_name)
11304 for inst_node_name in check_nodes:
11305 self._CheckFaultyDisks(inst, inst_node_name)
11307 def Exec(self, feedback_fn):
11308 feedback_fn("Repairing storage unit '%s' on %s ..." %
11309 (self.op.name, self.op.node_name))
11311 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
11312 result = self.rpc.call_storage_execute(self.op.node_name,
11313 self.op.storage_type, st_args,
11314 self.op.name,
11315 constants.SO_FIX_CONSISTENCY)
11316 result.Raise("Failed to repair storage unit '%s' on %s" %
11317 (self.op.name, self.op.node_name))
11320 class LUNodeEvacuate(NoHooksLU):
11321 """Evacuates instances off a list of nodes.
11323 """
11324 REQ_BGL = False
11326 _MODE2IALLOCATOR = {
11327 constants.NODE_EVAC_PRI: constants.IALLOCATOR_NEVAC_PRI,
11328 constants.NODE_EVAC_SEC: constants.IALLOCATOR_NEVAC_SEC,
11329 constants.NODE_EVAC_ALL: constants.IALLOCATOR_NEVAC_ALL,
11330 }
11331 assert frozenset(_MODE2IALLOCATOR.keys()) == constants.NODE_EVAC_MODES
11332 assert (frozenset(_MODE2IALLOCATOR.values()) ==
11333 constants.IALLOCATOR_NEVAC_MODES)
11335 def CheckArguments(self):
11336 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
11338 def ExpandNames(self):
11339 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11341 if self.op.remote_node is not None:
11342 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
11343 assert self.op.remote_node
11345 if self.op.remote_node == self.op.node_name:
11346 raise errors.OpPrereqError("Can not use evacuated node as a new"
11347 " secondary node", errors.ECODE_INVAL)
11349 if self.op.mode != constants.NODE_EVAC_SEC:
11350 raise errors.OpPrereqError("Without the use of an iallocator only"
11351 " secondary instances can be evacuated",
11352 errors.ECODE_INVAL)
11354 # Declare locks
11355 self.share_locks = _ShareAll()
11356 self.needed_locks = {
11357 locking.LEVEL_INSTANCE: [],
11358 locking.LEVEL_NODEGROUP: [],
11359 locking.LEVEL_NODE: [],
11360 }
11362 # Determine nodes (via group) optimistically, needs verification once locks
11363 # have been acquired
11364 self.lock_nodes = self._DetermineNodes()
11366 def _DetermineNodes(self):
11367 """Gets the list of nodes to operate on.
11369 """
11370 if self.op.remote_node is None:
11371 # Iallocator will choose any node(s) in the same group
11372 group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
11373 else:
11374 group_nodes = frozenset([self.op.remote_node])
11376 # Determine nodes to be locked
11377 return set([self.op.node_name]) | group_nodes
11379 def _DetermineInstances(self):
11380 """Builds list of instances to operate on.
11382 """
11383 assert self.op.mode in constants.NODE_EVAC_MODES
11385 if self.op.mode == constants.NODE_EVAC_PRI:
11386 # Primary instances only
11387 inst_fn = _GetNodePrimaryInstances
11388 assert self.op.remote_node is None, \
11389 "Evacuating primary instances requires iallocator"
11390 elif self.op.mode == constants.NODE_EVAC_SEC:
11391 # Secondary instances only
11392 inst_fn = _GetNodeSecondaryInstances
11393 else:
11394 # All instances
11395 assert self.op.mode == constants.NODE_EVAC_ALL
11396 inst_fn = _GetNodeInstances
11397 # TODO: In 2.6, change the iallocator interface to take an evacuation mode
11398 # instead of an iallocator switch
11399 raise errors.OpPrereqError("Due to an issue with the iallocator"
11400 " interface it is not possible to evacuate"
11401 " all instances at once; specify explicitly"
11402 " whether to evacuate primary or secondary"
11403 " instances",
11404 errors.ECODE_INVAL)
11406 return inst_fn(self.cfg, self.op.node_name)
11408 def DeclareLocks(self, level):
11409 if level == locking.LEVEL_INSTANCE:
11410 # Lock instances optimistically, needs verification once node and group
11411 # locks have been acquired
11412 self.needed_locks[locking.LEVEL_INSTANCE] = \
11413 set(i.name for i in self._DetermineInstances())
11415 elif level == locking.LEVEL_NODEGROUP:
11416 # Lock node groups for all potential target nodes optimistically, needs
11417 # verification once nodes have been acquired
11418 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11419 self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
11421 elif level == locking.LEVEL_NODE:
11422 self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
11424 def CheckPrereq(self):
11426 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11427 owned_nodes = self.owned_locks(locking.LEVEL_NODE)
11428 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
11430 need_nodes = self._DetermineNodes()
11432 if not owned_nodes.issuperset(need_nodes):
11433 raise errors.OpPrereqError("Nodes in same group as '%s' changed since"
11434 " locks were acquired, current nodes are"
11435 " '%s', used to be '%s'; retry the"
11436 " operation" %
11437 (self.op.node_name,
11438 utils.CommaJoin(need_nodes),
11439 utils.CommaJoin(owned_nodes)),
11440 errors.ECODE_STATE)
11442 wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
11443 if owned_groups != wanted_groups:
11444 raise errors.OpExecError("Node groups changed since locks were acquired,"
11445 " current groups are '%s', used to be '%s';"
11446 " retry the operation" %
11447 (utils.CommaJoin(wanted_groups),
11448 utils.CommaJoin(owned_groups)))
11450 # Determine affected instances
11451 self.instances = self._DetermineInstances()
11452 self.instance_names = [i.name for i in self.instances]
11454 if set(self.instance_names) != owned_instances:
11455 raise errors.OpExecError("Instances on node '%s' changed since locks"
11456 " were acquired, current instances are '%s',"
11457 " used to be '%s'; retry the operation" %
11458 (self.op.node_name,
11459 utils.CommaJoin(self.instance_names),
11460 utils.CommaJoin(owned_instances)))
11462 if self.instance_names:
11463 self.LogInfo("Evacuating instances from node '%s': %s",
11464 self.op.node_name,
11465 utils.CommaJoin(utils.NiceSort(self.instance_names)))
11466 else:
11467 self.LogInfo("No instances to evacuate from node '%s'",
11468 self.op.node_name)
11470 if self.op.remote_node is not None:
11471 for i in self.instances:
11472 if i.primary_node == self.op.remote_node:
11473 raise errors.OpPrereqError("Node %s is the primary node of"
11474 " instance %s, cannot use it as"
11475 " secondary node" %
11476 (self.op.remote_node, i.name),
11477 errors.ECODE_INVAL)
11479 def Exec(self, feedback_fn):
11480 assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
11482 if not self.instance_names:
11483 # No instances to evacuate
11484 jobs = []
11486 elif self.op.iallocator is not None:
11487 # TODO: Implement relocation to other group
11488 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
11489 evac_mode=self._MODE2IALLOCATOR[self.op.mode],
11490 instances=list(self.instance_names))
11492 ial.Run(self.op.iallocator)
11494 if not ial.success:
11495 raise errors.OpPrereqError("Can't compute node evacuation using"
11496 " iallocator '%s': %s" %
11497 (self.op.iallocator, ial.info),
11498 errors.ECODE_NORES)
11500 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
11502 elif self.op.remote_node is not None:
11503 assert self.op.mode == constants.NODE_EVAC_SEC
11504 jobs = [
11505 [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
11506 remote_node=self.op.remote_node,
11507 disks=[],
11508 mode=constants.REPLACE_DISK_CHG,
11509 early_release=self.op.early_release)]
11510 for instance_name in self.instance_names
11511 ]
11513 else:
11514 raise errors.ProgrammerError("No iallocator or remote node")
11516 return ResultWithJobs(jobs)
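# Annotation (not part of the original source): the value handed to
# ResultWithJobs above is a list of jobs, each job itself a list of
# opcodes. In the remote_node branch that is one single-opcode job per
# evacuated instance, e.g. (instance names illustrative):
#
#   jobs = [[opcodes.OpInstanceReplaceDisks(instance_name="inst1", ...)],
#           [opcodes.OpInstanceReplaceDisks(instance_name="inst2", ...)]]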
11519 def _SetOpEarlyRelease(early_release, op):
11520 """Sets C{early_release} flag on opcodes if available.
11522 """
11523 try:
11524 op.early_release = early_release
11525 except AttributeError:
11526 assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
11528 return op
11531 def _NodeEvacDest(use_nodes, group, nodes):
11532 """Returns group or nodes depending on caller's choice.
11534 """
11535 if use_nodes:
11536 return utils.CommaJoin(nodes)
11537 else:
11538 return group
11541 def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
11542 """Unpacks the result of change-group and node-evacuate iallocator requests.
11544 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
11545 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
11547 @type lu: L{LogicalUnit}
11548 @param lu: Logical unit instance
11549 @type alloc_result: tuple/list
11550 @param alloc_result: Result from iallocator
11551 @type early_release: bool
11552 @param early_release: Whether to release locks early if possible
11553 @type use_nodes: bool
11554 @param use_nodes: Whether to display node names instead of groups
11556 """
11557 (moved, failed, jobs) = alloc_result
11559 if failed:
11560 failreason = utils.CommaJoin("%s (%s)" % (name, reason)
11561 for (name, reason) in failed)
11562 lu.LogWarning("Unable to evacuate instances %s", failreason)
11563 raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
11565 if moved:
11566 lu.LogInfo("Instances to be moved: %s",
11567 utils.CommaJoin("%s (to %s)" %
11568 (name, _NodeEvacDest(use_nodes, group, nodes))
11569 for (name, group, nodes) in moved))
11571 return [map(compat.partial(_SetOpEarlyRelease, early_release),
11572 map(opcodes.OpCode.LoadOpCode, ops))
11573 for ops in jobs]
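# Annotation (not part of the original source): an illustrative
# alloc_result as unpacked above -- (moved, failed, jobs), where the jobs
# contain serialized opcodes that LoadOpCode revives:
#
#   moved = [("inst1", "group1", ["node2.example.com"])]
#   failed = [("inst2", "insufficient memory")]
#   jobs = [[{"OP_ID": "OP_INSTANCE_REPLACE_DISKS", ...}], ...]
#
# All names and values here are examples only.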
11576 class LUInstanceGrowDisk(LogicalUnit):
11577 """Grow a disk of an instance.
11579 """
11580 HPATH = "disk-grow"
11581 HTYPE = constants.HTYPE_INSTANCE
11582 REQ_BGL = False
11584 def ExpandNames(self):
11585 self._ExpandAndLockInstance()
11586 self.needed_locks[locking.LEVEL_NODE] = []
11587 self.needed_locks[locking.LEVEL_NODE_RES] = []
11588 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11589 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
11591 def DeclareLocks(self, level):
11592 if level == locking.LEVEL_NODE:
11593 self._LockInstancesNodes()
11594 elif level == locking.LEVEL_NODE_RES:
11595 # Copy node locks
11596 self.needed_locks[locking.LEVEL_NODE_RES] = \
11597 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
11599 def BuildHooksEnv(self):
11600 """Build hooks env.
11602 This runs on the master, the primary and all the secondaries.
11604 """
11605 env = {
11606 "DISK": self.op.disk,
11607 "AMOUNT": self.op.amount,
11608 "ABSOLUTE": self.op.absolute,
11609 }
11610 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11611 return env
11613 def BuildHooksNodes(self):
11614 """Build hooks nodes.
11616 """
11617 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
11618 return (nl, nl)
11620 def CheckPrereq(self):
11621 """Check prerequisites.
11623 This checks that the instance is in the cluster.
11625 """
11626 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11627 assert instance is not None, \
11628 "Cannot retrieve locked instance %s" % self.op.instance_name
11629 nodenames = list(instance.all_nodes)
11630 for node in nodenames:
11631 _CheckNodeOnline(self, node)
11633 self.instance = instance
11635 if instance.disk_template not in constants.DTS_GROWABLE:
11636 raise errors.OpPrereqError("Instance's disk layout does not support"
11637 " growing", errors.ECODE_INVAL)
11639 self.disk = instance.FindDisk(self.op.disk)
11641 if self.op.absolute:
11642 self.target = self.op.amount
11643 self.delta = self.target - self.disk.size
11644 if self.delta < 0:
11645 raise errors.OpPrereqError("Requested size (%s) is smaller than "
11646 "current disk size (%s)" %
11647 (utils.FormatUnit(self.target, "h"),
11648 utils.FormatUnit(self.disk.size, "h")),
11649 errors.ECODE_STATE)
11650 else:
11651 self.delta = self.op.amount
11652 self.target = self.disk.size + self.delta
11653 if self.delta < 0:
11654 raise errors.OpPrereqError("Requested increment (%s) is negative" %
11655 utils.FormatUnit(self.delta, "h"),
11656 errors.ECODE_INVAL)
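# Annotation (not part of the original source): a worked example of the
# two sizing modes above, for a 10240 MB disk:
#
#   absolute: amount=12288 -> target = 12288, delta = 12288 - 10240 = 2048
#   relative: amount=2048  -> delta = 2048,  target = 10240 + 2048 = 12288
#
# A negative delta is rejected in both branches.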
11658 if instance.disk_template not in (constants.DT_FILE,
11659 constants.DT_SHARED_FILE,
11660 constants.DT_RBD):
11661 # TODO: check the free disk space for file, when that feature will be
11662 # supported
11663 _CheckNodesFreeDiskPerVG(self, nodenames,
11664 self.disk.ComputeGrowth(self.delta))
11666 def Exec(self, feedback_fn):
11667 """Execute disk grow.
11669 """
11670 instance = self.instance
11671 disk = self.disk
11673 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11674 assert (self.owned_locks(locking.LEVEL_NODE) ==
11675 self.owned_locks(locking.LEVEL_NODE_RES))
11677 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
11678 if not disks_ok:
11679 raise errors.OpExecError("Cannot activate block device to grow")
11681 feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
11682 (self.op.disk, instance.name,
11683 utils.FormatUnit(self.delta, "h"),
11684 utils.FormatUnit(self.target, "h")))
11686 # First run all grow ops in dry-run mode
11687 for node in instance.all_nodes:
11688 self.cfg.SetDiskID(disk, node)
11689 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11690 True)
11691 result.Raise("Grow request failed to node %s" % node)
11693 # We know that (as far as we can test) operations across different
11694 # nodes will succeed, time to run it for real
11695 for node in instance.all_nodes:
11696 self.cfg.SetDiskID(disk, node)
11697 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11698 False)
11699 result.Raise("Grow request failed to node %s" % node)
11701 # TODO: Rewrite code to work properly
11702 # DRBD goes into sync mode for a short amount of time after executing the
11703 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
11704 # calling "resize" in sync mode fails. Sleeping for a short amount of
11705 # time is a work-around.
11706 time.sleep(5)
11708 disk.RecordGrow(self.delta)
11709 self.cfg.Update(instance, feedback_fn)
11711 # Changes have been recorded, release node lock
11712 _ReleaseLocks(self, locking.LEVEL_NODE)
11714 # Downgrade lock while waiting for sync
11715 self.glm.downgrade(locking.LEVEL_INSTANCE)
11717 if self.op.wait_for_sync:
11718 disk_abort = not _WaitForSync(self, instance, disks=[disk])
11719 if disk_abort:
11720 self.proc.LogWarning("Disk sync-ing has not returned a good"
11721 " status; please check the instance")
11722 if instance.admin_state != constants.ADMINST_UP:
11723 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
11724 elif instance.admin_state != constants.ADMINST_UP:
11725 self.proc.LogWarning("Not shutting down the disk even if the instance is"
11726 " not supposed to be running because no wait for"
11727 " sync mode was requested")
11729 assert self.owned_locks(locking.LEVEL_NODE_RES)
11730 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11733 class LUInstanceQueryData(NoHooksLU):
11734 """Query runtime instance data.
11736 """
11737 REQ_BGL = False
11739 def ExpandNames(self):
11740 self.needed_locks = {}
11742 # Use locking if requested or when non-static information is wanted
11743 if not (self.op.static or self.op.use_locking):
11744 self.LogWarning("Non-static data requested, locks need to be acquired")
11745 self.op.use_locking = True
11747 if self.op.instances or not self.op.use_locking:
11748 # Expand instance names right here
11749 self.wanted_names = _GetWantedInstances(self, self.op.instances)
11750 else:
11751 # Will use acquired locks
11752 self.wanted_names = None
11754 if self.op.use_locking:
11755 self.share_locks = _ShareAll()
11757 if self.wanted_names is None:
11758 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
11759 else:
11760 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
11762 self.needed_locks[locking.LEVEL_NODEGROUP] = []
11763 self.needed_locks[locking.LEVEL_NODE] = []
11764 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11766 def DeclareLocks(self, level):
11767 if self.op.use_locking:
11768 if level == locking.LEVEL_NODEGROUP:
11769 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11771 # Lock all groups used by instances optimistically; this requires going
11772 # via the node before it's locked, requiring verification later on
11773 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11774 frozenset(group_uuid
11775 for instance_name in owned_instances
11776 for group_uuid in
11777 self.cfg.GetInstanceNodeGroups(instance_name))
11779 elif level == locking.LEVEL_NODE:
11780 self._LockInstancesNodes()
11782 def CheckPrereq(self):
11783 """Check prerequisites.
11785 This only checks the optional instance list against the existing names.
11787 """
11788 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
11789 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
11790 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
11792 if self.wanted_names is None:
11793 assert self.op.use_locking, "Locking was not used"
11794 self.wanted_names = owned_instances
11796 instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
11798 if self.op.use_locking:
11799 _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
11800 None)
11801 else:
11802 assert not (owned_instances or owned_groups or owned_nodes)
11804 self.wanted_instances = instances.values()
11806 def _ComputeBlockdevStatus(self, node, instance, dev):
11807 """Returns the status of a block device
11809 """
11810 if self.op.static or not node:
11811 return None
11813 self.cfg.SetDiskID(dev, node)
11815 result = self.rpc.call_blockdev_find(node, dev)
11816 if result.offline:
11817 return None
11819 result.Raise("Can't compute disk status for %s" % instance.name)
11821 status = result.payload
11822 if status is None:
11823 return None
11825 return (status.dev_path, status.major, status.minor,
11826 status.sync_percent, status.estimated_time,
11827 status.is_degraded, status.ldisk_status)
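# Annotation (not part of the original source): an illustrative return
# value for a healthy DRBD device, matching the field order above --
# (dev_path, major, minor, sync_percent, estimated_time, is_degraded,
# ldisk_status):
#
#   ("/dev/drbd0", 147, 0, None, None, False, None)
#
# Field values are examples only; sync_percent/estimated_time are set
# while a resync is in progress.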
11829 def _ComputeDiskStatus(self, instance, snode, dev):
11830 """Compute block device status.
11832 """
11833 (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
11835 return self._ComputeDiskStatusInner(instance, snode, anno_dev)
11837 def _ComputeDiskStatusInner(self, instance, snode, dev):
11838 """Compute block device status.
11840 @attention: The device has to be annotated already.
11842 """
11843 if dev.dev_type in constants.LDS_DRBD:
11844 # we change the snode then (otherwise we use the one passed in)
11845 if dev.logical_id[0] == instance.primary_node:
11846 snode = dev.logical_id[1]
11847 else:
11848 snode = dev.logical_id[0]
11850 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
11851 instance, dev)
11852 dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
11854 if dev.children:
11855 dev_children = map(compat.partial(self._ComputeDiskStatusInner,
11856 instance, snode),
11857 dev.children)
11858 else:
11859 dev_children = []
11861 return {
11862 "iv_name": dev.iv_name,
11863 "dev_type": dev.dev_type,
11864 "logical_id": dev.logical_id,
11865 "physical_id": dev.physical_id,
11866 "pstatus": dev_pstatus,
11867 "sstatus": dev_sstatus,
11868 "children": dev_children,
11869 "mode": dev.mode,
11870 "size": dev.size,
11871 }
11873 def Exec(self, feedback_fn):
11874 """Gather and return data"""
11875 result = {}
11877 cluster = self.cfg.GetClusterInfo()
11879 node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
11880 nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
11882 groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
11883 for node in nodes.values()))
11885 group2name_fn = lambda uuid: groups[uuid].name
11887 for instance in self.wanted_instances:
11888 pnode = nodes[instance.primary_node]
11890 if self.op.static or pnode.offline:
11891 remote_state = None
11892 if pnode.offline:
11893 self.LogWarning("Primary node %s is marked offline, returning static"
11894 " information only for instance %s" %
11895 (pnode.name, instance.name))
11896 else:
11897 remote_info = self.rpc.call_instance_info(instance.primary_node,
11898 instance.name,
11899 instance.hypervisor)
11900 remote_info.Raise("Error checking node %s" % instance.primary_node)
11901 remote_info = remote_info.payload
11902 if remote_info and "state" in remote_info:
11903 remote_state = "up"
11904 else:
11905 if instance.admin_state == constants.ADMINST_UP:
11906 remote_state = "down"
11907 else:
11908 remote_state = instance.admin_state
11910 disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
11911 instance.disks)
11913 snodes_group_uuids = [nodes[snode_name].group
11914 for snode_name in instance.secondary_nodes]
11916 result[instance.name] = {
11917 "name": instance.name,
11918 "config_state": instance.admin_state,
11919 "run_state": remote_state,
11920 "pnode": instance.primary_node,
11921 "pnode_group_uuid": pnode.group,
11922 "pnode_group_name": group2name_fn(pnode.group),
11923 "snodes": instance.secondary_nodes,
11924 "snodes_group_uuids": snodes_group_uuids,
11925 "snodes_group_names": map(group2name_fn, snodes_group_uuids),
11927 # this happens to be the same format used for hooks
11928 "nics": _NICListToTuple(self, instance.nics),
11929 "disk_template": instance.disk_template,
11930 "disks": disks,
11931 "hypervisor": instance.hypervisor,
11932 "network_port": instance.network_port,
11933 "hv_instance": instance.hvparams,
11934 "hv_actual": cluster.FillHV(instance, skip_globals=True),
11935 "be_instance": instance.beparams,
11936 "be_actual": cluster.FillBE(instance),
11937 "os_instance": instance.osparams,
11938 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
11939 "serial_no": instance.serial_no,
11940 "mtime": instance.mtime,
11941 "ctime": instance.ctime,
11942 "uuid": instance.uuid,
11943 }
11945 return result
11948 def PrepareContainerMods(mods, private_fn):
11949 """Prepares a list of container modifications by adding a private data field.
11951 @type mods: list of tuples; (operation, index, parameters)
11952 @param mods: List of modifications
11953 @type private_fn: callable or None
11954 @param private_fn: Callable for constructing a private data field for a
11955 modification
11957 """
11959 if private_fn is None:
11960 fn = lambda: None
11961 else:
11962 fn = private_fn
11964 return [(op, idx, params, fn()) for (op, idx, params) in mods]
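# Annotation (not part of the original source): example of the
# transformation performed above, using _InstNicModPrivate as private_fn
# (parameters illustrative):
#
#   mods = [(constants.DDM_ADD, -1, {"link": "br0"})]
#   PrepareContainerMods(mods, _InstNicModPrivate)
#   -> [(constants.DDM_ADD, -1, {"link": "br0"}, <_InstNicModPrivate>)]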
11967 #: Type description for changes as returned by L{ApplyContainerMods}'s
11968 #: callbacks
11969 _TApplyContModsCbChanges = \
11970 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
11971 ht.TNonEmptyString,
11972 ht.TAny,
11973 ])))
11976 def ApplyContainerMods(kind, container, chgdesc, mods,
11977 create_fn, modify_fn, remove_fn):
11978 """Applies descriptions in C{mods} to C{container}.
11980 @type kind: string
11981 @param kind: One-word item description
11982 @type container: list
11983 @param container: Container to modify
11984 @type chgdesc: None or list
11985 @param chgdesc: List of applied changes
11986 @type mods: list
11987 @param mods: Modifications as returned by L{PrepareContainerMods}
11988 @type create_fn: callable
11989 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
11990 receives absolute item index, parameters and private data object as added
11991 by L{PrepareContainerMods}, returns tuple containing new item and changes
11992 as well
11993 @type modify_fn: callable
11994 @param modify_fn: Callback for modifying an existing item
11995 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
11996 and private data object as added by L{PrepareContainerMods}, returns
11997 changes as well
11998 @type remove_fn: callable
11999 @param remove_fn: Callback on removing item; receives absolute item index,
12000 item and private data object as added by L{PrepareContainerMods}
12002 """
12003 for (op, idx, params, private) in mods:
12004 if idx == -1:
12005 # Append
12006 absidx = len(container) - 1
12007 elif idx < 0:
12008 raise IndexError("Not accepting negative indices other than -1")
12009 elif idx > len(container):
12010 raise IndexError("Got %s index %s, but there are only %s" %
12011 (kind, idx, len(container)))
12012 else:
12013 absidx = idx
12015 changes = None
12017 if op == constants.DDM_ADD:
12018 # Calculate where item will be added
12019 if idx == -1:
12020 addidx = len(container)
12021 else:
12022 addidx = idx
12024 if create_fn is None:
12025 item = params
12026 else:
12027 (item, changes) = create_fn(addidx, params, private)
12029 if idx == -1:
12030 container.append(item)
12031 else:
12033 assert idx <= len(container)
12034 # list.insert does so before the specified index
12035 container.insert(idx, item)
12036 else:
12037 # Retrieve existing item
12038 try:
12039 item = container[absidx]
12040 except IndexError:
12041 raise IndexError("Invalid %s index %s" % (kind, idx))
12043 if op == constants.DDM_REMOVE:
12044 assert not params
12046 if remove_fn is not None:
12047 remove_fn(absidx, item, private)
12049 changes = [("%s/%s" % (kind, absidx), "remove")]
12051 assert container[absidx] == item
12052 del container[absidx]
12053 elif op == constants.DDM_MODIFY:
12054 if modify_fn is not None:
12055 changes = modify_fn(absidx, item, params, private)
12056 else:
12057 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12059 assert _TApplyContModsCbChanges(changes)
12061 if not (chgdesc is None or changes is None):
12062 chgdesc.extend(changes)
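# Annotation (not part of the original source): sketch of a typical call,
# mirroring how LUInstanceSetParams below uses this helper for its NIC
# modifications:
#
#   chgdesc = []
#   ApplyContainerMods("NIC", nics, chgdesc, self.nicmod,
#                      self._CreateNewNic, self._ApplyNicMods, None)
#
# Afterwards "nics" has been mutated in place and chgdesc holds
# (name, change) pairs such as ("NIC/0", "remove").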
12065 def _UpdateIvNames(base_index, disks):
12066 """Updates the C{iv_name} attribute of disks.
12068 @type disks: list of L{objects.Disk}
12070 """
12071 for (idx, disk) in enumerate(disks):
12072 disk.iv_name = "disk/%s" % (base_index + idx, )
12075 class _InstNicModPrivate:
12076 """Data structure for network interface modifications.
12078 Used by L{LUInstanceSetParams}.
12080 """
12081 def __init__(self):
12082 self.params = None
12083 self.filled = None
12086 class LUInstanceSetParams(LogicalUnit):
12087 """Modifies an instance's parameters.
12089 """
12090 HPATH = "instance-modify"
12091 HTYPE = constants.HTYPE_INSTANCE
12092 REQ_BGL = False
12094 @staticmethod
12095 def _UpgradeDiskNicMods(kind, mods, verify_fn):
12096 assert ht.TList(mods)
12097 assert not mods or len(mods[0]) in (2, 3)
12099 if mods and len(mods[0]) == 2:
12100 result = []
12102 addremove = 0
12103 for op, params in mods:
12104 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
12105 result.append((op, -1, params))
12106 addremove += 1
12108 if addremove > 1:
12109 raise errors.OpPrereqError("Only one %s add or remove operation is"
12110 " supported at a time" % kind,
12111 errors.ECODE_INVAL)
12112 else:
12113 result.append((constants.DDM_MODIFY, op, params))
12115 assert verify_fn(result)
12116 else:
12117 result = mods
12119 return result
12121 @staticmethod
12122 def _CheckMods(kind, mods, key_types, item_fn):
12123 """Ensures requested disk/NIC modifications are valid.
12125 """
12126 for (op, _, params) in mods:
12127 assert ht.TDict(params)
12129 utils.ForceDictType(params, key_types)
12131 if op == constants.DDM_REMOVE:
12132 if params:
12133 raise errors.OpPrereqError("No settings should be passed when"
12134 " removing a %s" % kind,
12135 errors.ECODE_INVAL)
12136 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
12137 item_fn(op, params)
12138 else:
12139 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12141 @staticmethod
12142 def _VerifyDiskModification(op, params):
12143 """Verifies a disk modification.
12145 """
12146 if op == constants.DDM_ADD:
12147 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
12148 if mode not in constants.DISK_ACCESS_SET:
12149 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
12150 errors.ECODE_INVAL)
12152 size = params.get(constants.IDISK_SIZE, None)
12153 if size is None:
12154 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
12155 constants.IDISK_SIZE, errors.ECODE_INVAL)
12157 try:
12158 size = int(size)
12159 except (TypeError, ValueError), err:
12160 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
12161 errors.ECODE_INVAL)
12163 params[constants.IDISK_SIZE] = size
12165 elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
12166 raise errors.OpPrereqError("Disk size change not possible, use"
12167 " grow-disk", errors.ECODE_INVAL)
12169 @staticmethod
12170 def _VerifyNicModification(op, params):
12171 """Verifies a network interface modification.
12173 """
12174 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
12175 ip = params.get(constants.INIC_IP, None)
12176 if ip is None:
12177 pass
12178 elif ip.lower() == constants.VALUE_NONE:
12179 params[constants.INIC_IP] = None
12180 elif not netutils.IPAddress.IsValid(ip):
12181 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
12182 errors.ECODE_INVAL)
12184 bridge = params.get("bridge", None)
12185 link = params.get(constants.INIC_LINK, None)
12186 if bridge and link:
12187 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
12188 " at the same time", errors.ECODE_INVAL)
12189 elif bridge and bridge.lower() == constants.VALUE_NONE:
12190 params["bridge"] = None
12191 elif link and link.lower() == constants.VALUE_NONE:
12192 params[constants.INIC_LINK] = None
12194 if op == constants.DDM_ADD:
12195 macaddr = params.get(constants.INIC_MAC, None)
12196 if macaddr is None:
12197 params[constants.INIC_MAC] = constants.VALUE_AUTO
12199 if constants.INIC_MAC in params:
12200 macaddr = params[constants.INIC_MAC]
12201 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12202 macaddr = utils.NormalizeAndValidateMac(macaddr)
12204 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
12205 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
12206 " modifying an existing NIC",
12207 errors.ECODE_INVAL)
12209 def CheckArguments(self):
12210 if not (self.op.nics or self.op.disks or self.op.disk_template or
12211 self.op.hvparams or self.op.beparams or self.op.os_name or
12212 self.op.offline is not None or self.op.runtime_mem):
12213 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
12215 if self.op.hvparams:
12216 _CheckGlobalHvParams(self.op.hvparams)
12218 self.op.disks = \
12219 self._UpgradeDiskNicMods("disk", self.op.disks,
12220 opcodes.OpInstanceSetParams.TestDiskModifications)
12221 self.op.nics = \
12222 self._UpgradeDiskNicMods("NIC", self.op.nics,
12223 opcodes.OpInstanceSetParams.TestNicModifications)
12225 # Check disk modifications
12226 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
12227 self._VerifyDiskModification)
12229 if self.op.disks and self.op.disk_template is not None:
12230 raise errors.OpPrereqError("Disk template conversion and other disk"
12231 " changes not supported at the same time",
12232 errors.ECODE_INVAL)
12234 if (self.op.disk_template and
12235 self.op.disk_template in constants.DTS_INT_MIRROR and
12236 self.op.remote_node is None):
12237 raise errors.OpPrereqError("Changing the disk template to a mirrored"
12238 " one requires specifying a secondary node",
12239 errors.ECODE_INVAL)
12241 # Check NIC modifications
12242 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
12243 self._VerifyNicModification)
12245 def ExpandNames(self):
12246 self._ExpandAndLockInstance()
12247 # Can't even acquire node locks in shared mode as upcoming changes in
12248 # Ganeti 2.6 will start to modify the node object on disk conversion
12249 self.needed_locks[locking.LEVEL_NODE] = []
12250 self.needed_locks[locking.LEVEL_NODE_RES] = []
12251 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
12253 def DeclareLocks(self, level):
12254 # TODO: Acquire group lock in shared mode (disk parameters)
12255 if level == locking.LEVEL_NODE:
12256 self._LockInstancesNodes()
12257 if self.op.disk_template and self.op.remote_node:
12258 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
12259 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
12260 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
12261 # Copy node locks
12262 self.needed_locks[locking.LEVEL_NODE_RES] = \
12263 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
12265 def BuildHooksEnv(self):
12266 """Build hooks env.
12268 This runs on the master, primary and secondaries.
12270 """
12271 args = dict()
12272 if constants.BE_MINMEM in self.be_new:
12273 args["minmem"] = self.be_new[constants.BE_MINMEM]
12274 if constants.BE_MAXMEM in self.be_new:
12275 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
12276 if constants.BE_VCPUS in self.be_new:
12277 args["vcpus"] = self.be_new[constants.BE_VCPUS]
12278 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
12279 # information at all.
12281 if self._new_nics is not None:
12282 nics = []
12284 for nic in self._new_nics:
12285 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
12286 mode = nicparams[constants.NIC_MODE]
12287 link = nicparams[constants.NIC_LINK]
12288 nics.append((nic.ip, nic.mac, mode, link))
12290 args["nics"] = nics
12292 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
12293 if self.op.disk_template:
12294 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
12295 if self.op.runtime_mem:
12296 env["RUNTIME_MEMORY"] = self.op.runtime_mem
12298 return env
12300 def BuildHooksNodes(self):
12301 """Build hooks nodes.
12303 """
12304 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
12305 return (nl, nl)
12307 def _PrepareNicModification(self, params, private, old_ip, old_params,
12308 cluster, pnode):
12309 update_params_dict = dict([(key, params[key])
12310 for key in constants.NICS_PARAMETERS
12311 if key in params])
12313 if "bridge" in params:
12314 update_params_dict[constants.NIC_LINK] = params["bridge"]
12316 new_params = _GetUpdatedParams(old_params, update_params_dict)
12317 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
12319 new_filled_params = cluster.SimpleFillNIC(new_params)
12320 objects.NIC.CheckParameterSyntax(new_filled_params)
12322 new_mode = new_filled_params[constants.NIC_MODE]
12323 if new_mode == constants.NIC_MODE_BRIDGED:
12324 bridge = new_filled_params[constants.NIC_LINK]
12325 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
12326 if msg:
12327 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
12328 if self.op.force:
12329 self.warn.append(msg)
12330 else:
12331 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
12333 elif new_mode == constants.NIC_MODE_ROUTED:
12334 ip = params.get(constants.INIC_IP, old_ip)
12335 if ip is None:
12336 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
12337 " on a routed NIC", errors.ECODE_INVAL)
12339 if constants.INIC_MAC in params:
12340 mac = params[constants.INIC_MAC]
12341 if mac is None:
12342 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
12343 errors.ECODE_INVAL)
12344 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12345 # otherwise generate the MAC address
12346 params[constants.INIC_MAC] = \
12347 self.cfg.GenerateMAC(self.proc.GetECId())
12348 else:
12349 # or validate/reserve the current one
12350 try:
12351 self.cfg.ReserveMAC(mac, self.proc.GetECId())
12352 except errors.ReservationError:
12353 raise errors.OpPrereqError("MAC address '%s' already in use"
12354 " in cluster" % mac,
12355 errors.ECODE_NOTUNIQUE)
12357 private.params = new_params
12358 private.filled = new_filled_params
12360 def CheckPrereq(self):
12361 """Check prerequisites.
12363 This only checks the instance list against the existing names.
12365 """
12366 # checking the new params on the primary/secondary nodes
12368 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
12369 cluster = self.cluster = self.cfg.GetClusterInfo()
12370 assert self.instance is not None, \
12371 "Cannot retrieve locked instance %s" % self.op.instance_name
12372 pnode = instance.primary_node
12373 nodelist = list(instance.all_nodes)
12374 pnode_info = self.cfg.GetNodeInfo(pnode)
12375 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
12377 # Prepare disk/NIC modifications
12378 self.diskmod = PrepareContainerMods(self.op.disks, None)
12379 self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
12381 # OS change
12382 if self.op.os_name and not self.op.force:
12383 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
12384 self.op.force_variant)
12385 instance_os = self.op.os_name
12386 else:
12387 instance_os = instance.os
12389 assert not (self.op.disk_template and self.op.disks), \
12390 "Can't modify disk template and apply disk changes at the same time"
12392 if self.op.disk_template:
12393 if instance.disk_template == self.op.disk_template:
12394 raise errors.OpPrereqError("Instance already has disk template %s" %
12395 instance.disk_template, errors.ECODE_INVAL)
12397 if (instance.disk_template,
12398 self.op.disk_template) not in self._DISK_CONVERSIONS:
12399 raise errors.OpPrereqError("Unsupported disk template conversion from"
12400 " %s to %s" % (instance.disk_template,
12401 self.op.disk_template),
12402 errors.ECODE_INVAL)
12403 _CheckInstanceState(self, instance, INSTANCE_DOWN,
12404 msg="cannot change disk template")
12405 if self.op.disk_template in constants.DTS_INT_MIRROR:
12406 if self.op.remote_node == pnode:
12407 raise errors.OpPrereqError("Given new secondary node %s is the same"
12408 " as the primary node of the instance" %
12409 self.op.remote_node, errors.ECODE_STATE)
12410 _CheckNodeOnline(self, self.op.remote_node)
12411 _CheckNodeNotDrained(self, self.op.remote_node)
12412 # FIXME: here we assume that the old instance type is DT_PLAIN
12413 assert instance.disk_template == constants.DT_PLAIN
12414 disks = [{constants.IDISK_SIZE: d.size,
12415 constants.IDISK_VG: d.logical_id[0]}
12416 for d in instance.disks]
12417 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
12418 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
12420 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
12421 snode_group = self.cfg.GetNodeGroup(snode_info.group)
12422 ipolicy = _CalculateGroupIPolicy(cluster, snode_group)
12423 _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
12424 ignore=self.op.ignore_ipolicy)
12425 if pnode_info.group != snode_info.group:
12426 self.LogWarning("The primary and secondary nodes are in two"
12427 " different node groups; the disk parameters"
12428 " from the first disk's node group will be"
12429 " used")
12431 # hvparams processing
12432 if self.op.hvparams:
12433 hv_type = instance.hypervisor
12434 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
12435 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
12436 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
12439 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
12440 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
12441 self.hv_proposed = self.hv_new = hv_new # the new actual values
12442 self.hv_inst = i_hvdict # the new dict (without defaults)
12443 else:
12444 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
12445 instance.hvparams)
12446 self.hv_new = self.hv_inst = {}
12448 # beparams processing
12449 if self.op.beparams:
12450 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
12451 use_none=True)
12452 objects.UpgradeBeParams(i_bedict)
12453 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
12454 be_new = cluster.SimpleFillBE(i_bedict)
12455 self.be_proposed = self.be_new = be_new # the new actual values
12456 self.be_inst = i_bedict # the new dict (without defaults)
12457 else:
12458 self.be_new = self.be_inst = {}
12459 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
12460 be_old = cluster.FillBE(instance)
12462 # CPU param validation -- checking every time a parameter is
12463 # changed to cover all cases where either CPU mask or vcpus have
12464 # changed
12465 if (constants.BE_VCPUS in self.be_proposed and
12466 constants.HV_CPU_MASK in self.hv_proposed):
12467 cpu_list = \
12468 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
12469 # Verify mask is consistent with number of vCPUs. Can skip this
12470 # test if only 1 entry in the CPU mask, which means same mask
12471 # is applied to all vCPUs.
12472 if (len(cpu_list) > 1 and
12473 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
12474 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
12475 " CPU mask [%s]" %
12476 (self.be_proposed[constants.BE_VCPUS],
12477 self.hv_proposed[constants.HV_CPU_MASK]),
12478 errors.ECODE_INVAL)
12480 # Only perform this test if a new CPU mask is given
12481 if constants.HV_CPU_MASK in self.hv_new:
12482 # Calculate the largest CPU number requested
12483 max_requested_cpu = max(map(max, cpu_list))
12484 # Check that all of the instance's nodes have enough physical CPUs to
12485 # satisfy the requested CPU mask
12486 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
12487 max_requested_cpu + 1, instance.hypervisor)
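# Annotation (not part of the original source): a worked example of the
# mask checks above, with BE_VCPUS = 2:
#
#   "0-3"     -> one entry, same mask applied to all vCPUs, accepted
#   "0-1:2-3" -> two entries, matches the 2 vCPUs, accepted
#   "0:1:2"   -> three entries != 2 vCPUs, rejected
#
# For "0-1:2-3" the largest requested CPU is 3, so every node must have
# at least 4 physical CPUs.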
12489 # osparams processing
12490 if self.op.osparams:
12491 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
12492 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
12493 self.os_inst = i_osdict # the new dict (without defaults)
12494 else:
12495 self.os_inst = {}
12497 self.warn = []
12499 #TODO(dynmem): do the appropriate check involving MINMEM
12500 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
12501 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
12502 mem_check_list = [pnode]
12503 if be_new[constants.BE_AUTO_BALANCE]:
12504 # either we changed auto_balance to yes or it was from before
12505 mem_check_list.extend(instance.secondary_nodes)
12506 instance_info = self.rpc.call_instance_info(pnode, instance.name,
12507 instance.hypervisor)
12508 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
12509 [instance.hypervisor])
12510 pninfo = nodeinfo[pnode]
12511 msg = pninfo.fail_msg
12512 if msg:
12513 # Assume the primary node is unreachable and go ahead
12514 self.warn.append("Can't get info from primary node %s: %s" %
12515 (pnode, msg))
12516 else:
12517 (_, _, (pnhvinfo, )) = pninfo.payload
12518 if not isinstance(pnhvinfo.get("memory_free", None), int):
12519 self.warn.append("Node data from primary node %s doesn't contain"
12520 " free memory information" % pnode)
12521 elif instance_info.fail_msg:
12522 self.warn.append("Can't get instance runtime information: %s" %
12523 instance_info.fail_msg)
12524 else:
12525 if instance_info.payload:
12526 current_mem = int(instance_info.payload["memory"])
12527 else:
12528 # Assume instance not running
12529 # (there is a slight race condition here, but it's not very
12530 # probable, and we have no other way to check)
12531 # TODO: Describe race condition
12532 current_mem = 0
12533 #TODO(dynmem): do the appropriate check involving MINMEM
12534 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
12535 pnhvinfo["memory_free"])
12536 if miss_mem > 0:
12537 raise errors.OpPrereqError("This change will prevent the instance"
12538 " from starting, due to %d MB of memory"
12539 " missing on its primary node" %
12540 miss_mem,
12541 errors.ECODE_NORES)
12543 if be_new[constants.BE_AUTO_BALANCE]:
12544 for node, nres in nodeinfo.items():
12545 if node not in instance.secondary_nodes:
12546 continue
12547 nres.Raise("Can't get info from secondary node %s" % node,
12548 prereq=True, ecode=errors.ECODE_STATE)
12549 (_, _, (nhvinfo, )) = nres.payload
12550 if not isinstance(nhvinfo.get("memory_free", None), int):
12551 raise errors.OpPrereqError("Secondary node %s didn't return free"
12552 " memory information" % node,
12553 errors.ECODE_STATE)
12554 #TODO(dynmem): do the appropriate check involving MINMEM
12555 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
12556 raise errors.OpPrereqError("This change will prevent the instance"
12557 " from failover to its secondary node"
12558 " %s, due to not enough memory" % node,
12559 errors.ECODE_STATE)
12561 if self.op.runtime_mem:
12562 remote_info = self.rpc.call_instance_info(instance.primary_node,
12563 instance.name,
12564 instance.hypervisor)
12565 remote_info.Raise("Error checking node %s" % instance.primary_node)
12566 if not remote_info.payload: # not running already
12567 raise errors.OpPrereqError("Instance %s is not running" % instance.name,
12568 errors.ECODE_STATE)
12570 current_memory = remote_info.payload["memory"]
12571 if (not self.op.force and
12572 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
12573 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
12574 raise errors.OpPrereqError("Instance %s must have memory between %d"
12575 " and %d MB of memory unless --force is"
12576 " given" % (instance.name,
12577 self.be_proposed[constants.BE_MINMEM],
12578 self.be_proposed[constants.BE_MAXMEM]),
12579 errors.ECODE_INVAL)
12581 delta = self.op.runtime_mem - current_memory
12582 if delta > 0:
12583 _CheckNodeFreeMemory(self, instance.primary_node,
12584 "ballooning memory for instance %s" %
12585 instance.name, delta, instance.hypervisor)
12587 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
12588 raise errors.OpPrereqError("Disk operations not supported for"
12589 " diskless instances",
12590 errors.ECODE_INVAL)
12592 def _PrepareNicCreate(_, params, private):
12593 self._PrepareNicModification(params, private, None, {}, cluster, pnode)
12594 return (None, None)
12596 def _PrepareNicMod(_, nic, params, private):
12597 self._PrepareNicModification(params, private, nic.ip,
12598 nic.nicparams, cluster, pnode)
12599 return None
12601 # Verify NIC changes (operating on copy)
12602 nics = instance.nics[:]
12603 ApplyContainerMods("NIC", nics, None, self.nicmod,
12604 _PrepareNicCreate, _PrepareNicMod, None)
12605 if len(nics) > constants.MAX_NICS:
12606 raise errors.OpPrereqError("Instance has too many network interfaces"
12607 " (%d), cannot add more" % constants.MAX_NICS,
12608 errors.ECODE_STATE)
12610 # Verify disk changes (operating on a copy)
12611 disks = instance.disks[:]
12612 ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
12613 if len(disks) > constants.MAX_DISKS:
12614 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
12615 " more" % constants.MAX_DISKS,
12616 errors.ECODE_STATE)
12618 if self.op.offline is not None:
12619 if self.op.offline:
12620 msg = "can't change to offline"
12621 else:
12622 msg = "can't change to online"
12623 _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
12625 # Pre-compute NIC changes (necessary to use result in hooks)
12626 self._nic_chgdesc = []
12627 if self.nicmod:
12628 # Operate on copies as this is still in prereq
12629 nics = [nic.Copy() for nic in instance.nics]
12630 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
12631 self._CreateNewNic, self._ApplyNicMods, None)
12632 self._new_nics = nics
12633 else:
12634 self._new_nics = None
12636 def _ConvertPlainToDrbd(self, feedback_fn):
12637 """Converts an instance from plain to drbd.
12639 """
12640 feedback_fn("Converting template to drbd")
12641 instance = self.instance
12642 pnode = instance.primary_node
12643 snode = self.op.remote_node
12645 assert instance.disk_template == constants.DT_PLAIN
12647 # create a fake disk info for _GenerateDiskTemplate
12648 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
12649 constants.IDISK_VG: d.logical_id[0]}
12650 for d in instance.disks]
12651 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
12652 instance.name, pnode, [snode],
12653 disk_info, None, None, 0, feedback_fn,
12654 self.diskparams)
12655 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
12656 self.diskparams)
12657 info = _GetInstanceInfoText(instance)
12658 feedback_fn("Creating additional volumes...")
12659 # first, create the missing data and meta devices
12660 for disk in anno_disks:
12661 # unfortunately this is... not too nice
12662 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
12663 info, True)
12664 for child in disk.children:
12665 _CreateSingleBlockDev(self, snode, instance, child, info, True)
12666 # at this stage, all new LVs have been created, we can rename the
12667 # old ones
12668 feedback_fn("Renaming original volumes...")
12669 rename_list = [(o, n.children[0].logical_id)
12670 for (o, n) in zip(instance.disks, new_disks)]
12671 result = self.rpc.call_blockdev_rename(pnode, rename_list)
12672 result.Raise("Failed to rename original LVs")
12674 feedback_fn("Initializing DRBD devices...")
12675 # all child devices are in place, we can now create the DRBD devices
12676 for disk in anno_disks:
12677 for node in [pnode, snode]:
12678 f_create = node == pnode
12679 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
12681 # at this point, the instance has been modified
12682 instance.disk_template = constants.DT_DRBD8
12683 instance.disks = new_disks
12684 self.cfg.Update(instance, feedback_fn)
12686 # Release node locks while waiting for sync
12687 _ReleaseLocks(self, locking.LEVEL_NODE)
12689 # disks are created, waiting for sync
12690 disk_abort = not _WaitForSync(self, instance,
12691 oneshot=not self.op.wait_for_sync)
12692 if disk_abort:
12693 raise errors.OpExecError("There are some degraded disks for"
12694 " this instance, please cleanup manually")
12696 # Node resource locks will be released by caller
12698 def _ConvertDrbdToPlain(self, feedback_fn):
12699 """Converts an instance from drbd to plain.
12701 """
12702 instance = self.instance
12704 assert len(instance.secondary_nodes) == 1
12705 assert instance.disk_template == constants.DT_DRBD8
12707 pnode = instance.primary_node
12708 snode = instance.secondary_nodes[0]
12709 feedback_fn("Converting template to plain")
12711 old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
12712 new_disks = [d.children[0] for d in instance.disks]
12714 # copy over size and mode
12715 for parent, child in zip(old_disks, new_disks):
12716 child.size = parent.size
12717 child.mode = parent.mode
12719 # this is a DRBD disk, return its port to the pool
12720 # NOTE: this must be done right before the call to cfg.Update!
12721 for disk in old_disks:
12722 tcp_port = disk.logical_id[2]
12723 self.cfg.AddTcpUdpPort(tcp_port)
12725 # update instance structure
12726 instance.disks = new_disks
12727 instance.disk_template = constants.DT_PLAIN
12728 self.cfg.Update(instance, feedback_fn)
12730 # Release locks in case removing disks takes a while
12731 _ReleaseLocks(self, locking.LEVEL_NODE)
12733 feedback_fn("Removing volumes on the secondary node...")
12734 for disk in old_disks:
12735 self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)
12741 feedback_fn("Removing unneeded volumes on the primary node...")
12742 for idx, disk in enumerate(old_disks):
12743 meta = disk.children[1]
12744 self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)
12750 def _CreateNewDisk(self, idx, params, _):
12751 """Creates a new disk.
12754 instance = self.instance
12757 if instance.disk_template in constants.DTS_FILEBASED:
12758 (file_driver, file_path) = instance.disks[0].logical_id
12759 file_path = os.path.dirname(file_path)
12761 file_driver = file_path = None
12764 _GenerateDiskTemplate(self, instance.disk_template, instance.name,
12765 instance.primary_node, instance.secondary_nodes,
12766 [params], file_path, file_driver, idx,
12767 self.Log, self.diskparams)[0]
12769 info = _GetInstanceInfoText(instance)
12771 logging.info("Creating volume %s for instance %s",
12772 disk.iv_name, instance.name)
12773 # Note: this needs to be kept in sync with _CreateDisks
12775 for node in instance.all_nodes:
      f_create = (node == instance.primary_node)
      try:
        _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
      except errors.OpExecError, err:
        self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
                        disk.iv_name, disk, node, err)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    disk.mode = params[constants.IDISK_MODE]
    return [
      ("disk.mode/%d" % idx, disk.mode),
      ]

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
12803 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
12804 self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)
12810 # if this is a DRBD disk, return its port to the pool
12811 if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  @staticmethod
  def _CreateNewNic(idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
12821 nicparams = private.params

    return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
        private.filled[constants.NIC_LINK])),
      ])

  @staticmethod
  def _ApplyNicMods(idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []
    for key in [constants.INIC_MAC, constants.INIC_IP]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    if private.params:
      nic.nicparams = private.params

      for (key, val) in params.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

12850 def Exec(self, feedback_fn):
12851 """Modifies an instance.
12853 All parameters take effect only at the next restart of the instance.
12856 # Process here the warnings from CheckPrereq, as we don't have a
12857 # feedback_fn there.
12858 # TODO: Replace with self.LogWarning
12859 for warn in self.warn:
12860 feedback_fn("WARNING: %s" % warn)
12862 assert ((self.op.disk_template is None) ^
12863 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
12864 "Not owning any node resource locks"
12867 instance = self.instance
12870 if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
12874 rpcres.Raise("Cannot modify instance runtime memory")
12875 result.append(("runtime_memory", self.op.runtime_mem))
12877 # Apply disk changes
12878 ApplyContainerMods("disk", instance.disks, result, self.diskmod,
12879 self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
12880 _UpdateIvNames(0, instance.disks)
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
12885 if self.op.remote_node:
12886 check_nodes.add(self.op.remote_node)
12887 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
12888 owned = self.owned_locks(level)
12889 assert not (check_nodes - owned), \
12890 ("Not owning the correct locks, owning %r, expected at least %r" %
12891 (owned, check_nodes))
      r_shut = _ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))
12905 assert instance.disk_template == self.op.disk_template, \
12906 ("Expected disk template '%s', found '%s'" %
12907 (self.op.disk_template, instance.disk_template))
12909 # Release node and resource locks if there are any (they might already have
12910 # been released during disk conversion)
12911 _ReleaseLocks(self, locking.LEVEL_NODE)
12912 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
12914 # Apply NIC changes
12915 if self._new_nics is not None:
12916 instance.nics = self._new_nics
12917 result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))
    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))
12953 self.cfg.Update(instance, feedback_fn)
12955 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
12956 self.owned_locks(locking.LEVEL_NODE)), \
12957 "All node locks should have been released by now"
12961 _DISK_CONVERSIONS = {
12962 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
12963 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
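
  # Sketch of the dispatch (see Exec above): the key is the
  # (current_template, requested_template) pair, e.g.
  #   mode = (constants.DT_PLAIN, constants.DT_DRBD8)
  #   self._DISK_CONVERSIONS[mode](self, feedback_fn)
  # and only the two conversions listed here are supported.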


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
12973 self.share_locks = _ShareAll()
12974 self.needed_locks = {
12975 locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    self._ExpandAndLockInstance()
12981 if self.op.target_groups:
12982 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None
12987 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
12989 def DeclareLocks(self, level):
12990 if level == locking.LEVEL_NODEGROUP:
12991 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
12993 if self.req_target_uuids:
12994 lock_groups = set(self.req_target_uuids)
12996 # Lock all groups used by instance optimistically; this requires going
12997 # via the node before it's locked, requiring verification later on
12998 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
13002 lock_groups = locking.ALL_SET
13004 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
13006 elif level == locking.LEVEL_NODE:
13007 if self.req_target_uuids:
13008 # Lock all nodes used by instances
13009 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
13010 self._LockInstancesNodes()
13012 # Lock all nodes in all potential target groups
13013 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
13014 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
13015 member_nodes = [node_name
13016 for group in lock_groups
13017 for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
13021 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13023 def CheckPrereq(self):
13024 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13025 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
13026 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
13028 assert (self.req_target_uuids is None or
13029 owned_groups.issuperset(self.req_target_uuids))
13030 assert owned_instances == set([self.op.instance_name])
13032 # Get instance information
13033 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
13035 # Check if node groups for locked instance are still correct
13036 assert owned_nodes.issuperset(self.instance.all_nodes), \
13037 ("Instance %s's nodes changed while we kept the lock" %
13038 self.op.instance_name)
    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                           owned_groups)

    if self.req_target_uuids:
13044 # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
13048 self.target_uuids = owned_groups - inst_groups
13050 conflicting_groups = self.target_uuids & inst_groups
13051 if conflicting_groups:
13052 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
13053 " used by the instance '%s'" %
13054 (utils.CommaJoin(conflicting_groups),
13055 self.op.instance_name),
13056 errors.ECODE_INVAL)
13058 if not self.target_uuids:
13059 raise errors.OpPrereqError("There are no possible target groups",
13060 errors.ECODE_INVAL)
13062 def BuildHooksEnv(self):
13063 """Build hooks env.
13066 assert self.target_uuids
13069 "TARGET_GROUPS": " ".join(self.target_uuids),
13072 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13076 def BuildHooksNodes(self):
13077 """Build hooks nodes.
13080 mn = self.cfg.GetMasterNode()
13081 return ([mn], [mn])
13083 def Exec(self, feedback_fn):
13084 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
13086 assert instances == [self.op.instance_name], "Instance not locked"
13088 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
13089 instances=instances, target_groups=list(self.target_uuids))
13091 ial.Run(self.op.iallocator)
13093 if not ial.success:
13094 raise errors.OpPrereqError("Can't compute solution for changing group of"
13095 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info),
                                 errors.ECODE_NORES)
13100 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
13102 self.LogInfo("Iallocator returned %s job(s) for changing group of"
13103 " instance '%s'", len(jobs), self.op.instance_name)
13105 return ResultWithJobs(jobs)
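
  # The iallocator runs in CHG_GROUP mode and only *plans* the move: the
  # resulting job list is handed back wrapped in ResultWithJobs for
  # submission, as the LogInfo message above indicates.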
13108 class LUBackupQuery(NoHooksLU):
13109 """Query the exports list
13114 def CheckArguments(self):
13115 self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
13116 ["node", "export"], self.op.use_locking)
13118 def ExpandNames(self):
13119 self.expq.ExpandNames(self)
13121 def DeclareLocks(self, level):
13122 self.expq.DeclareLocks(self, level)
  def Exec(self, feedback_fn):
    result = {}

    for (node, expname) in self.expq.OldStyleQuery(self):
      if expname is None:
        result[node] = False
      else:
        result.setdefault(node, []).append(expname)

    return result
13136 class _ExportQuery(_QueryBase):
13137 FIELDS = query.EXPORT_FIELDS
13139 #: The node name is not a unique key for this query
13140 SORT_FIELD = "node"
13142 def ExpandNames(self, lu):
13143 lu.needed_locks = {}
    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = _GetWantedNodes(lu, self.names)
    else:
      self.wanted = locking.ALL_SET
13151 self.do_locking = self.use_locking
13153 if self.do_locking:
13154 lu.share_locks = _ShareAll()
13155 lu.needed_locks = {
        locking.LEVEL_NODE: self.wanted,
        }

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
13163 """Computes the list of nodes and their attributes.
13166 # Locking is not used
13168 assert not (compat.any(lu.glm.is_owned(level)
13169 for level in locking.LEVELS
13170 if level != locking.LEVEL_CLUSTER) or
13171 self.do_locking or self.use_locking)
    nodes = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)

    result = []

    for (node, nres) in lu.rpc.call_export_list(nodes).items():
      if nres.fail_msg:
        result.append((node, None))
      else:
        result.extend((node, expname) for expname in nres.payload)

    return result
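
    # Old-style results, as consumed by LUBackupQuery.Exec above, are
    # (node, export_name) pairs, with (node, None) marking nodes that could
    # not be queried.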
13186 class LUBackupPrepare(NoHooksLU):
13187 """Prepares an instance for an export and returns useful information.
13192 def ExpandNames(self):
13193 self._ExpandAndLockInstance()
13195 def CheckPrereq(self):
13196 """Check prerequisites.
13199 instance_name = self.op.instance_name
13201 self.instance = self.cfg.GetInstanceInfo(instance_name)
13202 assert self.instance is not None, \
13203 "Cannot retrieve locked instance %s" % self.op.instance_name
13204 _CheckNodeOnline(self, self.instance.primary_node)
13206 self._cds = _GetClusterDomainSecret()
13208 def Exec(self, feedback_fn):
13209 """Prepares an instance for an export.
13212 instance = self.instance
13214 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13215 salt = utils.GenerateSecret(8)
13217 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
13218 result = self.rpc.call_x509_cert_create(instance.primary_node,
13219 constants.RIE_CERT_VALIDITY)
13220 result.Raise("Can't create X509 key and certificate on %s" % result.node)
13222 (name, cert_pem) = result.payload
      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert_pem)

      return {
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
                          salt),
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
        }

    return None
13237 class LUBackupExport(LogicalUnit):
13238 """Export an instance to an image in the cluster.
13241 HPATH = "instance-export"
13242 HTYPE = constants.HTYPE_INSTANCE
13245 def CheckArguments(self):
13246 """Check the arguments.
13249 self.x509_key_name = self.op.x509_key_name
13250 self.dest_x509_ca_pem = self.op.destination_x509_ca
13252 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13253 if not self.x509_key_name:
13254 raise errors.OpPrereqError("Missing X509 key name for encryption",
13255 errors.ECODE_INVAL)
13257 if not self.dest_x509_ca_pem:
13258 raise errors.OpPrereqError("Missing destination X509 CA",
13259 errors.ECODE_INVAL)
13261 def ExpandNames(self):
13262 self._ExpandAndLockInstance()
13264 # Lock all nodes for local exports
13265 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13266 # FIXME: lock only instance primary and destination node
    # Sad but true, for now we have to lock all nodes, as we don't know where
13269 # the previous export might be, and in this LU we search for it and
13270 # remove it from its current node. In the future we could fix this by:
13271 # - making a tasklet to search (share-lock all), then create the
13272 # new one, then one to remove, after
13273 # - removing the removal operation altogether
13274 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13276 def DeclareLocks(self, level):
13277 """Last minute lock declaration."""
13278 # All nodes are locked anyway, so nothing to do here.
13280 def BuildHooksEnv(self):
13281 """Build hooks env.
    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_MODE": self.op.mode,
13288 "EXPORT_NODE": self.op.target_node,
13289 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
13290 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
13291 # TODO: Generic function for boolean env variables
13292 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
13295 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13299 def BuildHooksNodes(self):
13300 """Build hooks nodes.
13303 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
13305 if self.op.mode == constants.EXPORT_MODE_LOCAL:
      nl.append(self.op.target_node)

    return (nl, nl)

  def CheckPrereq(self):
13311 """Check prerequisites.
    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
13318 self.instance = self.cfg.GetInstanceInfo(instance_name)
13319 assert self.instance is not None, \
13320 "Cannot retrieve locked instance %s" % self.op.instance_name
13321 _CheckNodeOnline(self, self.instance.primary_node)
13323 if (self.op.remove_instance and
13324 self.instance.admin_state == constants.ADMINST_UP and
13325 not self.op.shutdown):
13326 raise errors.OpPrereqError("Can not remove instance without shutting it"
13329 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13330 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
13331 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
13332 assert self.dst_node is not None
13334 _CheckNodeOnline(self, self.dst_node.name)
      _CheckNodeNotDrained(self, self.dst_node.name)

      self._cds = None
      self.dest_disk_info = None
13339 self.dest_x509_ca = None
13341 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13342 self.dst_node = None
13344 if len(self.op.target_node) != len(self.instance.disks):
13345 raise errors.OpPrereqError(("Received destination information for %s"
13346 " disks, but instance %s has %s disks") %
13347 (len(self.op.target_node), instance_name,
13348 len(self.instance.disks)),
13349 errors.ECODE_INVAL)
13351 cds = _GetClusterDomainSecret()
      # Check X509 key name
      try:
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
13356 except (TypeError, ValueError), err:
13357 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
13359 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
13360 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
13361 errors.ECODE_INVAL)
      # Load and verify CA
      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
13366 except OpenSSL.crypto.Error, err:
13367 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
13368 (err, ), errors.ECODE_INVAL)
13370 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
13371 if errcode is not None:
13372 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
13373 (msg, ), errors.ECODE_INVAL)
13375 self.dest_x509_ca = cert
      # Verify target information
      disk_info = []
      for idx, disk_data in enumerate(self.op.target_node):
        try:
          (host, port, magic) = \
13382 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
13383 except errors.GenericError, err:
13384 raise errors.OpPrereqError("Target info for disk %s: %s" %
13385 (idx, err), errors.ECODE_INVAL)
13387 disk_info.append((host, port, magic))
13389 assert len(disk_info) == len(self.op.target_node)
      self.dest_disk_info = disk_info

    else:
      raise errors.ProgrammerError("Unhandled export mode %r" %
                                   self.op.mode)

    # instance disk type verification
13397 # TODO: Implement export support for file-based disks
13398 for disk in self.instance.disks:
13399 if disk.dev_type == constants.LD_FILE:
13400 raise errors.OpPrereqError("Export not supported for instances with"
13401 " file-based disks", errors.ECODE_INVAL)
13403 def _CleanupExports(self, feedback_fn):
13404 """Removes exports of current instance from all other nodes.
13406 If an instance in a cluster with nodes A..D was exported to node C, its
    exports will be removed from the nodes A, B and D.

    """
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
13412 nodelist = self.cfg.GetNodeList()
13413 nodelist.remove(self.dst_node.name)
13415 # on one-node clusters nodelist will be empty after the removal
13416 # if we proceed the backup would be removed because OpBackupQuery
13417 # substitutes an empty list with the full cluster node list.
    iname = self.instance.name
    if nodelist:
      feedback_fn("Removing old exports for instance %s" % iname)
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)
13431 def Exec(self, feedback_fn):
13432 """Export an instance to an image in the cluster.
13435 assert self.op.mode in constants.EXPORT_MODES
13437 instance = self.instance
13438 src_node = instance.primary_node
13440 if self.op.shutdown:
13441 # shutdown the instance, but not the disks
13442 feedback_fn("Shutting down instance %s" % instance.name)
13443 result = self.rpc.call_instance_shutdown(src_node, instance,
13444 self.op.shutdown_timeout)
13445 # TODO: Maybe ignore failures if ignore_remove_failures is set
13446 result.Raise("Could not shutdown instance %s on"
13447 " node %s" % (instance.name, src_node))
13449 # set the disks ID correctly since call_instance_start needs the
13450 # correct drbd minor to create the symlinks
13451 for disk in instance.disks:
13452 self.cfg.SetDiskID(disk, src_node)
    activate_disks = (instance.admin_state != constants.ADMINST_UP)

    if activate_disks:
      # Activate the instance disks if we're exporting a stopped instance
      feedback_fn("Activating disks for %s" % instance.name)
      _StartInstanceDisks(self, instance, None)
    try:
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
                                                     instance)

      helper.CreateSnapshots()
      try:
        if (self.op.shutdown and
13468 instance.admin_state == constants.ADMINST_UP and
13469 not self.op.remove_instance):
13470 assert not activate_disks
13471 feedback_fn("Starting instance %s" % instance.name)
13472 result = self.rpc.call_instance_start(src_node,
13473 (instance, None, None), False)
          msg = result.fail_msg
          if msg:
            feedback_fn("Failed to start instance: %s" % msg)
13477 _ShutdownInstanceDisks(self, instance)
13478 raise errors.OpExecError("Could not start instance: %s" % msg)
13480 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13481 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
13482 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13483 connect_timeout = constants.RIE_CONNECT_TIMEOUT
13484 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
        (key_name, _, _) = self.x509_key_name

        dest_ca_pem = \
          OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                          self.dest_x509_ca)

        (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
                                                   key_name, dest_ca_pem,
                                                   timeouts)
      finally:
        helper.Cleanup()

      # Check for backwards compatibility
13499 assert len(dresults) == len(instance.disks)
13500 assert compat.all(isinstance(i, bool) for i in dresults), \
13501 "Not all results are boolean: %r" % dresults
13505 feedback_fn("Deactivating disks for %s" % instance.name)
13506 _ShutdownInstanceDisks(self, instance)
    if not (compat.all(dresults) and fin_resu):
      failures = []
      if not fin_resu:
        failures.append("export finalization")
      if not compat.all(dresults):
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
                               if not dsk)
        failures.append("disk export: disk(s) %s" % fdsk)
13517 raise errors.OpExecError("Export failed, errors in %s" %
13518 utils.CommaJoin(failures))
13520 # At this point, the export was successful, we can cleanup/finish
13522 # Remove instance if requested
13523 if self.op.remove_instance:
13524 feedback_fn("Removing instance %s" % instance.name)
13525 _RemoveInstance(self, feedback_fn, instance,
13526 self.op.ignore_remove_failures)
13528 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13529 self._CleanupExports(feedback_fn)
13531 return fin_resu, dresults
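
  # Callers thus receive (fin_resu, dresults): the overall finalization
  # status plus one boolean per instance disk, matching the failure
  # accounting above.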
13534 class LUBackupRemove(NoHooksLU):
13535 """Remove exports related to the named instance.
13540 def ExpandNames(self):
13541 self.needed_locks = {}
13542 # We need all nodes to be locked in order for RemoveExport to work, but we
13543 # don't need to lock the instance itself, as nothing will happen to it (and
13544 # we can remove exports also for a removed instance)
13545 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13547 def Exec(self, feedback_fn):
13548 """Remove any export.
13551 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
13552 # If the instance was not found we'll try with the name that was passed in.
13553 # This will only work if it was an FQDN, though.
13555 if not instance_name:
13557 instance_name = self.op.instance_name
13559 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
13560 exportlist = self.rpc.call_export_list(locked_nodes)
13562 for node in exportlist:
13563 msg = exportlist[node].fail_msg
13565 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
13567 if instance_name in exportlist[node].payload:
13569 result = self.rpc.call_export_remove(node, instance_name)
13570 msg = result.fail_msg
13572 logging.error("Could not remove export for instance %s"
13573 " on node %s: %s", instance_name, node, msg)
13575 if fqdn_warn and not found:
13576 feedback_fn("Export not found. If trying to remove an export belonging"
13577 " to a deleted instance please use its Fully Qualified"
13581 class LUGroupAdd(LogicalUnit):
13582 """Logical unit for creating node groups.
13585 HPATH = "group-add"
13586 HTYPE = constants.HTYPE_GROUP
13589 def ExpandNames(self):
13590 # We need the new group's UUID here so that we can create and acquire the
13591 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
13592 # that it should not check whether the UUID exists in the configuration.
13593 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
13594 self.needed_locks = {}
13595 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
13597 def CheckPrereq(self):
13598 """Check prerequisites.
    This checks that the given group name is not an existing node group
    already.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
13610 " node group (UUID: %s)" %
13611 (self.op.group_name, existing_uuid),
13612 errors.ECODE_EXISTS)
13614 if self.op.ndparams:
13615 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
13617 if self.op.hv_state:
      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
    else:
      self.new_hv_state = None
13622 if self.op.disk_state:
      self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
    else:
      self.new_disk_state = None
13627 if self.op.diskparams:
13628 for templ in constants.DISK_TEMPLATES:
13629 if templ in self.op.diskparams:
13630 utils.ForceDictType(self.op.diskparams[templ],
13631 constants.DISK_DT_TYPES)
      self.new_diskparams = self.op.diskparams
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)
    else:
      self.new_diskparams = {}
13641 if self.op.ipolicy:
13642 cluster = self.cfg.GetClusterInfo()
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
      try:
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
13646 except errors.ConfigurationError, err:
13647 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
13648 errors.ECODE_INVAL)
13650 def BuildHooksEnv(self):
13651 """Build hooks env.
13655 "GROUP_NAME": self.op.group_name,
13658 def BuildHooksNodes(self):
13659 """Build hooks nodes.
13662 mn = self.cfg.GetMasterNode()
13663 return ([mn], [mn])
13665 def Exec(self, feedback_fn):
13666 """Add the node group to the cluster.
13669 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
13670 uuid=self.group_uuid,
13671 alloc_policy=self.op.alloc_policy,
13672 ndparams=self.op.ndparams,
13673 diskparams=self.new_diskparams,
13674 ipolicy=self.op.ipolicy,
13675 hv_state_static=self.new_hv_state,
13676 disk_state_static=self.new_disk_state)
13678 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
13679 del self.remove_locks[locking.LEVEL_NODEGROUP]
13682 class LUGroupAssignNodes(NoHooksLU):
13683 """Logical unit for assigning nodes to groups.
13688 def ExpandNames(self):
13689 # These raise errors.OpPrereqError on their own:
13690 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13691 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
13693 # We want to lock all the affected nodes and groups. We have readily
13694 # available the list of nodes, and the *destination* group. To gather the
13695 # list of "source" groups, we need to fetch node information later on.
13696 self.needed_locks = {
13697 locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.nodes,
      }

  def DeclareLocks(self, level):
13702 if level == locking.LEVEL_NODEGROUP:
13703 assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
13705 # Try to get all affected nodes' groups without having the group or node
13706 # lock yet. Needs verification later in the code flow.
13707 groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
13709 self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
13711 def CheckPrereq(self):
13712 """Check prerequisites.
13715 assert self.needed_locks[locking.LEVEL_NODEGROUP]
13716 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
13717 frozenset(self.op.nodes))
13719 expected_locks = (set([self.group_uuid]) |
13720 self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
13721 actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
13722 if actual_locks != expected_locks:
13723 raise errors.OpExecError("Nodes changed groups since locks were acquired,"
13724 " current groups are '%s', used to be '%s'" %
13725 (utils.CommaJoin(expected_locks),
13726 utils.CommaJoin(actual_locks)))
13728 self.node_data = self.cfg.GetAllNodesInfo()
13729 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13730 instance_data = self.cfg.GetAllInstancesInfo()
13732 if self.group is None:
13733 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13734 (self.op.group_name, self.group_uuid))
13736 (new_splits, previous_splits) = \
13737 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
13738 for node in self.op.nodes],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances:"
                        " %s", fmt_new_splits)

    if previous_splits:
13753 self.LogWarning("In addition, these already-split instances continue"
13754 " to be split across groups: %s",
13755 utils.CommaJoin(utils.NiceSort(previous_splits)))
13757 def Exec(self, feedback_fn):
13758 """Assign nodes to a new group.
13761 mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
13763 self.cfg.AssignGroupNodes(mods)
13766 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
13767 """Check for split instances after a node assignment.
13769 This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
13774 instances that were already split, and remain so after the change.
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR
    are considered.

    @type changes: list of (node_name, new_group_uuid) pairs.
13780 @param changes: list of node assignments to consider.
13781 @param node_data: a dict with data for all nodes
13782 @param instance_data: a dict with all instances to consider
13783 @rtype: a two-tuple
13784 @return: a list of instances that were previously okay and result split as a
13785 consequence of this change, and a list of instances that were previously
      split and this change does not fix.

    """
    changed_nodes = dict((node, group) for node, group in changes
13790 if node_data[node].group != group)
13792 all_split_instances = set()
13793 previously_split_instances = set()
13795 def InstanceNodes(instance):
13796 return [instance.primary_node] + list(instance.secondary_nodes)
13798 for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      instance_nodes = InstanceNodes(inst)
13804 if len(set(node_data[node].group for node in instance_nodes)) > 1:
13805 previously_split_instances.add(inst.name)
13807 if len(set(changed_nodes.get(node, node_data[node].group)
13808 for node in instance_nodes)) > 1:
13809 all_split_instances.add(inst.name)
13811 return (list(all_split_instances - previously_split_instances),
13812 list(previously_split_instances & all_split_instances))
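
  # Illustrative example (hypothetical names): for a DRBD instance with
  # primary n1 and secondary n2, both currently in group g1,
  #   CheckAssignmentForSplitInstances([("n2", "g2")], node_data,
  #                                    instance_data)
  # reports the instance in the first list (newly split); an instance whose
  # nodes already span two groups, and still do afterwards, appears in the
  # second list instead.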
13815 class _GroupQuery(_QueryBase):
13816 FIELDS = query.GROUP_FIELDS
13818 def ExpandNames(self, lu):
13819 lu.needed_locks = {}
13821 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
13822 self._cluster = lu.cfg.GetClusterInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)
  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
13854 do_nodes = query.GQ_NODE in self.requested_data
13855 do_instances = query.GQ_INST in self.requested_data
13857 group_to_nodes = None
13858 group_to_instances = None
13860 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
13861 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
13862 # latter GetAllInstancesInfo() is not enough, for we have to go through
13863 # instance->node. Hence, we will need to process nodes even if we only need
13864 # instance information.
13865 if do_nodes or do_instances:
13866 all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
13871 if node.group in group_to_nodes:
13872 group_to_nodes[node.group].append(node.name)
          node_to_group[node.name] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
13877 group_to_instances = dict((uuid, []) for uuid in self.wanted)
13879 for instance in all_instances.values():
13880 node = instance.primary_node
13881 if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.name)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None
13888 return query.GroupQueryData(self._cluster,
13889 [self._all_groups[uuid]
13890 for uuid in self.wanted],
13891 group_to_nodes, group_to_instances,
13892 query.GQ_DISKPARAMS in self.requested_data)
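
  # GroupQueryData thus bundles the cluster info, the selected group objects
  # and the optional group->nodes / group->instances maps computed above;
  # disk parameters are only included when GQ_DISKPARAMS was requested.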
13895 class LUGroupQuery(NoHooksLU):
13896 """Logical unit for querying node groups.
13901 def CheckArguments(self):
13902 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
13903 self.op.output_fields, False)
13905 def ExpandNames(self):
13906 self.gq.ExpandNames(self)
13908 def DeclareLocks(self, level):
13909 self.gq.DeclareLocks(self, level)
13911 def Exec(self, feedback_fn):
13912 return self.gq.OldStyleQuery(self)
13915 class LUGroupSetParams(LogicalUnit):
13916 """Modifies the parameters of a node group.
13919 HPATH = "group-modify"
13920 HTYPE = constants.HTYPE_GROUP
13923 def CheckArguments(self):
13926 self.op.diskparams,
13927 self.op.alloc_policy,
13929 self.op.disk_state,
13933 if all_changes.count(None) == len(all_changes):
13934 raise errors.OpPrereqError("Please pass at least one modification",
13935 errors.ECODE_INVAL)
13937 def ExpandNames(self):
13938 # This raises errors.OpPrereqError on its own:
13939 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13941 self.needed_locks = {
13942 locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

    self.share_locks[locking.LEVEL_INSTANCE] = 1
13948 def DeclareLocks(self, level):
13949 if level == locking.LEVEL_INSTANCE:
13950 assert not self.needed_locks[locking.LEVEL_INSTANCE]
      # Lock instances optimistically, needs verification once group lock has
      # been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

  @staticmethod
  def _UpdateAndVerifyDiskParams(old, new):
13959 """Updates and verifies disk parameters.
13962 new_params = _GetUpdatedParams(old, new)
13963 utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
13966 def CheckPrereq(self):
13967 """Check prerequisites.
13970 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13972 # Check if locked instances are still correct
13973 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
13975 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13976 cluster = self.cfg.GetClusterInfo()
13978 if self.group is None:
13979 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13980 (self.op.group_name, self.group_uuid))
13982 if self.op.ndparams:
13983 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
13984 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
13985 self.new_ndparams = new_ndparams
13987 if self.op.diskparams:
13988 diskparams = self.group.diskparams
13989 uavdp = self._UpdateAndVerifyDiskParams
13990 # For each disktemplate subdict update and verify the values
13991 new_diskparams = dict((dt,
13992 uavdp(diskparams.get(dt, {}),
13993 self.op.diskparams[dt]))
13994 for dt in constants.DISK_TEMPLATES
13995 if dt in self.op.diskparams)
13996 # As we've all subdicts of diskparams ready, lets merge the actual
13997 # dict with all updated subdicts
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)
14005 if self.op.hv_state:
14006 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
14007 self.group.hv_state_static)
14009 if self.op.disk_state:
14010 self.new_disk_state = \
14011 _MergeAndVerifyDiskState(self.op.disk_state,
14012 self.group.disk_state_static)
14014 if self.op.ipolicy:
      self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
                                            self.op.ipolicy,
                                            group_policy=True)

      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
      inst_filter = lambda inst: inst.name in owned_instances
      instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
      violations = \
          _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
                                                               self.group),
                                        new_ipolicy, instances)

      if violations:
        self.LogWarning("After the ipolicy change the following instances"
14029 " violate them: %s",
14030 utils.CommaJoin(violations))
14032 def BuildHooksEnv(self):
14033 """Build hooks env.
14037 "GROUP_NAME": self.op.group_name,
14038 "NEW_ALLOC_POLICY": self.op.alloc_policy,
14041 def BuildHooksNodes(self):
14042 """Build hooks nodes.
14045 mn = self.cfg.GetMasterNode()
14046 return ([mn], [mn])
14048 def Exec(self, feedback_fn):
14049 """Modifies the node group.
14054 if self.op.ndparams:
14055 self.group.ndparams = self.new_ndparams
14056 result.append(("ndparams", str(self.group.ndparams)))
14058 if self.op.diskparams:
14059 self.group.diskparams = self.new_diskparams
14060 result.append(("diskparams", str(self.group.diskparams)))
14062 if self.op.alloc_policy:
14063 self.group.alloc_policy = self.op.alloc_policy
14065 if self.op.hv_state:
14066 self.group.hv_state_static = self.new_hv_state
14068 if self.op.disk_state:
14069 self.group.disk_state_static = self.new_disk_state
14071 if self.op.ipolicy:
14072 self.group.ipolicy = self.new_ipolicy
    self.cfg.Update(self.group, feedback_fn)
    return result


class LUGroupRemove(LogicalUnit):
14079 HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This will raise errors.OpPrereqError on its own:
14085 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14086 self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
14091 """Check prerequisites.
14093 This checks that the given group name exists as a node group, that is
    empty (i.e., contains no nodes), and that is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
14099 group_nodes = [node.name
14100 for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
14107 utils.CommaJoin(utils.NiceSort(group_nodes))),
14108 errors.ECODE_STATE)
14110 # Verify the cluster would not be left group-less.
14111 if len(self.cfg.GetNodeGroupList()) == 1:
14112 raise errors.OpPrereqError("Group '%s' is the only group,"
14113 " cannot be removed" %
14114 self.op.group_name,
14115 errors.ECODE_STATE)
14117 def BuildHooksEnv(self):
14118 """Build hooks env.
14122 "GROUP_NAME": self.op.group_name,
14125 def BuildHooksNodes(self):
14126 """Build hooks nodes.
14129 mn = self.cfg.GetMasterNode()
14130 return ([mn], [mn])
14132 def Exec(self, feedback_fn):
14133 """Remove the node group.
14137 self.cfg.RemoveNodeGroup(self.group_uuid)
14138 except errors.ConfigurationError:
14139 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
14140 (self.op.group_name, self.group_uuid))
14142 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
14145 class LUGroupRename(LogicalUnit):
14146 HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
14151 # This raises errors.OpPrereqError on its own:
14152 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14154 self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
14159 """Check prerequisites.
    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
14170 " node group (UUID: %s)" %
14171 (self.op.new_name, new_name_uuid),
14172 errors.ECODE_EXISTS)
14174 def BuildHooksEnv(self):
14175 """Build hooks env.
14179 "OLD_NAME": self.op.group_name,
14180 "NEW_NAME": self.op.new_name,
14183 def BuildHooksNodes(self):
14184 """Build hooks nodes.
14187 mn = self.cfg.GetMasterNode()
14189 all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    run_nodes.extend(node.name for node in all_nodes.values()
14194 if node.group == self.group_uuid)
14196 return (run_nodes, run_nodes)
14198 def Exec(self, feedback_fn):
14199 """Rename the node group.
14202 group = self.cfg.GetNodeGroup(self.group_uuid)
14205 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
14206 (self.op.group_name, self.group_uuid))
14208 group.name = self.op.new_name
14209 self.cfg.Update(group, feedback_fn)
14211 return self.op.new_name
14214 class LUGroupEvacuate(LogicalUnit):
14215 HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
14220 # This raises errors.OpPrereqError on its own:
14221 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14223 if self.op.target_groups:
14224 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []
14229 if self.group_uuid in self.req_target_uuids:
14230 raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
14231 " as a target group (targets are %s)" %
14233 utils.CommaJoin(self.req_target_uuids)),
14234 errors.ECODE_INVAL)
14236 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
14238 self.share_locks = _ShareAll()
14239 self.needed_locks = {
14240 locking.LEVEL_INSTANCE: [],
14241 locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
14246 if level == locking.LEVEL_INSTANCE:
14247 assert not self.needed_locks[locking.LEVEL_INSTANCE]
14249 # Lock instances optimistically, needs verification once node and group
14250 # locks have been acquired
14251 self.needed_locks[locking.LEVEL_INSTANCE] = \
14252 self.cfg.GetNodeGroupInstances(self.group_uuid)
14254 elif level == locking.LEVEL_NODEGROUP:
14255 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
14257 if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)
      else:
        # Lock all groups used by instances optimistically; this requires going
14261 # via the node before it's locked, requiring verification later on
14262 lock_groups.update(group_uuid
14263 for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(instance_name))
      else:
        # No target groups, need to lock all of them
14269 lock_groups = locking.ALL_SET
14271 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
14273 elif level == locking.LEVEL_NODE:
14274 # This will only lock the nodes in the group to be evacuated which
14275 # contain actual instances
14276 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
14277 self._LockInstancesNodes()
14279 # Lock all nodes in group to be evacuated and target groups
14280 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14281 assert self.group_uuid in owned_groups
14282 member_nodes = [node_name
14283 for group in owned_groups
14284 for node_name in self.cfg.GetNodeGroup(group).members]
14285 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
14287 def CheckPrereq(self):
14288 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
14289 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14290 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
14292 assert owned_groups.issuperset(self.req_target_uuids)
14293 assert self.group_uuid in owned_groups
14295 # Check if locked instances are still correct
14296 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
14298 # Get instance information
14299 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
14301 # Check if node groups for locked instances are still correct
14302 _CheckInstancesNodeGroups(self.cfg, self.instances,
14303 owned_groups, owned_nodes, self.group_uuid)
14305 if self.req_target_uuids:
14306 # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
14310 self.target_uuids = [group_uuid for group_uuid in owned_groups
14311 if group_uuid != self.group_uuid]
14313 if not self.target_uuids:
14314 raise errors.OpPrereqError("There are no possible target groups",
14315 errors.ECODE_INVAL)
14317 def BuildHooksEnv(self):
14318 """Build hooks env.
14322 "GROUP_NAME": self.op.group_name,
14323 "TARGET_GROUPS": " ".join(self.target_uuids),
14326 def BuildHooksNodes(self):
14327 """Build hooks nodes.
14330 mn = self.cfg.GetMasterNode()
14332 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
14334 run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
14336 return (run_nodes, run_nodes)
14338 def Exec(self, feedback_fn):
14339 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
14341 assert self.group_uuid not in self.target_uuids
14343 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
14344 instances=instances, target_groups=self.target_uuids)
14346 ial.Run(self.op.iallocator)
14348 if not ial.success:
14349 raise errors.OpPrereqError("Can't compute group evacuation using"
14350 " iallocator '%s': %s" %
14351 (self.op.iallocator, ial.info),
14352 errors.ECODE_NORES)
14354 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
14356 self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
14357 len(jobs), self.op.group_name)
14359 return ResultWithJobs(jobs)
14362 class TagsLU(NoHooksLU): # pylint: disable=W0223
14363 """Generic tags LU.
  This is an abstract class which is the parent of all the other tags LUs.

  """
  def ExpandNames(self):
14369 self.group_uuid = None
14370 self.needed_locks = {}
14372 if self.op.kind == constants.TAG_NODE:
14373 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
14374 lock_level = locking.LEVEL_NODE
14375 lock_name = self.op.name
14376 elif self.op.kind == constants.TAG_INSTANCE:
14377 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
14378 lock_level = locking.LEVEL_INSTANCE
14379 lock_name = self.op.name
14380 elif self.op.kind == constants.TAG_NODEGROUP:
14381 self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
14382 lock_level = locking.LEVEL_NODEGROUP
      lock_name = self.group_uuid
    else:
      lock_level = None
      lock_name = None

    if lock_level and getattr(self.op, "use_locking", True):
14389 self.needed_locks[lock_level] = lock_name
14391 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
14392 # not possible to acquire the BGL based on opcode parameters)
14394 def CheckPrereq(self):
14395 """Check prerequisites.
14398 if self.op.kind == constants.TAG_CLUSTER:
14399 self.target = self.cfg.GetClusterInfo()
14400 elif self.op.kind == constants.TAG_NODE:
14401 self.target = self.cfg.GetNodeInfo(self.op.name)
14402 elif self.op.kind == constants.TAG_INSTANCE:
14403 self.target = self.cfg.GetInstanceInfo(self.op.name)
14404 elif self.op.kind == constants.TAG_NODEGROUP:
      self.target = self.cfg.GetNodeGroup(self.group_uuid)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
14408 str(self.op.kind), errors.ECODE_INVAL)
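
    # The discriminator used here (self.op.kind) mirrors the one used in
    # ExpandNames, so the object resolved as self.target is the same one
    # that was locked there; cluster tags are the exception (see the FIXME
    # about the BGL in ExpandNames).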
14411 class LUTagsGet(TagsLU):
14412 """Returns the tags of a given object.
14417 def ExpandNames(self):
14418 TagsLU.ExpandNames(self)
14420 # Share locks as this is only a read operation
14421 self.share_locks = _ShareAll()
14423 def Exec(self, feedback_fn):
14424 """Returns the tag list.
14427 return list(self.target.GetTags())
14430 class LUTagsSearch(NoHooksLU):
14431 """Searches the tags for a given pattern.
14436 def ExpandNames(self):
14437 self.needed_locks = {}
14439 def CheckPrereq(self):
14440 """Check prerequisites.
    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
14447 except re.error, err:
14448 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
14449 (self.op.pattern, err), errors.ECODE_INVAL)
14451 def Exec(self, feedback_fn):
14452 """Returns the tag list.
14456 tgts = [("/cluster", cfg.GetClusterInfo())]
14457 ilist = cfg.GetAllInstancesInfo().values()
14458 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
14459 nlist = cfg.GetAllNodesInfo().values()
14460 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
14461 tgts.extend(("/nodegroup/%s" % n.name, n)
                for n in cfg.GetAllNodeGroupsInfo().values())
    results = []
    for path, target in tgts:
14465 for tag in target.GetTags():
14466 if self.re.search(tag):
          results.append((path, tag))
    return results


class LUTagsSet(TagsLU):
14472 """Sets a tag on a given object.
14477 def CheckPrereq(self):
14478 """Check prerequisites.
    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
14484 for tag in self.op.tags:
14485 objects.TaggableObject.ValidateTag(tag)
  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
14493 self.target.AddTag(tag)
14494 except errors.TagError, err:
14495 raise errors.OpExecError("Error while setting tag: %s" % str(err))
14496 self.cfg.Update(self.target, feedback_fn)
14499 class LUTagsDel(TagsLU):
14500 """Delete a list of tags from a given object.
14505 def CheckPrereq(self):
14506 """Check prerequisites.
    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
14512 for tag in self.op.tags:
14513 objects.TaggableObject.ValidateTag(tag)
14514 del_tags = frozenset(self.op.tags)
14515 cur_tags = self.target.GetTags()
    diff_tags = del_tags - cur_tags
    if diff_tags:
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
14520 raise errors.OpPrereqError("Tag(s) %s not found" %
14521 (utils.CommaJoin(diff_names), ),
14522 errors.ECODE_NOENT)
14524 def Exec(self, feedback_fn):
14525 """Remove the tag from the object.
14528 for tag in self.op.tags:
14529 self.target.RemoveTag(tag)
14530 self.cfg.Update(self.target, feedback_fn)
14533 class LUTestDelay(NoHooksLU):
14534 """Sleep for a specified amount of time.
  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  REQ_BGL = False

  def ExpandNames(self):
14543 """Expand names and set required locks.
    This expands the node list, if any.

    """
    self.needed_locks = {}
14549 if self.op.on_nodes:
14550 # _GetWantedNodes can be used here, but is not always appropriate to use
14551 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
14552 # more information.
14553 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
14554 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
14556 def _TestDelay(self):
14557 """Do the actual sleep.
14560 if self.op.on_master:
14561 if not utils.TestDelay(self.op.duration):
14562 raise errors.OpExecError("Error during master delay test")
14563 if self.op.on_nodes:
14564 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
14565 for node, node_result in result.items():
14566 node_result.Raise("Failure during rpc call to node %s" % node)
14568 def Exec(self, feedback_fn):
14569 """Execute the test delay opcode, with the wanted repetitions.
14572 if self.op.repeat == 0:
14575 top_value = self.op.repeat - 1
14576 for i in range(self.op.repeat):
14577 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
14581 class LUTestJqueue(NoHooksLU):
14582 """Utility LU to test some aspects of the job queue.
14587 # Must be lower than default timeout for WaitForJobChange to see whether it
14588 # notices changed jobs
14589 _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
14594 """Opens a Unix socket and waits for another program to connect.
14597 @param cb: Callback to send socket name to client
14598 @type errcls: class
14599 @param errcls: Exception class to use for errors
14602 # Using a temporary directory as there's no easy way to create temporary
14603 # sockets without writing a custom loop around tempfile.mktemp and
14605 tmpdir = tempfile.mkdtemp()
14607 tmpsock = utils.PathJoin(tmpdir, "sock")
14609 logging.debug("Creating temporary socket at %s", tmpsock)
14610 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
14615 # Send details to client
14618 # Wait for client to connect before continuing
14619 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
14621 (conn, _) = sock.accept()
14622 except socket.error, err:
14623 raise errcls("Client didn't connect in time (%s)" % err)
14627 # Remove as soon as client is connected
14628 shutil.rmtree(tmpdir)
14630 # Wait for client to close
14633 # pylint: disable=E1101
14634 # Instance of '_socketobject' has no ... member
14635 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
14637 except socket.error, err:
14638 raise errcls("Client failed to confirm notification (%s)" % err)
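
  # Illustrative counterpart of _NotifyUsingSocket: an external test client
  # connects to the advertised socket path and then closes the connection,
  # which the code above treats as confirmation. Hypothetical client sketch
  # with a made-up socket path, kept as a comment:
  #
  #   import socket
  #
  #   sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
  #   sock.connect("/tmp/tmpXYZ/sock")  # path announced via job log messages
  #   sock.close()                      # closing confirms the notification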

  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
      # Report how many test messages have been sent
      self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has several sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc_runner, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc_runner
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.memory = self.disks = self.disk_template = self.spindle_use = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.instances = None
    self.evac_mode = None
    self.target_groups = []
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None

    try:
      (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
    except KeyError:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)

    keyset = [n for (n, _) in keydata]

    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(compat.partial(fn, self), keydata)
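
  # Illustrative construction for the allocate mode (mirrored by
  # LUTestAllocator.Exec further below); keyword arguments must match the
  # keydata registered in _MODE_DATA. All values here are hypothetical:
  #
  #   ial = IAllocator(cfg, rpc_runner,
  #                    mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="instance1.example.com",
  #                    memory=512, spindle_use=1, vcpus=1,
  #                    disks=[{constants.IDISK_SIZE: 1024,
  #                            constants.IDISK_MODE: constants.DISK_RDWR}],
  #                    disk_template=constants.DT_DRBD8,
  #                    os="debootstrap", tags=[], nics=[{}],
  #                    hypervisor=constants.HT_XEN_PVM)
  #   ial.Run("hail")  # runs the external script, then validates the result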

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      "ipolicy": cluster_info.ipolicy,
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    else:
      hypervisor_name = cluster_info.primary_hypervisor

    node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
                                        [hypervisor_name])
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(cfg, ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data
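
  # Once computed, self.in_data has roughly the following top-level shape
  # (values illustrative, nested dicts abridged):
  #
  #   {
  #     "version": constants.IALLOCATOR_VERSION,
  #     "cluster_name": "cluster.example.com",
  #     "cluster_tags": [],
  #     "enabled_hypervisors": ["xen-pvm"],
  #     "ipolicy": {...},
  #     "nodegroups": {...},  # from _ComputeNodeGroupData
  #     "nodes": {...},       # static config data merged with live RPC data
  #     "instances": {...},   # from _ComputeInstanceData
  #   }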

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    cluster = cfg.GetClusterInfo()
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      "ipolicy": _CalculateGroupIPolicy(cluster, gdata),
      })
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(cfg, node_cfg):
    """Compute global node data.

    @rtype: dict
    @returns: a dict mapping node names to their config-based attribute dicts

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": ninfo.offline,
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      "ndparams": cfg.GetNdParams(ninfo),
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    #TODO(dynmem): compute the right data on MAX and MIN memory
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = _MakeLegacyNodeInfo(nresult.payload)

        for attr in ["memory_total", "memory_free", "memory_dom0",
                     "vg_size", "vg_free", "cpu_total"]:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MAXMEM]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
            i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
            remote_info["memory_free"] -= max(0, i_mem_diff)

            if iinfo.admin_state == constants.ADMINST_UP:
              i_p_up_mem += beinfo[constants.BE_MAXMEM]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info["memory_total"],
          "reserved_memory": remote_info["memory_dom0"],
          "free_memory": remote_info["memory_free"],
          "total_disk": remote_info["vg_size"],
          "free_disk": remote_info["vg_free"],
          "total_cpus": remote_info["cpu_total"],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results
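
  # Worked example of the free-memory correction above (numbers hypothetical):
  # an instance with BE_MAXMEM = 1024 MiB currently using 768 MiB may still
  # grow by 256 MiB, so that amount is subtracted from the node's free memory:
  #
  #   i_used_mem = 768
  #   i_mem_diff = 1024 - i_used_mem            # 256
  #   memory_free = 2048 - max(0, i_mem_diff)   # 1792 MiB reported as free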

  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MAXMEM],
        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode}
                  for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data
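
  # Each per-instance entry built above roughly looks like this (values
  # hypothetical; "disk_space_total" includes DRBD metadata overhead):
  #
  #   "instance1.example.com": {
  #     "tags": [], "admin_state": "up", "vcpus": 1, "memory": 512,
  #     "spindle_use": 1, "os": "debootstrap",
  #     "nodes": ["node1.example.com", "node2.example.com"],
  #     "nics": [{"mac": "aa:00:00:dd:ee:ff", "ip": None,
  #               "mode": "bridged", "link": "xen-br0", "bridge": "xen-br0"}],
  #     "disks": [{"size": 1024, "mode": "rw"}],
  #     "disk_template": "drbd", "hypervisor": "xen-pvm",
  #     "disk_space_total": 1152,
  #   }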

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_INT_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "spindle_use": self.spindle_use,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      "hypervisor": self.hypervisor,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if instance.disk_template in constants.DTS_INT_MIRROR and \
        len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddNodeEvacuate(self):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }

  def _AddChangeGroup(self):
    """Get data for group-change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }

  def _BuildInputData(self, fn, keydata):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    for keyname, keytype in keydata:
      if keyname not in request:
        raise errors.ProgrammerError("Request parameter %s is missing" %
                                     keyname)
      val = request[keyname]
      if not keytype(val):
        raise errors.ProgrammerError("Request parameter %s doesn't pass"
                                     " validation, value %s, expected"
                                     " type %s" % (keyname, val, keytype))
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)
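
  # After _BuildInputData, self.in_text is the serialized document handed to
  # the external script; for an allocation it is roughly (abridged, values
  # hypothetical):
  #
  #   {
  #     "version": 2, "cluster_name": "cluster.example.com",
  #     "nodes": {...}, "nodegroups": {...}, "instances": {...},
  #     "request": {
  #       "type": "allocate", "name": "instance1.example.com",
  #       "memory": 512, "disks": [...], "disk_space_total": 1152,
  #       "required_nodes": 2, "hypervisor": "xen-pvm",
  #     },
  #   }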

  _STRING_LIST = ht.TListOf(ht.TString)
  _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
     # pylint: disable=E1101
     # Class '...' has no 'OP_ID' member
     "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                          opcodes.OpInstanceMigrate.OP_ID,
                          opcodes.OpInstanceReplaceDisks.OP_ID])
     })))

  _NEVAC_MOVED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(3),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  _NEVAC_FAILED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TMaybeString,
                                  ])))
  _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                          ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
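
  # A value accepted by _NEVAC_RESULT is a (moved, failed, jobs) triple;
  # a minimal hypothetical instance:
  #
  #   ([("instance1", "group1", ["node1", "node2"])],   # moved
  #    [("instance2", "disk is degraded")],             # failed
  #    [[{"OP_ID": opcodes.OpInstanceMigrate.OP_ID}]])  # opcode job list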

  _MODE_DATA = {
    constants.IALLOCATOR_MODE_ALLOC:
      (_AddNewInstance,
       [
        ("name", ht.TString),
        ("memory", ht.TInt),
        ("spindle_use", ht.TInt),
        ("disks", ht.TListOf(ht.TDict)),
        ("disk_template", ht.TString),
        ("os", ht.TString),
        ("tags", _STRING_LIST),
        ("nics", ht.TListOf(ht.TDict)),
        ("vcpus", ht.TInt),
        ("hypervisor", ht.TString),
        ], ht.TList),
    constants.IALLOCATOR_MODE_RELOC:
      (_AddRelocateInstance,
       [("name", ht.TString), ("relocate_from", _STRING_LIST)],
       ht.TList),
    constants.IALLOCATOR_MODE_NODE_EVAC:
      (_AddNodeEvacuate, [
        ("instances", _STRING_LIST),
        ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
        ], _NEVAC_RESULT),
    constants.IALLOCATOR_MODE_CHG_GROUP:
      (_AddChangeGroup, [
        ("instances", _STRING_LIST),
        ("target_groups", _STRING_LIST),
        ], _NEVAC_RESULT),
    }

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not self._result_check(self.result):
      raise errors.OpExecError("Iallocator returned invalid result,"
                               " expected %s, got %s" %
                               (self._result_check, self.result),
                               errors.ECODE_INVAL)

    if self.mode == constants.IALLOCATOR_MODE_RELOC:
      assert self.relocate_from is not None
      assert self.required_nodes == 1

      node2group = dict((name, ndata["group"])
                        for (name, ndata) in self.in_data["nodes"].items())

      fn = compat.partial(self._NodesToGroups, node2group,
                          self.in_data["nodegroups"])

      instance = self.cfg.GetInstanceInfo(self.name)
      request_groups = fn(self.relocate_from + [instance.primary_node])
      result_groups = fn(rdict["result"] + [instance.primary_node])

      if self.success and not set(result_groups).issubset(request_groups):
        raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
                                 " differ from original groups (%s)" %
                                 (utils.CommaJoin(result_groups),
                                  utils.CommaJoin(request_groups)))

    elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES

    self.out_data = rdict

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        continue

      try:
        group = groups[group_uuid]
      except KeyError:
        # Can't find group, let's use UUID
        group_name = group_uuid
      else:
        group_name = group["name"]

      result.add(group_name)

    return sorted(result)
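
  # _NodesToGroups in action (static method, data hypothetical):
  #
  #   node2group = {"node1": "uuid-a", "node2": "uuid-b"}
  #   groups = {"uuid-a": {"name": "group1"}}
  #   IAllocator._NodesToGroups(node2group, groups,
  #                             ["node1", "node2", "node3"])
  #   # -> ["group1", "uuid-b"]; "node3" is unknown and ignored, and the
  #   #    group missing for "node2" falls back to its UUID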


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["memory", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            constants.IDISK_SIZE not in row or
            not isinstance(row[constants.IDISK_SIZE], int) or
            constants.IDISK_MODE not in row or
            row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = \
        list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
    elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                          constants.IALLOCATOR_MODE_NODE_EVAC):
      if not self.op.instances:
        raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
      self.op.instances = _GetWantedInstances(self, self.op.instances)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       memory=self.op.memory,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       spindle_use=self.op.spindle_use,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       target_groups=self.op.target_groups)
    elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       evac_mode=self.op.evac_mode)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_CLUSTER: _ClusterQuery,
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  constants.QR_EXPORT: _ExportQuery,
  }

assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_VIA_OP}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
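
# Illustrative lookups against the table above (the second call shows the
# error path; "no-such-resource" is a made-up value):
#
#   _GetQueryImplementation(constants.QR_NODE)    # -> _NodeQuery
#   _GetQueryImplementation("no-such-resource")   # raises OpPrereqError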