# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar

# C0302: since we have waaaay too many lines in this module

import copy
import itertools
import logging
import re

import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes
from ganeti import rpc
from ganeti import runtime

import ganeti.masterd.instance # pylint: disable=W0611

#: Size of DRBD meta block device
_DRBD_META_SIZE = 128

INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]

#: Instance status in which an instance can be marked as offline/online
CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
  constants.ADMINST_OFFLINE,
  ]))
82 """Data container for LU results with jobs.
84 Instances of this class returned from L{LogicalUnit.Exec} will be recognized
85 by L{mcpu._ProcessResult}. The latter will then submit the jobs
86 contained in the C{jobs} attribute and include the job IDs in the opcode
90 def __init__(self, jobs, **kwargs):
91 """Initializes this class.
93 Additional return values can be specified as keyword arguments.
95 @type jobs: list of lists of L{opcode.OpCode}
96 @param jobs: A list of lists of opcode objects
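
  # Hedged usage sketch (not from this excerpt): an LU's Exec method could
  # hand follow-up work back to the master daemon like this; the opcode and
  # the keyword argument below are illustrative only.
  #
  #   def Exec(self, feedback_fn):
  #     jobs = [[opcodes.OpInstanceStartup(instance_name=name)]
  #             for name in affected_instances]
  #     return ResultWithJobs(jobs, warnings=[])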


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - implement BuildHooksNodes
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc_runner):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.glm = context.glm
    # readability alias
    self.owned_locks = context.glm.list_owned
    self.context = context
    self.rpc = rpc_runner
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # logging
    self.Log = processor.Log # pylint: disable=C0103
    self.LogWarning = processor.LogWarning # pylint: disable=C0103
    self.LogInfo = processor.LogInfo # pylint: disable=C0103
    self.LogStep = processor.LogStep # pylint: disable=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods need no longer worry about missing parameters.

    """

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that
        level (note that in this case C{DeclareLocks} won't be called
        at all for that level)
      - if you need locks at a level, but you can't calculate it in
        this function, initialise that level with an empty list and do
        further processing in L{LogicalUnit.DeclareLocks} (see that
        function's docstring)
      - don't put anything for the BGL level
      - if you want all locks at a level use L{locking.ALL_SET} as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of L{ganeti.locking.LEVELS}

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    @rtype: dict
    @return: Dictionary containing the environment that will be used for
      running the hooks for this LU. The keys of the dict must not be prefixed
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
      will extend the environment with additional variables. If no environment
      should be defined, an empty dictionary should be returned (not C{None}).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def BuildHooksNodes(self):
    """Build list of nodes to run LU's hooks.

    @rtype: tuple; (list, list)
    @return: Tuple containing a list of node names on which the hook
      should run before the execution and a list of node names on which the
      hook should run after the execution. No nodes should be returned as an
      empty list (and not None).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused argument and the
    # "could be a function" warnings
    # pylint: disable=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False,
                          level=locking.LEVEL_NODE):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances
    @param level: Which lock level to use for locking nodes

    """
    assert level in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
      self.needed_locks[level] = wanted_nodes
    elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
      self.needed_locks[level].extend(wanted_nodes)
    else:
      raise errors.ProgrammerError("Unknown recalculation mode")

    del self.recalculate_locks[level]


class NoHooksLU(LogicalUnit): # pylint: disable=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    raise AssertionError("BuildHooksEnv called for NoHooksLUs")

  def BuildHooksNodes(self):
    """Empty BuildHooksNodes for NoHooksLU.

    """
    raise AssertionError("BuildHooksNodes called for NoHooksLU")
444 """Tasklet base class.
446 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
447 they can mix legacy code with tasklets. Locking needs to be done in the LU,
448 tasklets know nothing about locks.
450 Subclasses must follow these rules:
451 - Implement CheckPrereq
455 def __init__(self, lu):
462 def CheckPrereq(self):
463 """Check prerequisites for this tasklets.
465 This method should check whether the prerequisites for the execution of
466 this tasklet are fulfilled. It can do internode communication, but it
467 should be idempotent - no cluster or system changes are allowed.
469 The method should raise errors.OpPrereqError in case something is not
470 fulfilled. Its return value is ignored.
472 This method should also update all parameters to their canonical form if it
473 hasn't been done before.
478 def Exec(self, feedback_fn):
479 """Execute the tasklet.
481 This method should implement the actual work. It should raise
482 errors.OpExecError for failures that are somewhat dealt with in code, or
486 raise NotImplementedError
490 """Base for query utility classes.
493 #: Attribute holding field definitions
499 def __init__(self, qfilter, fields, use_locking):
500 """Initializes this class.
503 self.use_locking = use_locking
505 self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
506 namefield=self.SORT_FIELD)
507 self.requested_data = self.query.RequestedData()
508 self.names = self.query.RequestedNames()
510 # Sort only if no names were requested
511 self.sort_by_name = not self.names
513 self.do_locking = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    """
    if self.do_locking:
      names = lu.owned_locks(lock_level)
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.glm.is_owned(lock_level)

    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
                                  sort_by_name=self.sort_by_name)

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu),
                                    sort_by_name=self.sort_by_name)
582 """Returns a dict declaring all lock levels shared.
585 return dict.fromkeys(locking.LEVELS, 1)


def _MakeLegacyNodeInfo(data):
  """Formats the data returned by L{rpc.RpcRunner.call_node_info}.

  Converts the data into a single dictionary. This is fine for most use cases,
  but some require information from more than one volume group or hypervisor.

  """
  (bootid, (vg_info, ), (hv_info, )) = data

  return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
    "bootid": bootid,
    })


def _AnnotateDiskParams(instance, devs, cfg):
  """Little helper wrapper to the rpc annotation method.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of their children!)
  @param cfg: The config object
  @returns: The annotated disk copies
  @see L{rpc.AnnotateDiskParams}

  """
  return rpc.AnnotateDiskParams(instance.disk_template, devs,
                                cfg.GetInstanceDiskParams(instance))


def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
                              cur_group_uuid):
  """Checks if node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance name as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_nodes: iterable of string
  @param owned_nodes: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for (name, inst) in instances.items():
    assert owned_nodes.issuperset(inst.all_nodes), \
      "Instance %s's nodes changed while we kept the lock" % name

    inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)

    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (name, cur_group_uuid)


def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type instance_name: string
  @param instance_name: Instance name
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups

  """
  inst_groups = cfg.GetInstanceNodeGroups(instance_name)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups"
                               " are '%s', owning groups '%s'; retry the"
                               " operation" %
                               (instance_name,
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instances: set or frozenset
  @param owned_instances: List of currently owned instances

  """
  wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
  if owned_instances != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instances)),
                               errors.ECODE_STATE)

  return wanted_instances


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  else:
    return names[:]


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
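
# Hedged behavior sketch for _GetUpdatedParams (values invented): with
# use_default=True (the default), constants.VALUE_DEFAULT removes a key so
# it reverts to its default, while other values overwrite.
#
#   >>> _GetUpdatedParams({"vcpus": 2, "memory": 512},
#   ...                   {"memory": constants.VALUE_DEFAULT, "vcpus": 4})
#   {'vcpus': 4}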


def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
  """Return the new version of an instance policy.

  @param group_policy: whether this policy applies to a group and thus
    we should support removal of policy entries

  """
  use_none = use_default = group_policy
  ipolicy = copy.deepcopy(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key not in constants.IPOLICY_ALL_KEYS:
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                 errors.ECODE_INVAL)
    if key in constants.IPOLICY_ISPECS:
      utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
      ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                       use_none=use_none,
                                       use_default=use_default)
    else:
      if (not value or value == [constants.VALUE_DEFAULT] or
          value == constants.VALUE_DEFAULT):
        if group_policy:
          del ipolicy[key]
        else:
          raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                     " on the cluster" % key,
                                     errors.ECODE_INVAL)
      else:
        if key in constants.IPOLICY_PARAMETERS:
          # FIXME: we assume all such values are float
          try:
            ipolicy[key] = float(value)
          except (TypeError, ValueError), err:
            raise errors.OpPrereqError("Invalid value for attribute"
                                       " '%s': '%s', error: %s" %
                                       (key, value, err), errors.ECODE_INVAL)
        else:
          # FIXME: we assume all others are lists; this should be redone
          ipolicy[key] = list(value)
  try:
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                               errors.ECODE_INVAL)
  return ipolicy


def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable to ForceDictType to verify correct types
  @returns: A new dict with updated and verified values

  """
  def fn(old, value):
    new = _GetUpdatedParams(old, value)
    utils.ForceDictType(new, type_check)
    return new

  ret = copy.deepcopy(base)
  ret.update(dict((key, fn(base.get(key, {}), value))
                  for key, value in updates.items()))

  return ret
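
# Hedged usage sketch for _UpdateAndVerifySubDict; the sub-dict keys and the
# type-check dict below are illustrative, not taken from this module:
#
#   >>> base = {"xen-pvm": {"root_path": "/dev/xvda1"}}
#   >>> updates = {"xen-pvm": {"use_bootloader": True}}
#   >>> _UpdateAndVerifySubDict(base, updates,
#   ...                         constants.HVS_PARAMETER_TYPES)
#   {'xen-pvm': {'root_path': '/dev/xvda1', 'use_bootloader': True}}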


def _MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with the one of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
    if invalid_hvs:
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                                 " %s" % utils.CommaJoin(invalid_hvs),
                                 errors.ECODE_INVAL)
    if obj_input is None:
      obj_input = {}
    type_check = constants.HVSTS_PARAMETER_TYPES
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)

  return None


def _MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with the one of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
    if invalid_dst:
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                                 utils.CommaJoin(invalid_dst),
                                 errors.ECODE_INVAL)
    type_check = constants.DSS_PARAMETER_TYPES
    if obj_input is None:
      obj_input = {}
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                              type_check))
                for key, value in op_input.items())

  return None


def _ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"
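
# Hedged usage sketch (lock level and attribute name are illustrative): keep
# only the lock on the node we still operate on and release the rest.
#
#   _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.node_name])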


def _MapInstanceDisksToNodes(instances):
  """Creates a map from (node, volume) to instance name.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node name, volume name) as key,
      instance name as value

  """
  return dict(((node, vol), inst.name)
              for inst in instances
              for (node, vols) in inst.MapLVsByNode().items()
              for vol in vols)


def _RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
  except Exception, err: # pylint: disable=W0703
    lu.LogWarning("Errors occurred running hooks on %s: %s" % (node_name, err))


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = static
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = "can't use instance from outside %s states" % ", ".join(req_states)
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP not in req_states:
    pnode = instance.primary_node
    if not lu.cfg.GetNodeInfo(pnode).offline:
      ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
      ins_l.Raise("Can't contact node %s for instance information" % pnode,
                  prereq=True, ecode=errors.ECODE_ENVIRON)
      if instance.name in ins_l.payload:
        raise errors.OpPrereqError("Instance %s is running, %s" %
                                   (instance.name, msg), errors.ECODE_STATE)
    else:
      lu.LogWarning("Primary node offline, ignoring check that instance"
                    " is down")


def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ipolicy: dictionary containing min, max and std values
  @param value: actual value that we want to use
  @return: None or an element not meeting the criteria

  """
  if value in [None, constants.VALUE_AUTO]:
    return None
  max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
  min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
  if value > max_v or min_v > value:
    if qualifier:
      fqn = "%s/%s" % (name, qualifier)
    else:
      fqn = name
    return ("%s value %s is not in range [%s, %s]" %
            (fqn, value, min_v, max_v))
  return None
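
# Hedged example (policy values invented): with min 128 and max 1024 for
# memory-size, a request for 2048 yields an error string, while 512 would
# yield None.
#
#   >>> pol = {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 128},
#   ...        constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 1024}}
#   >>> _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, None, pol, 2048)
#   'memory-size value 2048 is not in range [128, 1024]'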


def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                 nic_count, disk_sizes, spindle_use,
                                 _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  assert disk_count == len(disk_sizes)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_DISK_COUNT, "", disk_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]

  return filter(None,
                (_compute_fn(name, qualifier, ipolicy, value)
                 for (name, qualifier, value) in test_settings))
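
# Hedged call sketch (numbers invented): check a 2-vCPU, 768 MB instance with
# one 1024 MB disk, one NIC and one spindle against a policy, and fail the
# prerequisite check if anything is out of range.
#
#   violations = _ComputeIPolicySpecViolation(pol, 768, 2, 1, 1, [1024], 1)
#   if violations:
#     raise errors.OpPrereqError(utils.CommaJoin(violations),
#                                errors.ECODE_INVAL)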


def _ComputeIPolicyInstanceViolation(ipolicy, instance,
                                     _compute_fn=_ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
  cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
  spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
  disk_count = len(instance.disks)
  disk_sizes = [disk.size for disk in instance.disks]
  nic_count = len(instance.nics)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use)


def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
                                         _compute_fn=_ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use)


def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group,
                                 _compute_fn=_ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  if current_group == target_group:
    return []
  else:
    return _compute_fn(ipolicy, instance)


def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
                            _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group)

  if res:
    msg = ("Instance does not meet target node group's (%s) instance"
           " policy: %s") % (node.group, utils.CommaJoin(res))
    if ignore:
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @return: A list of instances which violate the new ipolicy but
      did not violate the old one

  """
  return (_ComputeViolatingInstances(new_ipolicy, instances) -
          _ComputeViolatingInstances(old_ipolicy, instances))


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
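
# Hedged usage sketch, as commonly seen in ExpandNames implementations (the
# opcode attribute name is illustrative): canonicalize a user-supplied short
# name before declaring locks on it.
#
#   self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)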


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          minmem, maxmem, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.7) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
    }
  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
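
# Hedged illustration of the resulting environment (values invented): for a
# one-NIC bridged instance the hook scripts would see, among others,
# GANETI_INSTANCE_NAME, GANETI_INSTANCE_PRIMARY, GANETI_INSTANCE_NIC0_MAC,
# GANETI_INSTANCE_NIC0_BRIDGE and GANETI_INSTANCE_NIC_COUNT=1; the "GANETI_"
# prefix is added by the hooks runner, not by this function.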


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node": instance.primary_node,
    "secondary_nodes": instance.secondary_nodes,
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": _NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.size, disk.mode) for disk in instance.disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
    }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should
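
# Hedged arithmetic example (numbers invented): with candidate_pool_size=10,
# three current candidates and mc_should=3, the new node bumps the target to
# min(3 + 1, 10) = 4, and since 3 < 4 the node promotes itself.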


def _CalculateGroupIPolicy(cluster, group):
  """Calculate instance policy for group.

  """
  return cluster.SimpleFillIPolicy(group.ipolicy)


def _ComputeViolatingInstances(ipolicy, instances):
  """Computes a set of instances that violate the given ipolicy.

  @param ipolicy: The ipolicy to verify
  @type instances: list of L{objects.Instance}
  @param instances: List of instances to verify
  @return: A frozenset of instance names violating the ipolicy

  """
  return frozenset([inst.name for inst in instances
                    if _ComputeIPolicyInstanceViolation(ipolicy, inst)])


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc_runner.call_blockdev_getmirrorstatus(node_name, (instance.disks,
                                                                instance))
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)


def _GetDefaultIAllocator(cfg, iallocator):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type iallocator: string or None
  @param iallocator: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not iallocator:
    # Use default iallocator
    iallocator = cfg.GetDefaultIAllocator()

  if not iallocator:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return iallocator


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
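
# Hedged example (host names invented): asking for "node1" that resolves to
# "node1.example.com" passes (and logs the expansion), whereas a resolver
# answer of "other.example.com" would fail the prefix check above and raise
# OpPrereqError.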


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [self.cfg.GetMasterNode()])

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master_params = self.cfg.GetMasterNetworkParameters()

    # Run post hooks on master node before it's removed
    _RunPostHook(self, master_params.name)

    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_deactivate_master_ip(master_params.name,
                                                     master_params, ems)
    if result.fail_msg:
      self.LogWarning("Error disabling the master IP address: %s",
                      result.fail_msg)

    return master_params.name


def _VerifyCertificate(filename):
  """Verifies a certificate for L{LUClusterVerifyConfig}.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable=W0703
    return (LUClusterVerifyConfig.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


def _GetAllHypervisorParameters(cluster, instances):
  """Compute the set of all hypervisor parameters.

  @type cluster: L{objects.Cluster}
  @param cluster: the cluster object
  @type instances: list of L{objects.Instance}
  @param instances: additional instances from which to obtain parameters
  @rtype: list of (origin, hypervisor, parameters)
  @return: a list with all parameters found, indicating the hypervisor they
       apply to, and the origin (can be "cluster", "os X", or "instance Y")

  """
  hvp_data = []

  for hv_name in cluster.enabled_hypervisors:
    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))

  for os_name, os_hvp in cluster.os_hvp.items():
    for hv_name, hv_params in os_hvp.items():
      if hv_params:
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
        hvp_data.append(("os %s" % os_name, hv_name, full_params))

  # TODO: collapse identical parameter values in a single one
  for instance in instances:
    if instance.hvparams:
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
                       cluster.FillHV(instance)))

  return hvp_data


class _VerifyErrors(object):
  """Mix-in for cluster/group verify LUs.

  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
  self.op and self._feedback_fn to be available.)

  """

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt, _ = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101

  def _ErrorIf(self, cond, ecode, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = (bool(cond)
            or self.op.debug_simulate_errors) # pylint: disable=E1101

    # If the error code is in the list of ignored errors, demote the error to a
    # warning
    (_, etxt, _) = ecode
    if etxt in self.op.ignore_errors: # pylint: disable=E1101
      kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING

    if cond:
      self._Error(ecode, *args, **kwargs)

    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond
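
  # Hedged usage sketch (error code and message are illustrative): a verify
  # LU would report a per-node problem like this; self.bad is updated as a
  # side effect when the condition holds and the error is not demoted.
  #
  #   self._ErrorIf(test, constants.CV_ENODEHV, node,
  #                 "hypervisor verify failure: '%s'", hv_result)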
1929 class LUClusterVerify(NoHooksLU):
1930 """Submits all jobs necessary to verify the cluster.
1935 def ExpandNames(self):
1936 self.needed_locks = {}
1938 def Exec(self, feedback_fn):
1941 if self.op.group_name:
1942 groups = [self.op.group_name]
1943 depends_fn = lambda: None
1945 groups = self.cfg.GetNodeGroupList()
1947 # Verify global configuration
1949 opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors)
1952 # Always depend on global verification
1953 depends_fn = lambda: [(-len(jobs), [])]
1955 jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
1956 ignore_errors=self.op.ignore_errors,
1957 depends=depends_fn())]
1958 for group in groups)
1960 # Fix up all parameters
1961 for op in itertools.chain(*jobs): # pylint: disable=W0142
1962 op.debug_simulate_errors = self.op.debug_simulate_errors
1963 op.verbose = self.op.verbose
1964 op.error_codes = self.op.error_codes
1965 try:
1966 op.skip_checks = self.op.skip_checks
1967 except AttributeError:
1968 assert not isinstance(op, opcodes.OpClusterVerifyGroup)
1970 return ResultWithJobs(jobs)
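# Sketch of the resulting submission for a hypothetical two-group cluster
# (no group_name given): jobs ends up as
#   [[OpClusterVerifyConfig(...)],
#    [OpClusterVerifyGroup(group_name="group1", depends=[(-1, [])], ...)],
#    [OpClusterVerifyGroup(group_name="group2", depends=[(-2, [])], ...)]]
# Each per-group job points back at the global config verification via a
# negative (relative) job ID, so it only runs once that job has finished.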
1973 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
1974 """Verifies the cluster config.
1979 def _VerifyHVP(self, hvp_data):
1980 """Verifies locally the syntax of the hypervisor parameters.
1983 for item, hv_name, hv_params in hvp_data:
1984 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
1987 hv_class = hypervisor.GetHypervisorClass(hv_name)
1988 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1989 hv_class.CheckParameterSyntax(hv_params)
1990 except errors.GenericError, err:
1991 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
1993 def ExpandNames(self):
1994 self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
1995 self.share_locks = _ShareAll()
1997 def CheckPrereq(self):
1998 """Check prerequisites.
2001 # Retrieve all information
2002 self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
2003 self.all_node_info = self.cfg.GetAllNodesInfo()
2004 self.all_inst_info = self.cfg.GetAllInstancesInfo()
2006 def Exec(self, feedback_fn):
2007 """Verify integrity of cluster, performing various test on nodes.
2011 self._feedback_fn = feedback_fn
2013 feedback_fn("* Verifying cluster config")
2015 for msg in self.cfg.VerifyConfig():
2016 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
2018 feedback_fn("* Verifying cluster certificate files")
2020 for cert_filename in constants.ALL_CERT_FILES:
2021 (errcode, msg) = _VerifyCertificate(cert_filename)
2022 self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
2024 feedback_fn("* Verifying hypervisor parameters")
2026 self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
2027 self.all_inst_info.values()))
2029 feedback_fn("* Verifying all nodes belong to an existing group")
2031 # We do this verification here because, should this bogus circumstance
2032 # occur, it would never be caught by VerifyGroup, which only acts on
2033 # nodes/instances reachable from existing node groups.
2035 dangling_nodes = set(node.name for node in self.all_node_info.values()
2036 if node.group not in self.all_group_info)
2038 dangling_instances = {}
2039 no_node_instances = []
2041 for inst in self.all_inst_info.values():
2042 if inst.primary_node in dangling_nodes:
2043 dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
2044 elif inst.primary_node not in self.all_node_info:
2045 no_node_instances.append(inst.name)
2046 pretty_dangling = [
2047 "%s (%s)" %
2048 (node.name,
2050 utils.CommaJoin(dangling_instances.get(node.name,
2051 ["no instances"])))
2052 for node in dangling_nodes]
2054 self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
2055 None,
2056 "the following nodes (and their instances) belong to a non-"
2057 "existing group: %s", utils.CommaJoin(pretty_dangling))
2059 self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
2060 None,
2061 "the following instances have a non-existing primary-node:"
2062 " %s", utils.CommaJoin(no_node_instances))
2064 return not self.bad
2067 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
2068 """Verifies the status of a node group.
2071 HPATH = "cluster-verify"
2072 HTYPE = constants.HTYPE_CLUSTER
2075 _HOOKS_INDENT_RE = re.compile("^", re.M)
2077 class NodeImage(object):
2078 """A class representing the logical and physical status of a node.
2081 @ivar name: the node name to which this object refers
2082 @ivar volumes: a structure as returned from
2083 L{ganeti.backend.GetVolumeList} (runtime)
2084 @ivar instances: a list of running instances (runtime)
2085 @ivar pinst: list of configured primary instances (config)
2086 @ivar sinst: list of configured secondary instances (config)
2087 @ivar sbp: dictionary of {primary-node: list of instances} for all
2088 instances for which this node is secondary (config)
2089 @ivar mfree: free memory, as reported by hypervisor (runtime)
2090 @ivar dfree: free disk, as reported by the node (runtime)
2091 @ivar offline: the offline status (config)
2092 @type rpc_fail: boolean
2093 @ivar rpc_fail: whether the RPC verify call was successful (overall,
2094 not whether the individual keys were correct) (runtime)
2095 @type lvm_fail: boolean
2096 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
2097 @type hyp_fail: boolean
2098 @ivar hyp_fail: whether the RPC call didn't return the instance list
2099 @type ghost: boolean
2100 @ivar ghost: whether this is a known node or not (config)
2101 @type os_fail: boolean
2102 @ivar os_fail: whether the RPC call didn't return valid OS data
2104 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
2105 @type vm_capable: boolean
2106 @ivar vm_capable: whether the node can host instances
2108 """
2109 def __init__(self, offline=False, name=None, vm_capable=True):
2110 self.name = name
2111 self.volumes = {}
2112 self.instances = []
2113 self.pinst = []
2114 self.sinst = []
2115 self.sbp = {}
2116 self.mfree = 0
2117 self.dfree = 0
2118 self.offline = offline
2119 self.vm_capable = vm_capable
2120 self.rpc_fail = False
2121 self.lvm_fail = False
2122 self.hyp_fail = False
2123 self.ghost = False
2124 self.os_fail = False
2125 self.oslist = {}
2127 def ExpandNames(self):
2128 # This raises errors.OpPrereqError on its own:
2129 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
2131 # Get instances in node group; this is unsafe and needs verification later
2132 inst_names = \
2133 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2135 self.needed_locks = {
2136 locking.LEVEL_INSTANCE: inst_names,
2137 locking.LEVEL_NODEGROUP: [self.group_uuid],
2138 locking.LEVEL_NODE: [],
2141 self.share_locks = _ShareAll()
2143 def DeclareLocks(self, level):
2144 if level == locking.LEVEL_NODE:
2145 # Get members of node group; this is unsafe and needs verification later
2146 nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
2148 all_inst_info = self.cfg.GetAllInstancesInfo()
2150 # In Exec(), we warn about mirrored instances that have primary and
2151 # secondary living in separate node groups. To fully verify that
2152 # volumes for these instances are healthy, we will need to do an
2153 # extra call to their secondaries. We ensure here those nodes will
2155 for inst in self.owned_locks(locking.LEVEL_INSTANCE):
2156 # Important: access only the instances whose lock is owned
2157 if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
2158 nodes.update(all_inst_info[inst].secondary_nodes)
2160 self.needed_locks[locking.LEVEL_NODE] = nodes
2162 def CheckPrereq(self):
2163 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
2164 self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
2166 group_nodes = set(self.group_info.members)
2167 group_instances = \
2168 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2170 unlocked_nodes = \
2171 group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2173 unlocked_instances = \
2174 group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
2176 if unlocked_nodes:
2177 raise errors.OpPrereqError("Missing lock for nodes: %s" %
2178 utils.CommaJoin(unlocked_nodes),
2179 errors.ECODE_STATE)
2181 if unlocked_instances:
2182 raise errors.OpPrereqError("Missing lock for instances: %s" %
2183 utils.CommaJoin(unlocked_instances),
2184 errors.ECODE_STATE)
2186 self.all_node_info = self.cfg.GetAllNodesInfo()
2187 self.all_inst_info = self.cfg.GetAllInstancesInfo()
2189 self.my_node_names = utils.NiceSort(group_nodes)
2190 self.my_inst_names = utils.NiceSort(group_instances)
2192 self.my_node_info = dict((name, self.all_node_info[name])
2193 for name in self.my_node_names)
2195 self.my_inst_info = dict((name, self.all_inst_info[name])
2196 for name in self.my_inst_names)
2198 # We detect here the nodes that will need the extra RPC calls for verifying
2199 # split LV volumes; they should be locked.
2200 extra_lv_nodes = set()
2202 for inst in self.my_inst_info.values():
2203 if inst.disk_template in constants.DTS_INT_MIRROR:
2204 for nname in inst.all_nodes:
2205 if self.all_node_info[nname].group != self.group_uuid:
2206 extra_lv_nodes.add(nname)
2208 unlocked_lv_nodes = \
2209 extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2211 if unlocked_lv_nodes:
2212 raise errors.OpPrereqError("Missing node locks for LV check: %s" %
2213 utils.CommaJoin(unlocked_lv_nodes),
2215 self.extra_lv_nodes = list(extra_lv_nodes)
2217 def _VerifyNode(self, ninfo, nresult):
2218 """Perform some basic validation on data returned from a node.
2220 - check the result data structure is well formed and has all the
2222 - check ganeti version
2224 @type ninfo: L{objects.Node}
2225 @param ninfo: the node to check
2226 @param nresult: the results from the node
2228 @return: whether overall this call was successful (and we can expect
2229 reasonable values in the response)
2231 """
2232 node = ninfo.name
2233 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2235 # main result, nresult should be a non-empty dict
2236 test = not nresult or not isinstance(nresult, dict)
2237 _ErrorIf(test, constants.CV_ENODERPC, node,
2238 "unable to verify node: no data returned")
2242 # compares ganeti version
2243 local_version = constants.PROTOCOL_VERSION
2244 remote_version = nresult.get("version", None)
2245 test = not (remote_version and
2246 isinstance(remote_version, (list, tuple)) and
2247 len(remote_version) == 2)
2248 _ErrorIf(test, constants.CV_ENODERPC, node,
2249 "connection to node returned invalid data")
2253 test = local_version != remote_version[0]
2254 _ErrorIf(test, constants.CV_ENODEVERSION, node,
2255 "incompatible protocol versions: master %s,"
2256 " node %s", local_version, remote_version[0])
2260 # node seems compatible, we can actually try to look into its results
2262 # full package version
2263 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
2264 constants.CV_ENODEVERSION, node,
2265 "software version mismatch: master %s, node %s",
2266 constants.RELEASE_VERSION, remote_version[1],
2267 code=self.ETYPE_WARNING)
2269 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
2270 if ninfo.vm_capable and isinstance(hyp_result, dict):
2271 for hv_name, hv_result in hyp_result.iteritems():
2272 test = hv_result is not None
2273 _ErrorIf(test, constants.CV_ENODEHV, node,
2274 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
2276 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
2277 if ninfo.vm_capable and isinstance(hvp_result, list):
2278 for item, hv_name, hv_result in hvp_result:
2279 _ErrorIf(True, constants.CV_ENODEHV, node,
2280 "hypervisor %s parameter verify failure (source %s): %s",
2281 hv_name, item, hv_result)
2283 test = nresult.get(constants.NV_NODESETUP,
2284 ["Missing NODESETUP results"])
2285 _ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
2286 "; ".join(test))
2288 return True
2290 def _VerifyNodeTime(self, ninfo, nresult,
2291 nvinfo_starttime, nvinfo_endtime):
2292 """Check the node time.
2294 @type ninfo: L{objects.Node}
2295 @param ninfo: the node to check
2296 @param nresult: the remote results for the node
2297 @param nvinfo_starttime: the start time of the RPC call
2298 @param nvinfo_endtime: the end time of the RPC call
2300 """
2301 node = ninfo.name
2302 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2304 ntime = nresult.get(constants.NV_TIME, None)
2305 try:
2306 ntime_merged = utils.MergeTime(ntime)
2307 except (ValueError, TypeError):
2308 _ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time")
2309 return
2311 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
2312 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
2313 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
2314 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
2315 else:
2316 ntime_diff = None
2318 _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node,
2319 "Node time diverges by at least %s from master node time",
2320 ntime_diff)
2322 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
2323 """Check the node LVM results.
2325 @type ninfo: L{objects.Node}
2326 @param ninfo: the node to check
2327 @param nresult: the remote results for the node
2328 @param vg_name: the configured VG name
2330 """
2331 if vg_name is None:
2332 return
2334 node = ninfo.name
2335 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2337 # checks vg existence and size > 20G
2338 vglist = nresult.get(constants.NV_VGLIST, None)
2339 test = vglist is None
2340 _ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
2341 if not test:
2342 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
2343 constants.MIN_VG_SIZE)
2344 _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
2347 pvlist = nresult.get(constants.NV_PVLIST, None)
2348 test = pvlist is None
2349 _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
2351 # check that ':' is not present in PV names, since it's a
2352 # special character for lvcreate (denotes the range of PEs to
2354 for _, pvname, owner_vg in pvlist:
2355 test = ":" in pvname
2356 _ErrorIf(test, constants.CV_ENODELVM, node,
2357 "Invalid character ':' in PV '%s' of VG '%s'",
2360 def _VerifyNodeBridges(self, ninfo, nresult, bridges):
2361 """Check the node bridges.
2363 @type ninfo: L{objects.Node}
2364 @param ninfo: the node to check
2365 @param nresult: the remote results for the node
2366 @param bridges: the expected list of bridges
2368 """
2369 if not bridges:
2370 return
2372 node = ninfo.name
2373 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2375 missing = nresult.get(constants.NV_BRIDGES, None)
2376 test = not isinstance(missing, list)
2377 _ErrorIf(test, constants.CV_ENODENET, node,
2378 "did not return valid bridge information")
2379 if not test:
2380 _ErrorIf(bool(missing), constants.CV_ENODENET, node,
2381 "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
2383 def _VerifyNodeUserScripts(self, ninfo, nresult):
2384 """Check the results of user scripts presence and executability on the node
2386 @type ninfo: L{objects.Node}
2387 @param ninfo: the node to check
2388 @param nresult: the remote results for the node
2391 """
2392 node = ninfo.name
2393 test = constants.NV_USERSCRIPTS not in nresult
2394 self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node,
2395 "did not return user scripts information")
2397 broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
2398 if broken_scripts:
2399 self._ErrorIf(True, constants.CV_ENODEUSERSCRIPTS, node,
2400 "user scripts not present or not executable: %s" %
2401 utils.CommaJoin(sorted(broken_scripts)))
2403 def _VerifyNodeNetwork(self, ninfo, nresult):
2404 """Check the node network connectivity results.
2406 @type ninfo: L{objects.Node}
2407 @param ninfo: the node to check
2408 @param nresult: the remote results for the node
2410 """
2411 node = ninfo.name
2412 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2414 test = constants.NV_NODELIST not in nresult
2415 _ErrorIf(test, constants.CV_ENODESSH, node,
2416 "node hasn't returned node ssh connectivity data")
2417 if not test:
2418 if nresult[constants.NV_NODELIST]:
2419 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
2420 _ErrorIf(True, constants.CV_ENODESSH, node,
2421 "ssh communication with node '%s': %s", a_node, a_msg)
2423 test = constants.NV_NODENETTEST not in nresult
2424 _ErrorIf(test, constants.CV_ENODENET, node,
2425 "node hasn't returned node tcp connectivity data")
2427 if nresult[constants.NV_NODENETTEST]:
2428 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
2430 _ErrorIf(True, constants.CV_ENODENET, node,
2431 "tcp communication with node '%s': %s",
2432 anode, nresult[constants.NV_NODENETTEST][anode])
2434 test = constants.NV_MASTERIP not in nresult
2435 _ErrorIf(test, constants.CV_ENODENET, node,
2436 "node hasn't returned node master IP reachability data")
2438 if not nresult[constants.NV_MASTERIP]:
2439 if node == self.master_node:
2440 msg = "the master node cannot reach the master IP (not configured?)"
2442 msg = "cannot reach the master IP"
2443 _ErrorIf(True, constants.CV_ENODENET, node, msg)
2445 def _VerifyInstance(self, instance, instanceconfig, node_image,
2446 diskstatus):
2447 """Verify an instance.
2449 This function checks to see if the required block devices are
2450 available on the instance's node.
2452 """
2453 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2454 node_current = instanceconfig.primary_node
2456 node_vol_should = {}
2457 instanceconfig.MapLVsByNode(node_vol_should)
2459 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), self.group_info)
2460 err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
2461 _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err))
2463 for node in node_vol_should:
2464 n_img = node_image[node]
2465 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2466 # ignore missing volumes on offline or broken nodes
2467 continue
2468 for volume in node_vol_should[node]:
2469 test = volume not in n_img.volumes
2470 _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
2471 "volume %s missing on node %s", volume, node)
2473 if instanceconfig.admin_state == constants.ADMINST_UP:
2474 pri_img = node_image[node_current]
2475 test = instance not in pri_img.instances and not pri_img.offline
2476 _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
2477 "instance not running on its primary node %s",
2480 diskdata = [(nname, success, status, idx)
2481 for (nname, disks) in diskstatus.items()
2482 for idx, (success, status) in enumerate(disks)]
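# Illustration (hypothetical data): given the per-instance diskstatus
#   {"node1": [(True, st0), (True, st1)],
#    "node2": [(False, "connection failed"), (True, st1)]}
# the comprehension above flattens it into
#   [("node1", True, st0, 0), ("node1", True, st1, 1),
#    ("node2", False, "connection failed", 0), ("node2", True, st1, 1)]
# so each (node, disk index) pair can be checked independently below.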
2484 for nname, success, bdev_status, idx in diskdata:
2485 # the 'ghost node' construction in Exec() ensures that we have a
2486 # node here
2487 snode = node_image[nname]
2488 bad_snode = snode.ghost or snode.offline
2489 _ErrorIf(instanceconfig.admin_state == constants.ADMINST_UP and
2490 not success and not bad_snode,
2491 constants.CV_EINSTANCEFAULTYDISK, instance,
2492 "couldn't retrieve status for disk/%s on %s: %s",
2493 idx, nname, bdev_status)
2494 _ErrorIf((instanceconfig.admin_state == constants.ADMINST_UP and
2495 success and bdev_status.ldisk_status == constants.LDS_FAULTY),
2496 constants.CV_EINSTANCEFAULTYDISK, instance,
2497 "disk/%s on %s is faulty", idx, nname)
2499 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
2500 """Verify if there are any unknown volumes in the cluster.
2502 The .os, .swap and backup volumes are ignored. All other volumes are
2503 reported as unknown.
2505 @type reserved: L{ganeti.utils.FieldSet}
2506 @param reserved: a FieldSet of reserved volume names
2509 for node, n_img in node_image.items():
2510 if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
2511 self.all_node_info[node].group != self.group_uuid):
2512 # skip non-healthy nodes
2513 continue
2514 for volume in n_img.volumes:
2515 test = ((node not in node_vol_should or
2516 volume not in node_vol_should[node]) and
2517 not reserved.Matches(volume))
2518 self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
2519 "volume %s is unknown", volume)
2521 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
2522 """Verify N+1 Memory Resilience.
2524 Check that if one single node dies we can still start all the
2525 instances it was primary for.
2528 cluster_info = self.cfg.GetClusterInfo()
2529 for node, n_img in node_image.items():
2530 # This code checks that every node which is now listed as
2531 # secondary has enough memory to host all instances it is
2532 # supposed to should a single other node in the cluster fail.
2533 # FIXME: not ready for failover to an arbitrary node
2534 # FIXME: does not support file-backed instances
2535 # WARNING: we currently take into account down instances as well
2536 # as up ones, considering that even if they're down someone
2537 # might want to start them even in the event of a node failure.
2538 if n_img.offline or self.all_node_info[node].group != self.group_uuid:
2539 # we're skipping nodes marked offline and nodes in other groups from
2540 # the N+1 warning, since most likely we don't have good memory
2541 # information from them; we already list instances living on such
2542 # nodes, and that's enough warning
2543 continue
2544 # TODO(dynmem): also consider ballooning out other instances
2545 for prinode, instances in n_img.sbp.items():
2546 needed_mem = 0
2547 for instance in instances:
2548 bep = cluster_info.FillBE(instance_cfg[instance])
2549 if bep[constants.BE_AUTO_BALANCE]:
2550 needed_mem += bep[constants.BE_MINMEM]
2551 test = n_img.mfree < needed_mem
2552 self._ErrorIf(test, constants.CV_ENODEN1, node,
2553 "not enough memory to accomodate instance failovers"
2554 " should node %s fail (%dMiB needed, %dMiB available)",
2555 prinode, needed_mem, n_img.mfree)
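# Worked example (hypothetical numbers): if node2 is secondary for two
# auto-balanced instances whose primary is node1, with BE_MINMEM values of
# 1024 and 2048 MiB, needed_mem is 3072 MiB; an n_img.mfree of 2500 MiB on
# node2 would then trigger the CV_ENODEN1 error above.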
2557 @classmethod
2558 def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
2559 (files_all, files_opt, files_mc, files_vm)):
2560 """Verifies file checksums collected from all nodes.
2562 @param errorif: Callback for reporting errors
2563 @param nodeinfo: List of L{objects.Node} objects
2564 @param master_node: Name of master node
2565 @param all_nvinfo: RPC results
2567 """
2568 # Define functions determining which nodes to consider for a file
2569 files2nodefn = [
2570 (files_all, None),
2571 (files_mc, lambda node: (node.master_candidate or
2572 node.name == master_node)),
2573 (files_vm, lambda node: node.vm_capable),
2574 ]
2576 # Build mapping from filename to list of nodes which should have the file
2577 nodefiles = {}
2578 for (files, fn) in files2nodefn:
2579 if fn is None:
2580 filenodes = nodeinfo
2581 else:
2582 filenodes = filter(fn, nodeinfo)
2583 nodefiles.update((filename,
2584 frozenset(map(operator.attrgetter("name"), filenodes)))
2585 for filename in files)
2587 assert set(nodefiles) == (files_all | files_mc | files_vm)
2589 fileinfo = dict((filename, {}) for filename in nodefiles)
2590 ignore_nodes = set()
2592 for node in nodeinfo:
2593 if node.offline:
2594 ignore_nodes.add(node.name)
2595 continue
2597 nresult = all_nvinfo[node.name]
2599 if nresult.fail_msg or not nresult.payload:
2600 node_files = None
2601 else:
2602 node_files = nresult.payload.get(constants.NV_FILELIST, None)
2604 test = not (node_files and isinstance(node_files, dict))
2605 errorif(test, constants.CV_ENODEFILECHECK, node.name,
2606 "Node did not return file checksum data")
2608 ignore_nodes.add(node.name)
2611 # Build per-checksum mapping from filename to nodes having it
2612 for (filename, checksum) in node_files.items():
2613 assert filename in nodefiles
2614 fileinfo[filename].setdefault(checksum, set()).add(node.name)
2616 for (filename, checksums) in fileinfo.items():
2617 assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2619 # Nodes having the file
2620 with_file = frozenset(node_name
2621 for nodes in fileinfo[filename].values()
2622 for node_name in nodes) - ignore_nodes
2624 expected_nodes = nodefiles[filename] - ignore_nodes
2626 # Nodes missing file
2627 missing_file = expected_nodes - with_file
2629 if filename in files_opt:
2630 # All or no nodes
2631 errorif(missing_file and missing_file != expected_nodes,
2632 constants.CV_ECLUSTERFILECHECK, None,
2633 "File %s is optional, but it must exist on all or no"
2634 " nodes (not found on %s)",
2635 filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2636 else:
2637 errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
2638 "File %s is missing from node(s) %s", filename,
2639 utils.CommaJoin(utils.NiceSort(missing_file)))
2641 # Warn if a node has a file it shouldn't
2642 unexpected = with_file - expected_nodes
2643 errorif(unexpected,
2644 constants.CV_ECLUSTERFILECHECK, None,
2645 "File %s should not exist on node(s) %s",
2646 filename, utils.CommaJoin(utils.NiceSort(unexpected)))
2648 # See if there are multiple versions of the file
2649 test = len(checksums) > 1
2650 if test:
2651 variants = ["variant %s on %s" %
2652 (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2653 for (idx, (checksum, nodes)) in
2654 enumerate(sorted(checksums.items()))]
2655 else:
2656 variants = []
2658 errorif(test, constants.CV_ECLUSTERFILECHECK, None,
2659 "File %s found with %s different checksums (%s)",
2660 filename, len(checksums), "; ".join(variants))
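# Illustration (hypothetical): if a file hashes to checksum A on node1 and
# node2 but to checksum B on node3, checksums has two entries and the
# resulting message reads, roughly:
#   File ... found with 2 different checksums (variant 1 on node1, node2;
#   variant 2 on node3)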
2662 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2663 drbd_map):
2664 """Verifies the node DRBD status.
2666 @type ninfo: L{objects.Node}
2667 @param ninfo: the node to check
2668 @param nresult: the remote results for the node
2669 @param instanceinfo: the dict of instances
2670 @param drbd_helper: the configured DRBD usermode helper
2671 @param drbd_map: the DRBD map as returned by
2672 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2674 """
2675 node = ninfo.name
2676 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2678 if drbd_helper:
2679 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2680 test = (helper_result is None)
2681 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2682 "no drbd usermode helper returned")
2683 if helper_result:
2684 status, payload = helper_result
2685 test = not status
2686 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2687 "drbd usermode helper check unsuccessful: %s", payload)
2688 test = status and (payload != drbd_helper)
2689 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2690 "wrong drbd usermode helper: %s", payload)
2692 # compute the DRBD minors
2693 node_drbd = {}
2694 for minor, instance in drbd_map[node].items():
2695 test = instance not in instanceinfo
2696 _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
2697 "ghost instance '%s' in temporary DRBD map", instance)
2698 # ghost instance should not be running, but otherwise we
2699 # don't give double warnings (both ghost instance and
2700 # unallocated minor in use)
2701 if test:
2702 node_drbd[minor] = (instance, False)
2703 else:
2704 instance = instanceinfo[instance]
2705 node_drbd[minor] = (instance.name,
2706 instance.admin_state == constants.ADMINST_UP)
2708 # and now check them
2709 used_minors = nresult.get(constants.NV_DRBDLIST, [])
2710 test = not isinstance(used_minors, (tuple, list))
2711 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2712 "cannot parse drbd status file: %s", str(used_minors))
2713 if test:
2714 # we cannot check drbd status
2715 return
2717 for minor, (iname, must_exist) in node_drbd.items():
2718 test = minor not in used_minors and must_exist
2719 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2720 "drbd minor %d of instance %s is not active", minor, iname)
2721 for minor in used_minors:
2722 test = minor not in node_drbd
2723 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2724 "unallocated drbd minor %d is in use", minor)
2726 def _UpdateNodeOS(self, ninfo, nresult, nimg):
2727 """Builds the node OS structures.
2729 @type ninfo: L{objects.Node}
2730 @param ninfo: the node to check
2731 @param nresult: the remote results for the node
2732 @param nimg: the node image object
2734 """
2735 node = ninfo.name
2736 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2738 remote_os = nresult.get(constants.NV_OSLIST, None)
2739 test = (not isinstance(remote_os, list) or
2740 not compat.all(isinstance(v, list) and len(v) == 7
2741 for v in remote_os))
2743 _ErrorIf(test, constants.CV_ENODEOS, node,
2744 "node hasn't returned valid OS data")
2753 for (name, os_path, status, diagnose,
2754 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2756 if name not in os_dict:
2757 os_dict[name] = []
2759 # parameters is a list of lists instead of list of tuples due to
2760 # JSON lacking a real tuple type, fix it:
2761 parameters = [tuple(v) for v in parameters]
2762 os_dict[name].append((os_path, status, diagnose,
2763 set(variants), set(parameters), set(api_ver)))
2765 nimg.oslist = os_dict
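# Illustration (hypothetical): after this assignment nimg.oslist maps each
# OS name to the occurrences found on the node, e.g.
#   {"debian-image": [("/srv/ganeti/os/debian-image", True, "",
#                      set(["default"]), set(), set([20]))]}
# i.e. one (path, status, diagnose, variants, parameters, api_versions)
# tuple per occurrence; _VerifyNodeOS compares this against the reference
# node.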
2767 def _VerifyNodeOS(self, ninfo, nimg, base):
2768 """Verifies the node OS list.
2770 @type ninfo: L{objects.Node}
2771 @param ninfo: the node to check
2772 @param nimg: the node image object
2773 @param base: the 'template' node we match against (e.g. from the master)
2775 """
2776 node = ninfo.name
2777 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2779 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2781 beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2782 for os_name, os_data in nimg.oslist.items():
2783 assert os_data, "Empty OS status for OS %s?!" % os_name
2784 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2785 _ErrorIf(not f_status, constants.CV_ENODEOS, node,
2786 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2787 _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node,
2788 "OS '%s' has multiple entries (first one shadows the rest): %s",
2789 os_name, utils.CommaJoin([v[0] for v in os_data]))
2790 # comparisons with the 'base' image
2791 test = os_name not in base.oslist
2792 _ErrorIf(test, constants.CV_ENODEOS, node,
2793 "Extra OS %s not present on reference node (%s)",
2797 assert base.oslist[os_name], "Base node has empty OS status?"
2798 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2800 # base OS is invalid, skipping
2802 for kind, a, b in [("API version", f_api, b_api),
2803 ("variants list", f_var, b_var),
2804 ("parameters", beautify_params(f_param),
2805 beautify_params(b_param))]:
2806 _ErrorIf(a != b, constants.CV_ENODEOS, node,
2807 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2808 kind, os_name, base.name,
2809 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2811 # check any missing OSes
2812 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2813 _ErrorIf(missing, constants.CV_ENODEOS, node,
2814 "OSes present on reference node %s but missing on this node: %s",
2815 base.name, utils.CommaJoin(missing))
2817 def _VerifyOob(self, ninfo, nresult):
2818 """Verifies out of band functionality of a node.
2820 @type ninfo: L{objects.Node}
2821 @param ninfo: the node to check
2822 @param nresult: the remote results for the node
2824 """
2825 node = ninfo.name
2826 # We just have to verify the paths on master and/or master candidates
2827 # as the oob helper is invoked on the master
2828 if ((ninfo.master_candidate or ninfo.master_capable) and
2829 constants.NV_OOB_PATHS in nresult):
2830 for path_result in nresult[constants.NV_OOB_PATHS]:
2831 self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
2833 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2834 """Verifies and updates the node volume data.
2836 This function will update a L{NodeImage}'s internal structures
2837 with data from the remote call.
2839 @type ninfo: L{objects.Node}
2840 @param ninfo: the node to check
2841 @param nresult: the remote results for the node
2842 @param nimg: the node image object
2843 @param vg_name: the configured VG name
2845 """
2846 node = ninfo.name
2847 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2849 nimg.lvm_fail = True
2850 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2851 if vg_name is None:
2852 pass
2853 elif isinstance(lvdata, basestring):
2854 _ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s",
2855 utils.SafeEncode(lvdata))
2856 elif not isinstance(lvdata, dict):
2857 _ErrorIf(True, constants.CV_ENODELVM, node,
2858 "rpc call to node failed (lvlist)")
2859 else:
2860 nimg.volumes = lvdata
2861 nimg.lvm_fail = False
2863 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2864 """Verifies and updates the node instance list.
2866 If the listing was successful, then updates this node's instance
2867 list. Otherwise, it marks the RPC call as failed for the instance
2870 @type ninfo: L{objects.Node}
2871 @param ninfo: the node to check
2872 @param nresult: the remote results for the node
2873 @param nimg: the node image object
2875 """
2876 idata = nresult.get(constants.NV_INSTANCELIST, None)
2877 test = not isinstance(idata, list)
2878 self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
2879 "rpc call to node failed (instancelist): %s",
2880 utils.SafeEncode(str(idata)))
2881 if test:
2882 nimg.hyp_fail = True
2883 else:
2884 nimg.instances = idata
2886 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2887 """Verifies and computes a node information map
2889 @type ninfo: L{objects.Node}
2890 @param ninfo: the node to check
2891 @param nresult: the remote results for the node
2892 @param nimg: the node image object
2893 @param vg_name: the configured VG name
2895 """
2896 node = ninfo.name
2897 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2899 # try to read free memory (from the hypervisor)
2900 hv_info = nresult.get(constants.NV_HVINFO, None)
2901 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2902 _ErrorIf(test, constants.CV_ENODEHV, node,
2903 "rpc call to node failed (hvinfo)")
2906 nimg.mfree = int(hv_info["memory_free"])
2907 except (ValueError, TypeError):
2908 _ErrorIf(True, constants.CV_ENODERPC, node,
2909 "node returned invalid nodeinfo, check hypervisor")
2911 # FIXME: devise a free space model for file based instances as well
2912 if vg_name is not None:
2913 test = (constants.NV_VGLIST not in nresult or
2914 vg_name not in nresult[constants.NV_VGLIST])
2915 _ErrorIf(test, constants.CV_ENODELVM, node,
2916 "node didn't return data for the volume group '%s'"
2917 " - it is either missing or broken", vg_name)
2920 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2921 except (ValueError, TypeError):
2922 _ErrorIf(True, constants.CV_ENODERPC, node,
2923 "node returned invalid LVM info, check LVM status")
2925 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2926 """Gets per-disk status information for all instances.
2928 @type nodelist: list of strings
2929 @param nodelist: Node names
2930 @type node_image: dict of (name, L{objects.Node})
2931 @param node_image: Node objects
2932 @type instanceinfo: dict of (name, L{objects.Instance})
2933 @param instanceinfo: Instance objects
2934 @rtype: {instance: {node: [(success, payload)]}}
2935 @return: a dictionary of per-instance dictionaries with nodes as
2936 keys and disk information as values; the disk information is a
2937 list of tuples (success, payload)
2939 """
2940 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2942 node_disks = {}
2943 node_disks_devonly = {}
2944 diskless_instances = set()
2945 diskless = constants.DT_DISKLESS
2947 for nname in nodelist:
2948 node_instances = list(itertools.chain(node_image[nname].pinst,
2949 node_image[nname].sinst))
2950 diskless_instances.update(inst for inst in node_instances
2951 if instanceinfo[inst].disk_template == diskless)
2952 disks = [(inst, disk)
2953 for inst in node_instances
2954 for disk in instanceinfo[inst].disks]
2956 if not disks:
2957 # No need to collect data
2958 continue
2960 node_disks[nname] = disks
2962 # _AnnotateDiskParams already makes copies of the disks
2963 devonly = []
2964 for (inst, dev) in disks:
2965 (anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
2966 self.cfg.SetDiskID(anno_disk, nname)
2967 devonly.append(anno_disk)
2969 node_disks_devonly[nname] = devonly
2971 assert len(node_disks) == len(node_disks_devonly)
2973 # Collect data from all nodes with disks
2974 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2975 node_disks_devonly)
2977 assert len(result) == len(node_disks)
2979 instdisk = {}
2981 for (nname, nres) in result.items():
2982 disks = node_disks[nname]
2984 if nres.offline:
2985 # No data from this node
2986 data = len(disks) * [(False, "node offline")]
2987 else:
2988 msg = nres.fail_msg
2989 _ErrorIf(msg, constants.CV_ENODERPC, nname,
2990 "while getting disk information: %s", msg)
2991 if msg:
2992 # No data from this node
2993 data = len(disks) * [(False, msg)]
2994 else:
2995 data = []
2996 for idx, i in enumerate(nres.payload):
2997 if isinstance(i, (tuple, list)) and len(i) == 2:
2998 data.append(i)
2999 else:
3000 logging.warning("Invalid result from node %s, entry %d: %s",
3001 nname, idx, i)
3002 data.append((False, "Invalid result from the remote node"))
3004 for ((inst, _), status) in zip(disks, data):
3005 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
3007 # Add empty entries for diskless instances.
3008 for inst in diskless_instances:
3009 assert inst not in instdisk
3010 instdisk[inst] = {}
3012 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
3013 len(nnames) <= len(instanceinfo[inst].all_nodes) and
3014 compat.all(isinstance(s, (tuple, list)) and
3015 len(s) == 2 for s in statuses)
3016 for inst, nnames in instdisk.items()
3017 for nname, statuses in nnames.items())
3018 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
3020 return instdisk
3022 @staticmethod
3023 def _SshNodeSelector(group_uuid, all_nodes):
3024 """Create endless iterators for all potential SSH check hosts.
3027 nodes = [node for node in all_nodes
3028 if (node.group != group_uuid and
3030 keyfunc = operator.attrgetter("group")
3032 return map(itertools.cycle,
3033 [sorted(map(operator.attrgetter("name"), names))
3034 for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
3035 keyfunc)])
3037 @classmethod
3038 def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
3039 """Choose which nodes should talk to which other nodes.
3041 We will make nodes contact all nodes in their group, and one node from
3044 @warning: This algorithm has a known issue if one node group is much
3045 smaller than others (e.g. just one node). In such a case all other
3046 nodes will talk to the single node.
3049 online_nodes = sorted(node.name for node in group_nodes if not node.offline)
3050 sel = cls._SshNodeSelector(group_uuid, all_nodes)
3052 return (online_nodes,
3053 dict((name, sorted([i.next() for i in sel]))
3054 for name in online_nodes))
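# Sketch for a hypothetical three-group cluster: for a group whose online
# nodes are ["n1", "n2"], the result could be
#   (["n1", "n2"], {"n1": ["g2-a", "g3-a"], "n2": ["g2-b", "g3-a"]})
# i.e. each node gets one SSH-check target per other group, with the
# per-group itertools.cycle iterators spreading targets round-robin.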
3056 def BuildHooksEnv(self):
3057 """Build hooks env.
3059 Cluster-Verify hooks are run in the post phase only; their failure is
3060 logged in the verify output and makes the verification fail.
3062 """
3063 env = {
3064 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
3065 }
3067 env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
3068 for node in self.my_node_info.values())
3072 def BuildHooksNodes(self):
3073 """Build hooks nodes.
3076 return ([], self.my_node_names)
3078 def Exec(self, feedback_fn):
3079 """Verify integrity of the node group, performing various test on nodes.
3082 # This method has too many local variables. pylint: disable=R0914
3083 feedback_fn("* Verifying group '%s'" % self.group_info.name)
3085 if not self.my_node_names:
3087 feedback_fn("* Empty node group, skipping verification")
3091 _ErrorIf = self._ErrorIf # pylint: disable=C0103
3092 verbose = self.op.verbose
3093 self._feedback_fn = feedback_fn
3095 vg_name = self.cfg.GetVGName()
3096 drbd_helper = self.cfg.GetDRBDHelper()
3097 cluster = self.cfg.GetClusterInfo()
3098 groupinfo = self.cfg.GetAllNodeGroupsInfo()
3099 hypervisors = cluster.enabled_hypervisors
3100 node_data_list = [self.my_node_info[name] for name in self.my_node_names]
3102 i_non_redundant = [] # Non redundant instances
3103 i_non_a_balanced = [] # Non auto-balanced instances
3104 i_offline = 0 # Count of offline instances
3105 n_offline = 0 # Count of offline nodes
3106 n_drained = 0 # Count of nodes being drained
3107 node_vol_should = {}
3109 # FIXME: verify OS list
3112 filemap = _ComputeAncillaryFiles(cluster, False)
3114 # do local checksums
3115 master_node = self.master_node = self.cfg.GetMasterNode()
3116 master_ip = self.cfg.GetMasterIP()
3118 feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
3120 user_scripts = []
3121 if self.cfg.GetUseExternalMipScript():
3122 user_scripts.append(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
3124 node_verify_param = {
3125 constants.NV_FILELIST:
3126 utils.UniqueSequence(filename
3127 for files in filemap
3128 for filename in files),
3129 constants.NV_NODELIST:
3130 self._SelectSshCheckNodes(node_data_list, self.group_uuid,
3131 self.all_node_info.values()),
3132 constants.NV_HYPERVISOR: hypervisors,
3133 constants.NV_HVPARAMS:
3134 _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
3135 constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
3136 for node in node_data_list
3137 if not node.offline],
3138 constants.NV_INSTANCELIST: hypervisors,
3139 constants.NV_VERSION: None,
3140 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
3141 constants.NV_NODESETUP: None,
3142 constants.NV_TIME: None,
3143 constants.NV_MASTERIP: (master_node, master_ip),
3144 constants.NV_OSLIST: None,
3145 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
3146 constants.NV_USERSCRIPTS: user_scripts,
3149 if vg_name is not None:
3150 node_verify_param[constants.NV_VGLIST] = None
3151 node_verify_param[constants.NV_LVLIST] = vg_name
3152 node_verify_param[constants.NV_PVLIST] = [vg_name]
3154 if drbd_helper:
3155 node_verify_param[constants.NV_DRBDLIST] = None
3156 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
3159 # FIXME: this needs to be changed per node-group, not cluster-wide
3160 bridges = set()
3161 default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
3162 if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3163 bridges.add(default_nicpp[constants.NIC_LINK])
3164 for instance in self.my_inst_info.values():
3165 for nic in instance.nics:
3166 full_nic = cluster.SimpleFillNIC(nic.nicparams)
3167 if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3168 bridges.add(full_nic[constants.NIC_LINK])
3170 if bridges:
3171 node_verify_param[constants.NV_BRIDGES] = list(bridges)
3173 # Build our expected cluster state
3174 node_image = dict((node.name, self.NodeImage(offline=node.offline,
3175 name=node.name,
3176 vm_capable=node.vm_capable))
3177 for node in node_data_list)
3179 oob_paths = []
3181 for node in self.all_node_info.values():
3182 path = _SupportsOob(self.cfg, node)
3183 if path and path not in oob_paths:
3184 oob_paths.append(path)
3186 if oob_paths:
3187 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
3189 for instance in self.my_inst_names:
3190 inst_config = self.my_inst_info[instance]
3191 if inst_config.admin_state == constants.ADMINST_OFFLINE:
3194 for nname in inst_config.all_nodes:
3195 if nname not in node_image:
3196 gnode = self.NodeImage(name=nname)
3197 gnode.ghost = (nname not in self.all_node_info)
3198 node_image[nname] = gnode
3200 inst_config.MapLVsByNode(node_vol_should)
3202 pnode = inst_config.primary_node
3203 node_image[pnode].pinst.append(instance)
3205 for snode in inst_config.secondary_nodes:
3206 nimg = node_image[snode]
3207 nimg.sinst.append(instance)
3208 if pnode not in nimg.sbp:
3209 nimg.sbp[pnode] = []
3210 nimg.sbp[pnode].append(instance)
3212 # At this point, we have the in-memory data structures complete,
3213 # except for the runtime information, which we'll gather next
3215 # Due to the way our RPC system works, exact response times cannot be
3216 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
3217 # time before and after executing the request, we can at least have a time
3219 nvinfo_starttime = time.time()
3220 all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
3221 node_verify_param,
3222 self.cfg.GetClusterName())
3223 nvinfo_endtime = time.time()
3225 if self.extra_lv_nodes and vg_name is not None:
3226 extra_lv_nvinfo = \
3227 self.rpc.call_node_verify(self.extra_lv_nodes,
3228 {constants.NV_LVLIST: vg_name},
3229 self.cfg.GetClusterName())
3230 else:
3231 extra_lv_nvinfo = {}
3233 all_drbd_map = self.cfg.ComputeDRBDMap()
3235 feedback_fn("* Gathering disk information (%s nodes)" %
3236 len(self.my_node_names))
3237 instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
3238 self.my_inst_info)
3240 feedback_fn("* Verifying configuration file consistency")
3242 # If not all nodes are being checked, we need to make sure the master node
3243 # and a non-checked vm_capable node are in the list.
3244 absent_nodes = set(self.all_node_info).difference(self.my_node_info)
3245 if absent_nodes:
3246 vf_nvinfo = all_nvinfo.copy()
3247 vf_node_info = list(self.my_node_info.values())
3248 additional_nodes = []
3249 if master_node not in self.my_node_info:
3250 additional_nodes.append(master_node)
3251 vf_node_info.append(self.all_node_info[master_node])
3252 # Add the first vm_capable node we find which is not included,
3253 # excluding the master node (which we already have)
3254 for node in absent_nodes:
3255 nodeinfo = self.all_node_info[node]
3256 if (nodeinfo.vm_capable and not nodeinfo.offline and
3257 node != master_node):
3258 additional_nodes.append(node)
3259 vf_node_info.append(self.all_node_info[node])
3261 key = constants.NV_FILELIST
3262 vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
3263 {key: node_verify_param[key]},
3264 self.cfg.GetClusterName()))
3265 else:
3266 vf_nvinfo = all_nvinfo
3267 vf_node_info = self.my_node_info.values()
3269 self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
3271 feedback_fn("* Verifying node status")
3273 refos_img = None
3275 for node_i in node_data_list:
3276 node = node_i.name
3277 nimg = node_image[node]
3281 feedback_fn("* Skipping offline node %s" % (node,))
3285 if node == master_node:
3287 elif node_i.master_candidate:
3288 ntype = "master candidate"
3289 elif node_i.drained:
3295 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
3297 msg = all_nvinfo[node].fail_msg
3298 _ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
3299 msg)
3300 if msg:
3301 nimg.rpc_fail = True
3302 continue
3304 nresult = all_nvinfo[node].payload
3306 nimg.call_ok = self._VerifyNode(node_i, nresult)
3307 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
3308 self._VerifyNodeNetwork(node_i, nresult)
3309 self._VerifyNodeUserScripts(node_i, nresult)
3310 self._VerifyOob(node_i, nresult)
3312 if nimg.vm_capable:
3313 self._VerifyNodeLVM(node_i, nresult, vg_name)
3314 self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
3315 all_drbd_map)
3317 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
3318 self._UpdateNodeInstances(node_i, nresult, nimg)
3319 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
3320 self._UpdateNodeOS(node_i, nresult, nimg)
3322 if not nimg.os_fail:
3323 if refos_img is None:
3324 refos_img = nimg
3325 self._VerifyNodeOS(node_i, nimg, refos_img)
3326 self._VerifyNodeBridges(node_i, nresult, bridges)
3328 # Check whether all running instances are primary for the node. (This
3329 # can no longer be done from _VerifyInstance below, since some of the
3330 # wrong instances could be from other node groups.)
3331 non_primary_inst = set(nimg.instances).difference(nimg.pinst)
3333 for inst in non_primary_inst:
3334 test = inst in self.all_inst_info
3335 _ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
3336 "instance should not run on node %s", node_i.name)
3337 _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
3338 "node is running unknown instance %s", inst)
3340 for node, result in extra_lv_nvinfo.items():
3341 self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
3342 node_image[node], vg_name)
3344 feedback_fn("* Verifying instance status")
3345 for instance in self.my_inst_names:
3347 feedback_fn("* Verifying instance %s" % instance)
3348 inst_config = self.my_inst_info[instance]
3349 self._VerifyInstance(instance, inst_config, node_image,
3350 instdisk[instance])
3351 inst_nodes_offline = []
3353 pnode = inst_config.primary_node
3354 pnode_img = node_image[pnode]
3355 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
3356 constants.CV_ENODERPC, pnode, "instance %s, connection to"
3357 " primary node failed", instance)
3359 _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
3360 pnode_img.offline,
3361 constants.CV_EINSTANCEBADNODE, instance,
3362 "instance is marked as running and lives on offline node %s",
3363 inst_config.primary_node)
3365 # If the instance is non-redundant we cannot survive losing its primary
3366 # node, so we are not N+1 compliant.
3367 if inst_config.disk_template not in constants.DTS_MIRRORED:
3368 i_non_redundant.append(instance)
3370 _ErrorIf(len(inst_config.secondary_nodes) > 1,
3371 constants.CV_EINSTANCELAYOUT,
3372 instance, "instance has multiple secondary nodes: %s",
3373 utils.CommaJoin(inst_config.secondary_nodes),
3374 code=self.ETYPE_WARNING)
3376 if inst_config.disk_template in constants.DTS_INT_MIRROR:
3377 pnode = inst_config.primary_node
3378 instance_nodes = utils.NiceSort(inst_config.all_nodes)
3379 instance_groups = {}
3381 for node in instance_nodes:
3382 instance_groups.setdefault(self.all_node_info[node].group,
3383 []).append(node)
3385 pretty_list = [
3386 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
3387 # Sort so that we always list the primary node first.
3388 for group, nodes in sorted(instance_groups.items(),
3389 key=lambda (_, nodes): pnode in nodes,
3390 reverse=True)]
3392 self._ErrorIf(len(instance_groups) > 1,
3393 constants.CV_EINSTANCESPLITGROUPS,
3394 instance, "instance has primary and secondary nodes in"
3395 " different groups: %s", utils.CommaJoin(pretty_list),
3396 code=self.ETYPE_WARNING)
3398 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
3399 i_non_a_balanced.append(instance)
3401 for snode in inst_config.secondary_nodes:
3402 s_img = node_image[snode]
3403 _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
3404 snode, "instance %s, connection to secondary node failed",
3408 inst_nodes_offline.append(snode)
3410 # warn that the instance lives on offline nodes
3411 _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
3412 "instance has offline secondary node(s) %s",
3413 utils.CommaJoin(inst_nodes_offline))
3414 # ... or ghost/non-vm_capable nodes
3415 for node in inst_config.all_nodes:
3416 _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
3417 instance, "instance lives on ghost node %s", node)
3418 _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
3419 instance, "instance lives on non-vm_capable node %s", node)
3421 feedback_fn("* Verifying orphan volumes")
3422 reserved = utils.FieldSet(*cluster.reserved_lvs)
3424 # We will get spurious "unknown volume" warnings if any node of this group
3425 # is secondary for an instance whose primary is in another group. To avoid
3426 # them, we find these instances and add their volumes to node_vol_should.
3427 for inst in self.all_inst_info.values():
3428 for secondary in inst.secondary_nodes:
3429 if (secondary in self.my_node_info
3430 and inst.name not in self.my_inst_info):
3431 inst.MapLVsByNode(node_vol_should)
3434 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
3436 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
3437 feedback_fn("* Verifying N+1 Memory redundancy")
3438 self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
3440 feedback_fn("* Other Notes")
3442 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
3443 % len(i_non_redundant))
3445 if i_non_a_balanced:
3446 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
3447 % len(i_non_a_balanced))
3450 feedback_fn(" - NOTICE: %d offline instance(s) found." % i_offline)
3453 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
3456 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
3460 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
3461 """Analyze the post-hooks' result
3463 This method analyses the hook result, handles it, and sends some
3464 nicely-formatted feedback back to the user.
3466 @param phase: one of L{constants.HOOKS_PHASE_POST} or
3467 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
3468 @param hooks_results: the results of the multi-node hooks rpc call
3469 @param feedback_fn: function used to send feedback back to the caller
3470 @param lu_result: previous Exec result
3471 @return: the new Exec result, based on the previous result
3473 """
3475 # We only really run POST phase hooks, and only for non-empty groups,
3476 # and are only interested in their results
3477 if not self.my_node_names:
3480 elif phase == constants.HOOKS_PHASE_POST:
3481 # Used to change hooks' output to proper indentation
3482 feedback_fn("* Hooks Results")
3483 assert hooks_results, "invalid result from hooks"
3485 for node_name in hooks_results:
3486 res = hooks_results[node_name]
3487 msg = res.fail_msg
3488 test = msg and not res.offline
3489 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3490 "Communication failure in hooks execution: %s", msg)
3491 if res.offline or msg:
3492 # No need to investigate payload if node is offline or gave
3493 # an error
3494 continue
3495 for script, hkr, output in res.payload:
3496 test = hkr == constants.HKR_FAIL
3497 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3498 "Script %s failed, output:", script)
3500 output = self._HOOKS_INDENT_RE.sub(" ", output)
3501 feedback_fn("%s" % output)
3507 class LUClusterVerifyDisks(NoHooksLU):
3508 """Verifies the cluster disks status.
3513 def ExpandNames(self):
3514 self.share_locks = _ShareAll()
3515 self.needed_locks = {
3516 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3519 def Exec(self, feedback_fn):
3520 group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
3522 # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
3523 return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
3524 for group in group_names])
3527 class LUGroupVerifyDisks(NoHooksLU):
3528 """Verifies the status of all disks in a node group.
3533 def ExpandNames(self):
3534 # Raises errors.OpPrereqError on its own if group can't be found
3535 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
3537 self.share_locks = _ShareAll()
3538 self.needed_locks = {
3539 locking.LEVEL_INSTANCE: [],
3540 locking.LEVEL_NODEGROUP: [],
3541 locking.LEVEL_NODE: [],
3544 def DeclareLocks(self, level):
3545 if level == locking.LEVEL_INSTANCE:
3546 assert not self.needed_locks[locking.LEVEL_INSTANCE]
3548 # Lock instances optimistically, needs verification once node and group
3549 # locks have been acquired
3550 self.needed_locks[locking.LEVEL_INSTANCE] = \
3551 self.cfg.GetNodeGroupInstances(self.group_uuid)
3553 elif level == locking.LEVEL_NODEGROUP:
3554 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3556 self.needed_locks[locking.LEVEL_NODEGROUP] = \
3557 set([self.group_uuid] +
3558 # Lock all groups used by instances optimistically; this requires
3559 # going via the node before it's locked, requiring verification
3560 # later on
3561 [group_uuid
3562 for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3563 for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
3565 elif level == locking.LEVEL_NODE:
3566 # This will only lock the nodes in the group to be verified which contain
3568 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3569 self._LockInstancesNodes()
3571 # Lock all nodes in group to be verified
3572 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
3573 member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
3574 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3576 def CheckPrereq(self):
3577 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3578 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3579 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3581 assert self.group_uuid in owned_groups
3583 # Check if locked instances are still correct
3584 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
3586 # Get instance information
3587 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
3589 # Check if node groups for locked instances are still correct
3590 _CheckInstancesNodeGroups(self.cfg, self.instances,
3591 owned_groups, owned_nodes, self.group_uuid)
3593 def Exec(self, feedback_fn):
3594 """Verify integrity of cluster disks.
3596 @rtype: tuple of three items
3597 @return: a tuple of (dict of node-to-node_error, list of instances
3598 which need activate-disks, dict of instance: (node, volume) for
3599 missing volumes
3601 """
3602 res_nodes = {}
3603 res_instances = set()
3604 res_missing = {}
3606 nv_dict = _MapInstanceDisksToNodes([inst
3607 for inst in self.instances.values()
3608 if inst.admin_state == constants.ADMINST_UP])
3611 nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
3612 set(self.cfg.GetVmCapableNodeList()))
3614 node_lvs = self.rpc.call_lv_list(nodes, [])
3616 for (node, node_res) in node_lvs.items():
3617 if node_res.offline:
3618 continue
3620 msg = node_res.fail_msg
3621 if msg:
3622 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
3623 res_nodes[node] = msg
3624 continue
3626 for lv_name, (_, _, lv_online) in node_res.payload.items():
3627 inst = nv_dict.pop((node, lv_name), None)
3628 if not (lv_online or inst is None):
3629 res_instances.add(inst)
3631 # any leftover items in nv_dict are missing LVs, let's arrange the data
3633 for key, inst in nv_dict.iteritems():
3634 res_missing.setdefault(inst, []).append(list(key))
3636 return (res_nodes, list(res_instances), res_missing)
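# Illustrative return value (hypothetical names):
#   ({"node3": "Error 111: connection refused"},    # node-to-node_error
#    ["inst2"],                                     # need activate-disks
#    {"inst5": [["node1", "xenvg/disk0"]]})         # missing volumes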
3639 class LUClusterRepairDiskSizes(NoHooksLU):
3640 """Verifies the cluster disks sizes.
3645 def ExpandNames(self):
3646 if self.op.instances:
3647 self.wanted_names = _GetWantedInstances(self, self.op.instances)
3648 self.needed_locks = {
3649 locking.LEVEL_NODE_RES: [],
3650 locking.LEVEL_INSTANCE: self.wanted_names,
3651 }
3652 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
3653 else:
3654 self.wanted_names = None
3655 self.needed_locks = {
3656 locking.LEVEL_NODE_RES: locking.ALL_SET,
3657 locking.LEVEL_INSTANCE: locking.ALL_SET,
3659 self.share_locks = {
3660 locking.LEVEL_NODE_RES: 1,
3661 locking.LEVEL_INSTANCE: 0,
3664 def DeclareLocks(self, level):
3665 if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
3666 self._LockInstancesNodes(primary_only=True, level=level)
3668 def CheckPrereq(self):
3669 """Check prerequisites.
3671 This only checks the optional instance list against the existing names.
3674 if self.wanted_names is None:
3675 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
3677 self.wanted_instances = \
3678 map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
3680 def _EnsureChildSizes(self, disk):
3681 """Ensure children of the disk have the needed disk size.
3683 This is valid mainly for DRBD8 and fixes an issue where the
3684 children have smaller disk size.
3686 @param disk: an L{ganeti.objects.Disk} object
3689 if disk.dev_type == constants.LD_DRBD8:
3690 assert disk.children, "Empty children for DRBD8?"
3691 fchild = disk.children[0]
3692 mismatch = fchild.size < disk.size
3693 if mismatch:
3694 self.LogInfo("Child disk has size %d, parent %d, fixing",
3695 fchild.size, disk.size)
3696 fchild.size = disk.size
3698 # and we recurse on this child only, not on the metadev
3699 return self._EnsureChildSizes(fchild) or mismatch
3700 else:
3701 return False
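# Worked example (hypothetical sizes): for a DRBD8 disk whose recorded size
# is 10240 MiB but whose data child records 10112 MiB, the child's size is
# bumped to 10240 and True is returned, telling the caller in Exec to write
# the updated configuration back.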
3703 def Exec(self, feedback_fn):
3704 """Verify the size of cluster disks.
3707 # TODO: check child disks too
3708 # TODO: check differences in size between primary/secondary nodes
3709 per_node_disks = {}
3710 for instance in self.wanted_instances:
3711 pnode = instance.primary_node
3712 if pnode not in per_node_disks:
3713 per_node_disks[pnode] = []
3714 for idx, disk in enumerate(instance.disks):
3715 per_node_disks[pnode].append((instance, idx, disk))
3717 assert not (frozenset(per_node_disks.keys()) -
3718 self.owned_locks(locking.LEVEL_NODE_RES)), \
3719 "Not owning correct locks"
3720 assert not self.owned_locks(locking.LEVEL_NODE)
3722 changed = []
3723 for node, dskl in per_node_disks.items():
3724 newl = [v[2].Copy() for v in dskl]
3725 for dsk in newl:
3726 self.cfg.SetDiskID(dsk, node)
3727 result = self.rpc.call_blockdev_getsize(node, newl)
3728 if result.fail_msg:
3729 self.LogWarning("Failure in blockdev_getsize call to node"
3730 " %s, ignoring", node)
3731 continue
3732 if len(result.payload) != len(dskl):
3733 logging.warning("Invalid result from node %s: len(dksl)=%d,"
3734 " result.payload=%s", node, len(dskl), result.payload)
3735 self.LogWarning("Invalid result from node %s, ignoring node results",
3738 for ((instance, idx, disk), size) in zip(dskl, result.payload):
3739 if size is None:
3740 self.LogWarning("Disk %d of instance %s did not return size"
3741 " information, ignoring", idx, instance.name)
3742 continue
3743 if not isinstance(size, (int, long)):
3744 self.LogWarning("Disk %d of instance %s did not return valid"
3745 " size information, ignoring", idx, instance.name)
3748 if size != disk.size:
3749 self.LogInfo("Disk %d of instance %s has mismatched size,"
3750 " correcting: recorded %d, actual %d", idx,
3751 instance.name, disk.size, size)
3752 disk.size = size
3753 self.cfg.Update(instance, feedback_fn)
3754 changed.append((instance.name, idx, size))
3755 if self._EnsureChildSizes(disk):
3756 self.cfg.Update(instance, feedback_fn)
3757 changed.append((instance.name, idx, disk.size))
3758 return changed
3761 class LUClusterRename(LogicalUnit):
3762 """Rename the cluster.
3765 HPATH = "cluster-rename"
3766 HTYPE = constants.HTYPE_CLUSTER
3768 def BuildHooksEnv(self):
3773 "OP_TARGET": self.cfg.GetClusterName(),
3774 "NEW_NAME": self.op.name,
3777 def BuildHooksNodes(self):
3778 """Build hooks nodes.
3781 return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
3783 def CheckPrereq(self):
3784 """Verify that the passed name is a valid one.
3787 hostname = netutils.GetHostname(name=self.op.name,
3788 family=self.cfg.GetPrimaryIPFamily())
3790 new_name = hostname.name
3791 self.ip = new_ip = hostname.ip
3792 old_name = self.cfg.GetClusterName()
3793 old_ip = self.cfg.GetMasterIP()
3794 if new_name == old_name and new_ip == old_ip:
3795 raise errors.OpPrereqError("Neither the name nor the IP address of the"
3796 " cluster has changed",
3798 if new_ip != old_ip:
3799 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
3800 raise errors.OpPrereqError("The given cluster IP address (%s) is"
3801 " reachable on the network" %
3802 new_ip, errors.ECODE_NOTUNIQUE)
3804 self.op.name = new_name
3806 def Exec(self, feedback_fn):
3807 """Rename the cluster.
3810 clustername = self.op.name
3811 new_ip = self.ip
3813 # shutdown the master IP
3814 master_params = self.cfg.GetMasterNetworkParameters()
3815 ems = self.cfg.GetUseExternalMipScript()
3816 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
3817 master_params, ems)
3818 result.Raise("Could not disable the master role")
3821 cluster = self.cfg.GetClusterInfo()
3822 cluster.cluster_name = clustername
3823 cluster.master_ip = new_ip
3824 self.cfg.Update(cluster, feedback_fn)
3826 # update the known hosts file
3827 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
3828 node_list = self.cfg.GetOnlineNodeList()
3829 try:
3830 node_list.remove(master_params.name)
3831 except ValueError:
3832 pass
3833 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
3835 master_params.ip = new_ip
3836 result = self.rpc.call_node_activate_master_ip(master_params.name,
3837 master_params, ems)
3838 msg = result.fail_msg
3839 if msg:
3840 self.LogWarning("Could not re-enable the master role on"
3841 " the master, please restart manually: %s", msg)
3846 def _ValidateNetmask(cfg, netmask):
3847 """Checks if a netmask is valid.
3849 @type cfg: L{config.ConfigWriter}
3850 @param cfg: The cluster configuration
3852 @param netmask: the netmask to be verified
3853 @raise errors.OpPrereqError: if the validation fails
3856 ip_family = cfg.GetPrimaryIPFamily()
3857 try:
3858 ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
3859 except errors.ProgrammerError:
3860 raise errors.OpPrereqError("Invalid primary ip family: %s." %
3862 if not ipcls.ValidateNetmask(netmask):
3863 raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
3867 class LUClusterSetParams(LogicalUnit):
3868 """Change the parameters of the cluster.
3871 HPATH = "cluster-modify"
3872 HTYPE = constants.HTYPE_CLUSTER
3875 def CheckArguments(self):
3879 if self.op.uid_pool:
3880 uidpool.CheckUidPool(self.op.uid_pool)
3882 if self.op.add_uids:
3883 uidpool.CheckUidPool(self.op.add_uids)
3885 if self.op.remove_uids:
3886 uidpool.CheckUidPool(self.op.remove_uids)
3888 if self.op.master_netmask is not None:
3889 _ValidateNetmask(self.cfg, self.op.master_netmask)
3891 if self.op.diskparams:
3892 for dt_params in self.op.diskparams.values():
3893 utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
3894 try:
3895 utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
3896 except errors.OpPrereqError, err:
3897 raise errors.OpPrereqError("While verifying diskparams options: %s" % err,
3898 errors.ECODE_INVAL)
3900 def ExpandNames(self):
3901 # FIXME: in the future maybe other cluster params won't require checking on
3902 # all nodes to be modified.
3903 self.needed_locks = {
3904 locking.LEVEL_NODE: locking.ALL_SET,
3905 locking.LEVEL_INSTANCE: locking.ALL_SET,
3906 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3907 }
3908 self.share_locks = {
3909 locking.LEVEL_NODE: 1,
3910 locking.LEVEL_INSTANCE: 1,
3911 locking.LEVEL_NODEGROUP: 1,
3912 }
3914 def BuildHooksEnv(self):
3919 "OP_TARGET": self.cfg.GetClusterName(),
3920 "NEW_VG_NAME": self.op.vg_name,
3923 def BuildHooksNodes(self):
3924 """Build hooks nodes.
3927 mn = self.cfg.GetMasterNode()
3928 return ([mn], [mn])
3930 def CheckPrereq(self):
3931 """Check prerequisites.
3933 This checks that the given parameters don't conflict and
3934 that the given volume group is valid.
3937 if self.op.vg_name is not None and not self.op.vg_name:
3938 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
3939 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
3940 " instances exist", errors.ECODE_INVAL)
3942 if self.op.drbd_helper is not None and not self.op.drbd_helper:
3943 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
3944 raise errors.OpPrereqError("Cannot disable drbd helper while"
3945 " drbd-based instances exist",
3948 node_list = self.owned_locks(locking.LEVEL_NODE)
3950 # if vg_name not None, checks given volume group on all nodes
3951 if self.op.vg_name:
3952 vglist = self.rpc.call_vg_list(node_list)
3953 for node in node_list:
3954 msg = vglist[node].fail_msg
3955 if msg:
3956 # ignoring down node
3957 self.LogWarning("Error while gathering data on node %s"
3958 " (ignoring node): %s", node, msg)
3959 continue
3960 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
3961 self.op.vg_name,
3962 constants.MIN_VG_SIZE)
3963 if vgstatus:
3964 raise errors.OpPrereqError("Error on node '%s': %s" %
3965 (node, vgstatus), errors.ECODE_ENVIRON)
3967 if self.op.drbd_helper:
3968 # checks given drbd helper on all nodes
3969 helpers = self.rpc.call_drbd_helper(node_list)
3970 for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
3971 if ninfo.offline:
3972 self.LogInfo("Not checking drbd helper on offline node %s", node)
3973 continue
3974 msg = helpers[node].fail_msg
3975 if msg:
3976 raise errors.OpPrereqError("Error checking drbd helper on node"
3977 " '%s': %s" % (node, msg),
3978 errors.ECODE_ENVIRON)
3979 node_helper = helpers[node].payload
3980 if node_helper != self.op.drbd_helper:
3981 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
3982 (node, node_helper), errors.ECODE_ENVIRON)
3984 self.cluster = cluster = self.cfg.GetClusterInfo()
3985 # validate params changes
3986 if self.op.beparams:
3987 objects.UpgradeBeParams(self.op.beparams)
3988 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
3989 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
3991 if self.op.ndparams:
3992 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
3993 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
3995 # TODO: we need a more general way to handle resetting
3996 # cluster-level parameters to default values
3997 if self.new_ndparams["oob_program"] == "":
3998 self.new_ndparams["oob_program"] = \
3999 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
4001 if self.op.hv_state:
4002 new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
4003 self.cluster.hv_state_static)
4004 self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
4005 for hv, values in new_hv_state.items())
4007 if self.op.disk_state:
4008 new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
4009 self.cluster.disk_state_static)
4010 self.new_disk_state = \
4011 dict((storage, dict((name, cluster.SimpleFillDiskState(values))
4012 for name, values in svalues.items()))
4013 for storage, svalues in new_disk_state.items())
4015 if self.op.ipolicy:
4016 self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
4017 group_policy=False)
4019 all_instances = self.cfg.GetAllInstancesInfo().values()
4020 violations = set()
4021 for group in self.cfg.GetAllNodeGroupsInfo().values():
4022 instances = frozenset([inst for inst in all_instances
4023 if compat.any(node in group.members
4024 for node in inst.all_nodes)])
4025 new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
4026 new = _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
4027 group),
4028 new_ipolicy, instances)
4030 violations.update(new)
4032 if violations:
4033 self.LogWarning("After the ipolicy change the following instances"
4034 " violate it: %s",
4035 utils.CommaJoin(utils.NiceSort(violations)))
4037 if self.op.nicparams:
4038 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
4039 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
4040 objects.NIC.CheckParameterSyntax(self.new_nicparams)
4041 nic_errors = []
4043 # check all instances for consistency
4044 for instance in self.cfg.GetAllInstancesInfo().values():
4045 for nic_idx, nic in enumerate(instance.nics):
4046 params_copy = copy.deepcopy(nic.nicparams)
4047 params_filled = objects.FillDict(self.new_nicparams, params_copy)
4049 # check parameter syntax
4050 try:
4051 objects.NIC.CheckParameterSyntax(params_filled)
4052 except errors.ConfigurationError, err:
4053 nic_errors.append("Instance %s, nic/%d: %s" %
4054 (instance.name, nic_idx, err))
4056 # if we're moving instances to routed, check that they have an ip
4057 target_mode = params_filled[constants.NIC_MODE]
4058 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
4059 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
4060 " address" % (instance.name, nic_idx))
4062 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
4063 "\n".join(nic_errors))
4065 # hypervisor list/parameters
4066 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
4067 if self.op.hvparams:
4068 for hv_name, hv_dict in self.op.hvparams.items():
4069 if hv_name not in self.new_hvparams:
4070 self.new_hvparams[hv_name] = hv_dict
4072 self.new_hvparams[hv_name].update(hv_dict)
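# (For reference: objects.FillDict(defaults, custom) returns a copy of its
# first argument updated with the second, e.g. FillDict({"a": 1, "b": 2},
# {"b": 3}) yields {"a": 1, "b": 3}. The same copy-then-update pattern is
# applied to the disk, OS-hypervisor and OS parameters below.)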
4074 # disk template parameters
4075 self.new_diskparams = objects.FillDict(cluster.diskparams, {})
4076 if self.op.diskparams:
4077 for dt_name, dt_params in self.op.diskparams.items():
4078 if dt_name not in self.new_diskparams:
4079 self.new_diskparams[dt_name] = dt_params
4081 self.new_diskparams[dt_name].update(dt_params)
4083 # os hypervisor parameters
4084 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
4085 if self.op.os_hvp:
4086 for os_name, hvs in self.op.os_hvp.items():
4087 if os_name not in self.new_os_hvp:
4088 self.new_os_hvp[os_name] = hvs
4090 for hv_name, hv_dict in hvs.items():
4091 if hv_dict is None:
4092 # Delete if it exists
4093 self.new_os_hvp[os_name].pop(hv_name, None)
4094 elif hv_name not in self.new_os_hvp[os_name]:
4095 self.new_os_hvp[os_name][hv_name] = hv_dict
4097 self.new_os_hvp[os_name][hv_name].update(hv_dict)
4100 self.new_osp = objects.FillDict(cluster.osparams, {})
4101 if self.op.osparams:
4102 for os_name, osp in self.op.osparams.items():
4103 if os_name not in self.new_osp:
4104 self.new_osp[os_name] = {}
4106 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
4107 use_default=False)
4109 if not self.new_osp[os_name]:
4110 # we removed all parameters
4111 del self.new_osp[os_name]
4112 else:
4113 # check the parameter validity (remote check)
4114 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
4115 os_name, self.new_osp[os_name])
4117 # changes to the hypervisor list
4118 if self.op.enabled_hypervisors is not None:
4119 self.hv_list = self.op.enabled_hypervisors
4120 for hv in self.hv_list:
4121 # if the hypervisor doesn't already exist in the cluster
4122 # hvparams, we initialize it to empty, and then (in both
4123 # cases) we make sure to fill the defaults, as we might not
4124 # have a complete defaults list if the hypervisor wasn't
4125 # enabled before
4126 if hv not in new_hvp:
4127 new_hvp[hv] = {}
4128 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
4129 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
4130 else:
4131 self.hv_list = cluster.enabled_hypervisors
4133 if self.op.hvparams or self.op.enabled_hypervisors is not None:
4134 # either the enabled list has changed, or the parameters have, validate
4135 for hv_name, hv_params in self.new_hvparams.items():
4136 if ((self.op.hvparams and hv_name in self.op.hvparams) or
4137 (self.op.enabled_hypervisors and
4138 hv_name in self.op.enabled_hypervisors)):
4139 # either this is a new hypervisor, or its parameters have changed
4140 hv_class = hypervisor.GetHypervisorClass(hv_name)
4141 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4142 hv_class.CheckParameterSyntax(hv_params)
4143 _CheckHVParams(self, node_list, hv_name, hv_params)
4145 if self.op.os_hvp:
4146 # no need to check any newly-enabled hypervisors, since the
4147 # defaults have already been checked in the above code-block
4148 for os_name, os_hvp in self.new_os_hvp.items():
4149 for hv_name, hv_params in os_hvp.items():
4150 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4151 # we need to fill in the new os_hvp on top of the actual hv_p
4152 cluster_defaults = self.new_hvparams.get(hv_name, {})
4153 new_osp = objects.FillDict(cluster_defaults, hv_params)
4154 hv_class = hypervisor.GetHypervisorClass(hv_name)
4155 hv_class.CheckParameterSyntax(new_osp)
4156 _CheckHVParams(self, node_list, hv_name, new_osp)
4158 if self.op.default_iallocator:
4159 alloc_script = utils.FindFile(self.op.default_iallocator,
4160 constants.IALLOCATOR_SEARCH_PATH,
4161 os.path.isfile)
4162 if alloc_script is None:
4163 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
4164 " specified" % self.op.default_iallocator,
4167 def Exec(self, feedback_fn):
4168 """Change the parameters of the cluster.
4171 if self.op.vg_name is not None:
4172 new_volume = self.op.vg_name
4173 if not new_volume:
4174 new_volume = None
4175 if new_volume != self.cfg.GetVGName():
4176 self.cfg.SetVGName(new_volume)
4177 else:
4178 feedback_fn("Cluster LVM configuration already in desired"
4179 " state, not changing")
4180 if self.op.drbd_helper is not None:
4181 new_helper = self.op.drbd_helper
4182 if not new_helper:
4183 new_helper = None
4184 if new_helper != self.cfg.GetDRBDHelper():
4185 self.cfg.SetDRBDHelper(new_helper)
4186 else:
4187 feedback_fn("Cluster DRBD helper already in desired state,"
4188 " not changing")
4189 if self.op.hvparams:
4190 self.cluster.hvparams = self.new_hvparams
4191 if self.op.os_hvp:
4192 self.cluster.os_hvp = self.new_os_hvp
4193 if self.op.enabled_hypervisors is not None:
4194 self.cluster.hvparams = self.new_hvparams
4195 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
4196 if self.op.beparams:
4197 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
4198 if self.op.nicparams:
4199 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
4200 if self.op.ipolicy:
4201 self.cluster.ipolicy = self.new_ipolicy
4202 if self.op.osparams:
4203 self.cluster.osparams = self.new_osp
4204 if self.op.ndparams:
4205 self.cluster.ndparams = self.new_ndparams
4206 if self.op.diskparams:
4207 self.cluster.diskparams = self.new_diskparams
4208 if self.op.hv_state:
4209 self.cluster.hv_state_static = self.new_hv_state
4210 if self.op.disk_state:
4211 self.cluster.disk_state_static = self.new_disk_state
4213 if self.op.candidate_pool_size is not None:
4214 self.cluster.candidate_pool_size = self.op.candidate_pool_size
4215 # we need to update the pool size here, otherwise the save will fail
4216 _AdjustCandidatePool(self, [])
4218 if self.op.maintain_node_health is not None:
4219 if self.op.maintain_node_health and not constants.ENABLE_CONFD:
4220 feedback_fn("Note: CONFD was disabled at build time, node health"
4221 " maintenance is not useful (still enabling it)")
4222 self.cluster.maintain_node_health = self.op.maintain_node_health
4224 if self.op.prealloc_wipe_disks is not None:
4225 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
4227 if self.op.add_uids is not None:
4228 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
4230 if self.op.remove_uids is not None:
4231 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
4233 if self.op.uid_pool is not None:
4234 self.cluster.uid_pool = self.op.uid_pool
4236 if self.op.default_iallocator is not None:
4237 self.cluster.default_iallocator = self.op.default_iallocator
4239 if self.op.reserved_lvs is not None:
4240 self.cluster.reserved_lvs = self.op.reserved_lvs
4242 if self.op.use_external_mip_script is not None:
4243 self.cluster.use_external_mip_script = self.op.use_external_mip_script
4245 def helper_os(aname, mods, desc):
4246 desc += " OS list"
4247 lst = getattr(self.cluster, aname)
4248 for key, val in mods:
4249 if key == constants.DDM_ADD:
4250 if val in lst:
4251 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
4252 else:
4253 lst.append(val)
4254 elif key == constants.DDM_REMOVE:
4255 if val in lst:
4256 lst.remove(val)
4257 else:
4258 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
4259 else:
4260 raise errors.ProgrammerError("Invalid modification '%s'" % key)
4262 if self.op.hidden_os:
4263 helper_os("hidden_os", self.op.hidden_os, "hidden")
4265 if self.op.blacklisted_os:
4266 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
4268 if self.op.master_netdev:
4269 master_params = self.cfg.GetMasterNetworkParameters()
4270 ems = self.cfg.GetUseExternalMipScript()
4271 feedback_fn("Shutting down master ip on the current netdev (%s)" %
4272 self.cluster.master_netdev)
4273 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4274 master_params, ems)
4275 result.Raise("Could not disable the master ip")
4276 feedback_fn("Changing master_netdev from %s to %s" %
4277 (master_params.netdev, self.op.master_netdev))
4278 self.cluster.master_netdev = self.op.master_netdev
4280 if self.op.master_netmask:
4281 master_params = self.cfg.GetMasterNetworkParameters()
4282 feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
4283 result = self.rpc.call_node_change_master_netmask(master_params.name,
4284 master_params.netmask,
4285 self.op.master_netmask,
4286 master_params.ip,
4287 master_params.netdev)
4288 if result.fail_msg:
4289 msg = "Could not change the master IP netmask: %s" % result.fail_msg
4290 feedback_fn(msg)
4292 self.cluster.master_netmask = self.op.master_netmask
4294 self.cfg.Update(self.cluster, feedback_fn)
4296 if self.op.master_netdev:
4297 master_params = self.cfg.GetMasterNetworkParameters()
4298 feedback_fn("Starting the master ip on the new master netdev (%s)" %
4299 self.op.master_netdev)
4300 ems = self.cfg.GetUseExternalMipScript()
4301 result = self.rpc.call_node_activate_master_ip(master_params.name,
4302 master_params, ems)
4303 if result.fail_msg:
4304 self.LogWarning("Could not re-enable the master ip on"
4305 " the master, please restart manually: %s",
4306 result.fail_msg)
4309 def _UploadHelper(lu, nodes, fname):
4310 """Helper for uploading a file and showing warnings.
4313 if os.path.exists(fname):
4314 result = lu.rpc.call_upload_file(nodes, fname)
4315 for to_node, to_result in result.items():
4316 msg = to_result.fail_msg
4318 msg = ("Copy of file %s to node %s failed: %s" %
4319 (fname, to_node, msg))
4320 lu.proc.LogWarning(msg)
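# Typical call site for the helper above, mirroring its use elsewhere in this
# module, e.g. when the cluster is renamed:
#   _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
# Files missing on the master are skipped silently thanks to the
# os.path.exists() guard.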
4323 def _ComputeAncillaryFiles(cluster, redist):
4324 """Compute files external to Ganeti which need to be consistent.
4326 @type redist: boolean
4327 @param redist: Whether to include files which need to be redistributed
4330 # Compute files for all nodes
4331 files_all = set([
4332 constants.SSH_KNOWN_HOSTS_FILE,
4333 constants.CONFD_HMAC_KEY,
4334 constants.CLUSTER_DOMAIN_SECRET_FILE,
4335 constants.SPICE_CERT_FILE,
4336 constants.SPICE_CACERT_FILE,
4337 constants.RAPI_USERS_FILE,
4338 ])
4340 if not redist:
4341 files_all.update(constants.ALL_CERT_FILES)
4342 files_all.update(ssconf.SimpleStore().GetFileList())
4343 else:
4344 # we need to ship at least the RAPI certificate
4345 files_all.add(constants.RAPI_CERT_FILE)
4347 if cluster.modify_etc_hosts:
4348 files_all.add(constants.ETC_HOSTS)
4350 if cluster.use_external_mip_script:
4351 files_all.add(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
4353 # Files which are optional, these must:
4354 # - be present in one other category as well
4355 # - either exist or not exist on all nodes of that category (mc, vm all)
4356 files_opt = set([
4357 constants.RAPI_USERS_FILE,
4358 ])
4360 # Files which should only be on master candidates
4361 files_mc = set()
4363 if not redist:
4364 files_mc.add(constants.CLUSTER_CONF_FILE)
4366 # Files which should only be on VM-capable nodes
4367 files_vm = set(filename
4368 for hv_name in cluster.enabled_hypervisors
4369 for filename in
4370 hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
4372 files_opt |= set(filename
4373 for hv_name in cluster.enabled_hypervisors
4374 for filename in
4375 hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
4377 # Filenames in each category must be unique
4378 all_files_set = files_all | files_mc | files_vm
4379 assert (len(all_files_set) ==
4380 sum(map(len, [files_all, files_mc, files_vm]))), \
4381 "Found file listed in more than one file list"
4383 # Optional files must be present in one other category
4384 assert all_files_set.issuperset(files_opt), \
4385 "Optional file not in a different required list"
4387 return (files_all, files_opt, files_mc, files_vm)
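# Callers unpack these categories selectively; the redistribution helper
# below, for example, ships only the "all nodes" and "vm-capable" sets and
# asserts that master-candidate-only files are handled elsewhere.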
4390 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
4391 """Distribute additional files which are part of the cluster configuration.
4393 ConfigWriter takes care of distributing the config and ssconf files, but
4394 there are more files which should be distributed to all nodes. This function
4395 makes sure those are copied.
4397 @param lu: calling logical unit
4398 @param additional_nodes: list of nodes not in the config to distribute to
4399 @type additional_vm: boolean
4400 @param additional_vm: whether the additional nodes are vm-capable or not
4403 # Gather target nodes
4404 cluster = lu.cfg.GetClusterInfo()
4405 master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
4407 online_nodes = lu.cfg.GetOnlineNodeList()
4408 online_set = frozenset(online_nodes)
4409 vm_nodes = list(online_set.intersection(lu.cfg.GetVmCapableNodeList()))
4411 if additional_nodes is not None:
4412 online_nodes.extend(additional_nodes)
4413 if additional_vm:
4414 vm_nodes.extend(additional_nodes)
4416 # Never distribute to master node
4417 for nodelist in [online_nodes, vm_nodes]:
4418 if master_info.name in nodelist:
4419 nodelist.remove(master_info.name)
4422 (files_all, _, files_mc, files_vm) = \
4423 _ComputeAncillaryFiles(cluster, True)
4425 # Never re-distribute configuration file from here
4426 assert not (constants.CLUSTER_CONF_FILE in files_all or
4427 constants.CLUSTER_CONF_FILE in files_vm)
4428 assert not files_mc, "Master candidates not handled in this function"
4430 filemap = [
4431 (online_nodes, files_all),
4432 (vm_nodes, files_vm),
4433 ]
4435 # Upload the files
4436 for (node_list, files) in filemap:
4437 for fname in files:
4438 _UploadHelper(lu, node_list, fname)
4441 class LUClusterRedistConf(NoHooksLU):
4442 """Force the redistribution of cluster configuration.
4444 This is a very simple LU.
4449 def ExpandNames(self):
4450 self.needed_locks = {
4451 locking.LEVEL_NODE: locking.ALL_SET,
4452 }
4453 self.share_locks[locking.LEVEL_NODE] = 1
4455 def Exec(self, feedback_fn):
4456 """Redistribute the configuration.
4459 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
4460 _RedistributeAncillaryFiles(self)
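# This LU backs "gnt-cluster redist-conf": the Update() call above rewrites
# the configuration (letting ConfigWriter push config and ssconf files), and
# _RedistributeAncillaryFiles ships the remaining cluster files.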
4463 class LUClusterActivateMasterIp(NoHooksLU):
4464 """Activate the master IP on the master node.
4467 def Exec(self, feedback_fn):
4468 """Activate the master IP.
4471 master_params = self.cfg.GetMasterNetworkParameters()
4472 ems = self.cfg.GetUseExternalMipScript()
4473 result = self.rpc.call_node_activate_master_ip(master_params.name,
4475 result.Raise("Could not activate the master IP")
4478 class LUClusterDeactivateMasterIp(NoHooksLU):
4479 """Deactivate the master IP on the master node.
4482 def Exec(self, feedback_fn):
4483 """Deactivate the master IP.
4486 master_params = self.cfg.GetMasterNetworkParameters()
4487 ems = self.cfg.GetUseExternalMipScript()
4488 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4490 result.Raise("Could not deactivate the master IP")
4493 def _WaitForSync(lu, instance, disks=None, oneshot=False):
4494 """Sleep and poll for an instance's disk to sync.
4497 if not instance.disks or disks is not None and not disks:
4498 return True
4500 disks = _ExpandCheckDisks(instance, disks)
4503 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
4505 node = instance.primary_node
4507 for dev in disks:
4508 lu.cfg.SetDiskID(dev, node)
4510 # TODO: Convert to utils.Retry
4513 degr_retries = 10 # in seconds, as we sleep 1 second each time
4517 cumul_degraded = False
4518 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, (disks, instance))
4519 msg = rstats.fail_msg
4520 if msg:
4521 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
4522 retries += 1
4523 if retries >= 10:
4524 raise errors.RemoteError("Can't contact node %s for mirror data,"
4525 " aborting." % node)
4526 time.sleep(6)
4527 continue
4528 rstats = rstats.payload
4530 for i, mstat in enumerate(rstats):
4532 lu.LogWarning("Can't compute data for node %s/%s",
4533 node, disks[i].iv_name)
4536 cumul_degraded = (cumul_degraded or
4537 (mstat.is_degraded and mstat.sync_percent is None))
4538 if mstat.sync_percent is not None:
4539 done = False
4540 if mstat.estimated_time is not None:
4541 rem_time = ("%s remaining (estimated)" %
4542 utils.FormatSeconds(mstat.estimated_time))
4543 max_time = mstat.estimated_time
4545 rem_time = "no time estimate"
4546 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
4547 (disks[i].iv_name, mstat.sync_percent, rem_time))
4549 # if we're done but degraded, let's do a few small retries, to
4550 # make sure we see a stable and not transient situation; therefore
4551 # we force restart of the loop
4552 if (done or oneshot) and cumul_degraded and degr_retries > 0:
4553 logging.info("Degraded disks found, %d retries left", degr_retries)
4561 time.sleep(min(60, max_time))
4564 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
4565 return not cumul_degraded
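# Sketch of a typical (hypothetical) call site for _WaitForSync; callers
# generally treat a False result as "disks still degraded":
#   if not _WaitForSync(lu, instance):
#     raise errors.OpExecError("Disks of instance %s are degraded" %
#                              instance.name)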
4568 def _BlockdevFind(lu, node, dev, instance):
4569 """Wrapper around call_blockdev_find to annotate diskparams.
4571 @param lu: A reference to the lu object
4572 @param node: The node to call out
4573 @param dev: The device to find
4574 @param instance: The instance object the device belongs to
4575 @returns The result of the rpc call
4578 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4579 return lu.rpc.call_blockdev_find(node, disk)
4582 def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
4583 """Wrapper around L{_CheckDiskConsistencyInner}.
4586 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4587 return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
4588 ldisk=ldisk)
4591 def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
4592 ldisk=False):
4593 """Check that mirrors are not degraded.
4595 @attention: The device has to be annotated already.
4597 The ldisk parameter, if True, will change the test from the
4598 is_degraded attribute (which represents overall non-ok status for
4599 the device(s)) to the ldisk (representing the local storage status).
4602 lu.cfg.SetDiskID(dev, node)
4604 result = True
4606 if on_primary or dev.AssembleOnSecondary():
4607 rstats = lu.rpc.call_blockdev_find(node, dev)
4608 msg = rstats.fail_msg
4609 if msg:
4610 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
4611 result = False
4612 elif not rstats.payload:
4613 lu.LogWarning("Can't find disk on node %s", node)
4614 result = False
4615 else:
4616 if ldisk:
4617 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
4618 else:
4619 result = result and not rstats.payload.is_degraded
4621 if dev.children:
4622 for child in dev.children:
4623 result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
4624 on_primary)
4626 return result
4629 class LUOobCommand(NoHooksLU):
4630 """Logical unit for OOB handling.
4634 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
4636 def ExpandNames(self):
4637 """Gather locks we need.
4640 if self.op.node_names:
4641 self.op.node_names = _GetWantedNodes(self, self.op.node_names)
4642 lock_names = self.op.node_names
4644 lock_names = locking.ALL_SET
4646 self.needed_locks = {
4647 locking.LEVEL_NODE: lock_names,
4648 }
4650 def CheckPrereq(self):
4651 """Check prerequisites.
4654 - the node exists in the configuration
4657 Any errors are signaled by raising errors.OpPrereqError.
4661 self.master_node = self.cfg.GetMasterNode()
4663 assert self.op.power_delay >= 0.0
4665 if self.op.node_names:
4666 if (self.op.command in self._SKIP_MASTER and
4667 self.master_node in self.op.node_names):
4668 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
4669 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
4671 if master_oob_handler:
4672 additional_text = ("run '%s %s %s' if you want to operate on the"
4673 " master regardless") % (master_oob_handler,
4677 additional_text = "it does not support out-of-band operations"
4679 raise errors.OpPrereqError(("Operating on the master node %s is not"
4680 " allowed for %s; %s") %
4681 (self.master_node, self.op.command,
4682 additional_text), errors.ECODE_INVAL)
4683 else:
4684 self.op.node_names = self.cfg.GetNodeList()
4685 if self.op.command in self._SKIP_MASTER:
4686 self.op.node_names.remove(self.master_node)
4688 if self.op.command in self._SKIP_MASTER:
4689 assert self.master_node not in self.op.node_names
4690 self.nodes = []
4691 for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
4692 if node is None:
4693 raise errors.OpPrereqError("Node %s not found" % node_name,
4694 errors.ECODE_NOENT)
4695 else:
4696 self.nodes.append(node)
4698 if (not self.op.ignore_status and
4699 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
4700 raise errors.OpPrereqError(("Cannot power off node %s because it is"
4701 " not marked offline") % node_name,
4704 def Exec(self, feedback_fn):
4705 """Execute OOB and return result if we expect any.
4708 master_node = self.master_node
4709 ret = []
4711 for idx, node in enumerate(utils.NiceSort(self.nodes,
4712 key=lambda node: node.name)):
4713 node_entry = [(constants.RS_NORMAL, node.name)]
4714 ret.append(node_entry)
4716 oob_program = _SupportsOob(self.cfg, node)
4718 if not oob_program:
4719 node_entry.append((constants.RS_UNAVAIL, None))
4720 continue
4722 logging.info("Executing out-of-band command '%s' using '%s' on %s",
4723 self.op.command, oob_program, node.name)
4724 result = self.rpc.call_run_oob(master_node, oob_program,
4725 self.op.command, node.name,
4726 self.op.timeout)
4728 if result.fail_msg:
4729 self.LogWarning("Out-of-band RPC failed on node '%s': %s",
4730 node.name, result.fail_msg)
4731 node_entry.append((constants.RS_NODATA, None))
4732 else:
4733 try:
4734 self._CheckPayload(result)
4735 except errors.OpExecError, err:
4736 self.LogWarning("Payload returned by node '%s' is not valid: %s",
4738 node_entry.append((constants.RS_NODATA, None))
4739 else:
4740 if self.op.command == constants.OOB_HEALTH:
4741 # For health we should log important events
4742 for item, status in result.payload:
4743 if status in [constants.OOB_STATUS_WARNING,
4744 constants.OOB_STATUS_CRITICAL]:
4745 self.LogWarning("Item '%s' on node '%s' has status '%s'",
4746 item, node.name, status)
4748 if self.op.command == constants.OOB_POWER_ON:
4749 node.powered = True
4750 elif self.op.command == constants.OOB_POWER_OFF:
4751 node.powered = False
4752 elif self.op.command == constants.OOB_POWER_STATUS:
4753 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
4754 if powered != node.powered:
4755 logging.warning(("Recorded power state (%s) of node '%s' does not"
4756 " match actual power state (%s)"), node.powered,
4759 # For configuration changing commands we should update the node
4760 if self.op.command in (constants.OOB_POWER_ON,
4761 constants.OOB_POWER_OFF):
4762 self.cfg.Update(node, feedback_fn)
4764 node_entry.append((constants.RS_NORMAL, result.payload))
4766 if (self.op.command == constants.OOB_POWER_ON and
4767 idx < len(self.nodes) - 1):
4768 time.sleep(self.op.power_delay)
4770 return ret
4772 def _CheckPayload(self, result):
4773 """Checks if the payload is valid.
4775 @param result: RPC result
4776 @raises errors.OpExecError: If payload is not valid
4780 if self.op.command == constants.OOB_HEALTH:
4781 if not isinstance(result.payload, list):
4782 errs.append("command 'health' is expected to return a list but got %s" %
4783 type(result.payload))
4784 else:
4785 for item, status in result.payload:
4786 if status not in constants.OOB_STATUSES:
4787 errs.append("health item '%s' has invalid status '%s'" %
4790 if self.op.command == constants.OOB_POWER_STATUS:
4791 if not isinstance(result.payload, dict):
4792 errs.append("power-status is expected to return a dict but got %s" %
4793 type(result.payload))
4795 if self.op.command in [
4796 constants.OOB_POWER_ON,
4797 constants.OOB_POWER_OFF,
4798 constants.OOB_POWER_CYCLE,
4800 if result.payload is not None:
4801 errs.append("%s is expected to not return payload but got '%s'" %
4802 (self.op.command, result.payload))
4805 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
4806 utils.CommaJoin(errs))
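# Payload shapes enforced above, for quick reference:
#   OOB_HEALTH             -> list of (item, status), status in OOB_STATUSES
#   OOB_POWER_STATUS       -> dict with the OOB_POWER_STATUS_POWERED key
#   OOB_POWER_ON/OFF/CYCLE -> no payload (None)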
4809 class _OsQuery(_QueryBase):
4810 FIELDS = query.OS_FIELDS
4812 def ExpandNames(self, lu):
4813 # Lock all nodes in shared mode
4814 # Temporary removal of locks, should be reverted later
4815 # TODO: reintroduce locks when they are lighter-weight
4816 lu.needed_locks = {}
4817 #self.share_locks[locking.LEVEL_NODE] = 1
4818 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4820 # The following variables interact with _QueryBase._GetNames
4821 if self.names:
4822 self.wanted = self.names
4823 else:
4824 self.wanted = locking.ALL_SET
4826 self.do_locking = self.use_locking
4828 def DeclareLocks(self, lu, level):
4829 pass
4831 @staticmethod
4832 def _DiagnoseByOS(rlist):
4833 """Remaps a per-node return list into an a per-os per-node dictionary
4835 @param rlist: a map with node names as keys and OS objects as values
4838 @return: a dictionary with osnames as keys and as value another
4839 map, with nodes as keys and tuples of (path, status, diagnose,
4840 variants, parameters, api_versions) as values, eg::
4842 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
4843 (/srv/..., False, "invalid api")],
4844 "node2": [(/srv/..., True, "", [], [])]}
4849 # we build here the list of nodes that didn't fail the RPC (at RPC
4850 # level), so that nodes with a non-responding node daemon don't
4851 # make all OSes invalid
4852 good_nodes = [node_name for node_name in rlist
4853 if not rlist[node_name].fail_msg]
4854 for node_name, nr in rlist.items():
4855 if nr.fail_msg or not nr.payload:
4856 continue
4857 for (name, path, status, diagnose, variants,
4858 params, api_versions) in nr.payload:
4859 if name not in all_os:
4860 # build a list of nodes for this os containing empty lists
4861 # for each node in node_list
4862 all_os[name] = {}
4863 for nname in good_nodes:
4864 all_os[name][nname] = []
4865 # convert params from [name, help] to (name, help)
4866 params = [tuple(v) for v in params]
4867 all_os[name][node_name].append((path, status, diagnose,
4868 variants, params, api_versions))
4870 return all_os
4871 def _GetQueryData(self, lu):
4872 """Computes the list of nodes and their attributes.
4875 # Locking is not used
4876 assert not (compat.any(lu.glm.is_owned(level)
4877 for level in locking.LEVELS
4878 if level != locking.LEVEL_CLUSTER) or
4879 self.do_locking or self.use_locking)
4881 valid_nodes = [node.name
4882 for node in lu.cfg.GetAllNodesInfo().values()
4883 if not node.offline and node.vm_capable]
4884 pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
4885 cluster = lu.cfg.GetClusterInfo()
4889 for (os_name, os_data) in pol.items():
4890 info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
4891 hidden=(os_name in cluster.hidden_os),
4892 blacklisted=(os_name in cluster.blacklisted_os))
4894 variants = set()
4895 parameters = set()
4896 api_versions = set()
4898 for idx, osl in enumerate(os_data.values()):
4899 info.valid = bool(info.valid and osl and osl[0][1])
4900 if not info.valid:
4901 break
4903 (node_variants, node_params, node_api) = osl[0][3:6]
4904 if idx == 0:
4905 # First entry
4906 variants.update(node_variants)
4907 parameters.update(node_params)
4908 api_versions.update(node_api)
4909 else:
4910 # Filter out inconsistent values
4911 variants.intersection_update(node_variants)
4912 parameters.intersection_update(node_params)
4913 api_versions.intersection_update(node_api)
4915 info.variants = list(variants)
4916 info.parameters = list(parameters)
4917 info.api_versions = list(api_versions)
4919 data[os_name] = info
4921 # Prepare data in requested order
4922 return [data[name] for name in self._GetNames(lu, pol.keys(), None)
4923 if name in data]
4926 class LUOsDiagnose(NoHooksLU):
4927 """Logical unit for OS diagnose/query.
4933 def _BuildFilter(fields, names):
4934 """Builds a filter for querying OSes.
4937 name_filter = qlang.MakeSimpleFilter("name", names)
4939 # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
4940 # respective field is not requested
4941 status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
4942 for fname in ["hidden", "blacklisted"]
4943 if fname not in fields]
4944 if "valid" not in fields:
4945 status_filter.append([qlang.OP_TRUE, "valid"])
4948 status_filter.insert(0, qlang.OP_AND)
4950 status_filter = None
4952 if name_filter and status_filter:
4953 return [qlang.OP_AND, name_filter, status_filter]
4954 elif name_filter:
4955 return name_filter
4956 else:
4957 return status_filter
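# An example of the combined filter built above (hypothetical values), when a
# name is given and none of "hidden", "blacklisted" or "valid" is requested:
#   [OP_AND,
#    [OP_OR, [OP_EQUAL, "name", "debian-image"]],
#    [OP_AND,
#     [OP_NOT, [OP_TRUE, "hidden"]],
#     [OP_NOT, [OP_TRUE, "blacklisted"]],
#     [OP_TRUE, "valid"]]]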
4959 def CheckArguments(self):
4960 self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
4961 self.op.output_fields, False)
4963 def ExpandNames(self):
4964 self.oq.ExpandNames(self)
4966 def Exec(self, feedback_fn):
4967 return self.oq.OldStyleQuery(self)
4970 class LUNodeRemove(LogicalUnit):
4971 """Logical unit for removing a node.
4974 HPATH = "node-remove"
4975 HTYPE = constants.HTYPE_NODE
4977 def BuildHooksEnv(self):
4982 "OP_TARGET": self.op.node_name,
4983 "NODE_NAME": self.op.node_name,
4986 def BuildHooksNodes(self):
4987 """Build hooks nodes.
4989 This doesn't run on the target node in the pre phase as a failed
4990 node would then be impossible to remove.
4993 all_nodes = self.cfg.GetNodeList()
4994 try:
4995 all_nodes.remove(self.op.node_name)
4996 except ValueError:
4997 pass
4998 return (all_nodes, all_nodes)
5000 def CheckPrereq(self):
5001 """Check prerequisites.
5004 - the node exists in the configuration
5005 - it does not have primary or secondary instances
5006 - it's not the master
5008 Any errors are signaled by raising errors.OpPrereqError.
5011 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5012 node = self.cfg.GetNodeInfo(self.op.node_name)
5013 assert node is not None
5015 masternode = self.cfg.GetMasterNode()
5016 if node.name == masternode:
5017 raise errors.OpPrereqError("Node is the master node, failover to another"
5018 " node is required", errors.ECODE_INVAL)
5020 for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
5021 if node.name in instance.all_nodes:
5022 raise errors.OpPrereqError("Instance %s is still running on the node,"
5023 " please remove first" % instance_name,
5025 self.op.node_name = node.name
5026 self.node = node
5028 def Exec(self, feedback_fn):
5029 """Removes the node from the cluster.
5033 logging.info("Stopping the node daemon and removing configs from node %s",
5036 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
5038 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5039 "Not owning BGL"
5041 # Promote nodes to master candidate as needed
5042 _AdjustCandidatePool(self, exceptions=[node.name])
5043 self.context.RemoveNode(node.name)
5045 # Run post hooks on the node before it's removed
5046 _RunPostHook(self, node.name)
5048 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
5049 msg = result.fail_msg
5051 self.LogWarning("Errors encountered on the remote node while leaving"
5052 " the cluster: %s", msg)
5054 # Remove node from our /etc/hosts
5055 if self.cfg.GetClusterInfo().modify_etc_hosts:
5056 master_node = self.cfg.GetMasterNode()
5057 result = self.rpc.call_etc_hosts_modify(master_node,
5058 constants.ETC_HOSTS_REMOVE,
5059 node.name, None)
5060 result.Raise("Can't update hosts file with new host data")
5061 _RedistributeAncillaryFiles(self)
5064 class _NodeQuery(_QueryBase):
5065 FIELDS = query.NODE_FIELDS
5067 def ExpandNames(self, lu):
5068 lu.needed_locks = {}
5069 lu.share_locks = _ShareAll()
5071 if self.names:
5072 self.wanted = _GetWantedNodes(lu, self.names)
5073 else:
5074 self.wanted = locking.ALL_SET
5076 self.do_locking = (self.use_locking and
5077 query.NQ_LIVE in self.requested_data)
5079 if self.do_locking:
5080 # If any non-static field is requested we need to lock the nodes
5081 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
5083 def DeclareLocks(self, lu, level):
5084 pass
5086 def _GetQueryData(self, lu):
5087 """Computes the list of nodes and their attributes.
5090 all_info = lu.cfg.GetAllNodesInfo()
5092 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
5094 # Gather data as requested
5095 if query.NQ_LIVE in self.requested_data:
5096 # filter out non-vm_capable nodes
5097 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
5099 node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
5100 [lu.cfg.GetHypervisorType()])
5101 live_data = dict((name, _MakeLegacyNodeInfo(nresult.payload))
5102 for (name, nresult) in node_data.items()
5103 if not nresult.fail_msg and nresult.payload)
5104 else:
5105 live_data = None
5107 if query.NQ_INST in self.requested_data:
5108 node_to_primary = dict([(name, set()) for name in nodenames])
5109 node_to_secondary = dict([(name, set()) for name in nodenames])
5111 inst_data = lu.cfg.GetAllInstancesInfo()
5113 for inst in inst_data.values():
5114 if inst.primary_node in node_to_primary:
5115 node_to_primary[inst.primary_node].add(inst.name)
5116 for secnode in inst.secondary_nodes:
5117 if secnode in node_to_secondary:
5118 node_to_secondary[secnode].add(inst.name)
5119 else:
5120 node_to_primary = None
5121 node_to_secondary = None
5123 if query.NQ_OOB in self.requested_data:
5124 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
5125 for name, node in all_info.iteritems())
5126 else:
5127 oob_support = None
5129 if query.NQ_GROUP in self.requested_data:
5130 groups = lu.cfg.GetAllNodeGroupsInfo()
5131 else:
5132 groups = {}
5134 return query.NodeQueryData([all_info[name] for name in nodenames],
5135 live_data, lu.cfg.GetMasterNode(),
5136 node_to_primary, node_to_secondary, groups,
5137 oob_support, lu.cfg.GetClusterInfo())
5140 class LUNodeQuery(NoHooksLU):
5141 """Logical unit for querying nodes.
5144 # pylint: disable=W0142
5147 def CheckArguments(self):
5148 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
5149 self.op.output_fields, self.op.use_locking)
5151 def ExpandNames(self):
5152 self.nq.ExpandNames(self)
5154 def DeclareLocks(self, level):
5155 self.nq.DeclareLocks(self, level)
5157 def Exec(self, feedback_fn):
5158 return self.nq.OldStyleQuery(self)
5161 class LUNodeQueryvols(NoHooksLU):
5162 """Logical unit for getting volumes on node(s).
5166 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
5167 _FIELDS_STATIC = utils.FieldSet("node")
5169 def CheckArguments(self):
5170 _CheckOutputFields(static=self._FIELDS_STATIC,
5171 dynamic=self._FIELDS_DYNAMIC,
5172 selected=self.op.output_fields)
5174 def ExpandNames(self):
5175 self.share_locks = _ShareAll()
5176 self.needed_locks = {}
5178 if not self.op.nodes:
5179 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5181 self.needed_locks[locking.LEVEL_NODE] = \
5182 _GetWantedNodes(self, self.op.nodes)
5184 def Exec(self, feedback_fn):
5185 """Computes the list of nodes and their attributes.
5188 nodenames = self.owned_locks(locking.LEVEL_NODE)
5189 volumes = self.rpc.call_node_volumes(nodenames)
5191 ilist = self.cfg.GetAllInstancesInfo()
5192 vol2inst = _MapInstanceDisksToNodes(ilist.values())
5195 for node in nodenames:
5196 nresult = volumes[node]
5197 if nresult.offline:
5198 continue
5199 msg = nresult.fail_msg
5200 if msg:
5201 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
5202 continue
5204 node_vols = sorted(nresult.payload,
5205 key=operator.itemgetter("dev"))
5207 for vol in node_vols:
5208 node_output = []
5209 for field in self.op.output_fields:
5210 if field == "node":
5211 val = node
5212 elif field == "phys":
5213 val = vol["dev"]
5214 elif field == "vg":
5215 val = vol["vg"]
5216 elif field == "name":
5217 val = vol["name"]
5218 elif field == "size":
5219 val = int(float(vol["size"]))
5220 elif field == "instance":
5221 val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
5223 raise errors.ParameterError(field)
5224 node_output.append(str(val))
5226 output.append(node_output)
5228 return output
5231 class LUNodeQueryStorage(NoHooksLU):
5232 """Logical unit for getting information on storage units on node(s).
5235 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
5238 def CheckArguments(self):
5239 _CheckOutputFields(static=self._FIELDS_STATIC,
5240 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
5241 selected=self.op.output_fields)
5243 def ExpandNames(self):
5244 self.share_locks = _ShareAll()
5245 self.needed_locks = {}
5247 if self.op.nodes:
5248 self.needed_locks[locking.LEVEL_NODE] = \
5249 _GetWantedNodes(self, self.op.nodes)
5250 else:
5251 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5253 def Exec(self, feedback_fn):
5254 """Computes the list of nodes and their attributes.
5257 self.nodes = self.owned_locks(locking.LEVEL_NODE)
5259 # Always get name to sort by
5260 if constants.SF_NAME in self.op.output_fields:
5261 fields = self.op.output_fields[:]
5262 else:
5263 fields = [constants.SF_NAME] + self.op.output_fields
5265 # Never ask for node or type as it's only known to the LU
5266 for extra in [constants.SF_NODE, constants.SF_TYPE]:
5267 while extra in fields:
5268 fields.remove(extra)
5270 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
5271 name_idx = field_idx[constants.SF_NAME]
5273 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5274 data = self.rpc.call_storage_list(self.nodes,
5275 self.op.storage_type, st_args,
5276 self.op.name, fields)
5280 for node in utils.NiceSort(self.nodes):
5281 nresult = data[node]
5282 if nresult.offline:
5283 continue
5285 msg = nresult.fail_msg
5286 if msg:
5287 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
5288 continue
5290 rows = dict([(row[name_idx], row) for row in nresult.payload])
5292 for name in utils.NiceSort(rows.keys()):
5293 row = rows[name]
5295 out = []
5297 for field in self.op.output_fields:
5298 if field == constants.SF_NODE:
5299 val = node
5300 elif field == constants.SF_TYPE:
5301 val = self.op.storage_type
5302 elif field in field_idx:
5303 val = row[field_idx[field]]
5304 else:
5305 raise errors.ParameterError(field)
5314 class _InstanceQuery(_QueryBase):
5315 FIELDS = query.INSTANCE_FIELDS
5317 def ExpandNames(self, lu):
5318 lu.needed_locks = {}
5319 lu.share_locks = _ShareAll()
5321 if self.names:
5322 self.wanted = _GetWantedInstances(lu, self.names)
5323 else:
5324 self.wanted = locking.ALL_SET
5326 self.do_locking = (self.use_locking and
5327 query.IQ_LIVE in self.requested_data)
5328 if self.do_locking:
5329 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5330 lu.needed_locks[locking.LEVEL_NODEGROUP] = []
5331 lu.needed_locks[locking.LEVEL_NODE] = []
5332 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5334 self.do_grouplocks = (self.do_locking and
5335 query.IQ_NODES in self.requested_data)
5337 def DeclareLocks(self, lu, level):
5339 if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
5340 assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
5342 # Lock all groups used by instances optimistically; this requires going
5343 # via the node before it's locked, requiring verification later on
5344 lu.needed_locks[locking.LEVEL_NODEGROUP] = \
5346 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
5347 for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
5348 elif level == locking.LEVEL_NODE:
5349 lu._LockInstancesNodes() # pylint: disable=W0212
5352 def _CheckGroupLocks(lu):
5353 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
5354 owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
5356 # Check if node groups for locked instances are still correct
5357 for instance_name in owned_instances:
5358 _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
5360 def _GetQueryData(self, lu):
5361 """Computes the list of instances and their attributes.
5364 if self.do_grouplocks:
5365 self._CheckGroupLocks(lu)
5367 cluster = lu.cfg.GetClusterInfo()
5368 all_info = lu.cfg.GetAllInstancesInfo()
5370 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
5372 instance_list = [all_info[name] for name in instance_names]
5373 nodes = frozenset(itertools.chain(*(inst.all_nodes
5374 for inst in instance_list)))
5375 hv_list = list(set([inst.hypervisor for inst in instance_list]))
5378 wrongnode_inst = set()
5380 # Gather data as requested
5381 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
5383 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
5384 for name in nodes:
5385 result = node_data[name]
5386 if result.offline:
5387 # offline nodes will be in both lists
5388 assert result.fail_msg
5389 offline_nodes.append(name)
5390 if result.fail_msg:
5391 bad_nodes.append(name)
5392 elif result.payload:
5393 for inst in result.payload:
5394 if inst in all_info:
5395 if all_info[inst].primary_node == name:
5396 live_data.update(result.payload)
5398 wrongnode_inst.add(inst)
5400 # orphan instance; we don't list it here as we don't
5401 # handle this case yet in the output of instance listing
5402 logging.warning("Orphan instance '%s' found on node %s",
5404 # else no instance is alive
5408 if query.IQ_DISKUSAGE in self.requested_data:
5409 disk_usage = dict((inst.name,
5410 _ComputeDiskSize(inst.disk_template,
5411 [{constants.IDISK_SIZE: disk.size}
5412 for disk in inst.disks]))
5413 for inst in instance_list)
5417 if query.IQ_CONSOLE in self.requested_data:
5418 consinfo = {}
5419 for inst in instance_list:
5420 if inst.name in live_data:
5421 # Instance is running
5422 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
5423 else:
5424 consinfo[inst.name] = None
5425 assert set(consinfo.keys()) == set(instance_names)
5429 if query.IQ_NODES in self.requested_data:
5430 node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
5432 nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
5433 groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
5434 for uuid in set(map(operator.attrgetter("group"),
5440 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
5441 disk_usage, offline_nodes, bad_nodes,
5442 live_data, wrongnode_inst, consinfo,
5443 nodes, groups)
5446 class LUQuery(NoHooksLU):
5447 """Query for resources/items of a certain kind.
5450 # pylint: disable=W0142
5453 def CheckArguments(self):
5454 qcls = _GetQueryImplementation(self.op.what)
5456 self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
5458 def ExpandNames(self):
5459 self.impl.ExpandNames(self)
5461 def DeclareLocks(self, level):
5462 self.impl.DeclareLocks(self, level)
5464 def Exec(self, feedback_fn):
5465 return self.impl.NewStyleQuery(self)
5468 class LUQueryFields(NoHooksLU):
5469 """Query for resources/items of a certain kind.
5472 # pylint: disable=W0142
5475 def CheckArguments(self):
5476 self.qcls = _GetQueryImplementation(self.op.what)
5478 def ExpandNames(self):
5479 self.needed_locks = {}
5481 def Exec(self, feedback_fn):
5482 return query.QueryFields(self.qcls.FIELDS, self.op.fields)
5485 class LUNodeModifyStorage(NoHooksLU):
5486 """Logical unit for modifying a storage volume on a node.
5491 def CheckArguments(self):
5492 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5494 storage_type = self.op.storage_type
5497 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
5499 raise errors.OpPrereqError("Storage units of type '%s' can not be"
5500 " modified" % storage_type,
5503 diff = set(self.op.changes.keys()) - modifiable
5505 raise errors.OpPrereqError("The following fields can not be modified for"
5506 " storage units of type '%s': %r" %
5507 (storage_type, list(diff)),
5510 def ExpandNames(self):
5511 self.needed_locks = {
5512 locking.LEVEL_NODE: self.op.node_name,
5513 }
5515 def Exec(self, feedback_fn):
5516 """Computes the list of nodes and their attributes.
5519 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5520 result = self.rpc.call_storage_modify(self.op.node_name,
5521 self.op.storage_type, st_args,
5522 self.op.name, self.op.changes)
5523 result.Raise("Failed to modify storage unit '%s' on %s" %
5524 (self.op.name, self.op.node_name))
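# Illustrative opcode usage (a sketch; which fields are accepted depends on
# constants.MODIFIABLE_STORAGE_FIELDS for the storage type in question):
#   OpNodeModifyStorage(node_name="node1.example.com",
#                       storage_type=constants.ST_LVM_PV,
#                       name="/dev/sda3",
#                       changes={constants.SF_ALLOCATABLE: True})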
5527 class LUNodeAdd(LogicalUnit):
5528 """Logical unit for adding node to the cluster.
5532 HTYPE = constants.HTYPE_NODE
5533 _NFLAGS = ["master_capable", "vm_capable"]
5535 def CheckArguments(self):
5536 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
5537 # validate/normalize the node name
5538 self.hostname = netutils.GetHostname(name=self.op.node_name,
5539 family=self.primary_ip_family)
5540 self.op.node_name = self.hostname.name
5542 if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
5543 raise errors.OpPrereqError("Cannot readd the master node",
5546 if self.op.readd and self.op.group:
5547 raise errors.OpPrereqError("Cannot pass a node group when a node is"
5548 " being readded", errors.ECODE_INVAL)
5550 def BuildHooksEnv(self):
5553 This will run on all nodes before, and on all nodes + the new node after.
5557 "OP_TARGET": self.op.node_name,
5558 "NODE_NAME": self.op.node_name,
5559 "NODE_PIP": self.op.primary_ip,
5560 "NODE_SIP": self.op.secondary_ip,
5561 "MASTER_CAPABLE": str(self.op.master_capable),
5562 "VM_CAPABLE": str(self.op.vm_capable),
5565 def BuildHooksNodes(self):
5566 """Build hooks nodes.
5569 # Exclude added node
5570 pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
5571 post_nodes = pre_nodes + [self.op.node_name, ]
5573 return (pre_nodes, post_nodes)
5575 def CheckPrereq(self):
5576 """Check prerequisites.
5579 - the new node is not already in the config
5581 - its parameters (single/dual homed) matches the cluster
5583 Any errors are signaled by raising errors.OpPrereqError.
5587 hostname = self.hostname
5588 node = hostname.name
5589 primary_ip = self.op.primary_ip = hostname.ip
5590 if self.op.secondary_ip is None:
5591 if self.primary_ip_family == netutils.IP6Address.family:
5592 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
5593 " IPv4 address must be given as secondary",
5595 self.op.secondary_ip = primary_ip
5597 secondary_ip = self.op.secondary_ip
5598 if not netutils.IP4Address.IsValid(secondary_ip):
5599 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5600 " address" % secondary_ip, errors.ECODE_INVAL)
5602 node_list = cfg.GetNodeList()
5603 if not self.op.readd and node in node_list:
5604 raise errors.OpPrereqError("Node %s is already in the configuration" %
5605 node, errors.ECODE_EXISTS)
5606 elif self.op.readd and node not in node_list:
5607 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
5610 self.changed_primary_ip = False
5612 for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
5613 if self.op.readd and node == existing_node_name:
5614 if existing_node.secondary_ip != secondary_ip:
5615 raise errors.OpPrereqError("Readded node doesn't have the same IP"
5616 " address configuration as before",
5618 if existing_node.primary_ip != primary_ip:
5619 self.changed_primary_ip = True
5621 continue
5623 if (existing_node.primary_ip == primary_ip or
5624 existing_node.secondary_ip == primary_ip or
5625 existing_node.primary_ip == secondary_ip or
5626 existing_node.secondary_ip == secondary_ip):
5627 raise errors.OpPrereqError("New node ip address(es) conflict with"
5628 " existing node %s" % existing_node.name,
5629 errors.ECODE_NOTUNIQUE)
5631 # After this 'if' block, None is no longer a valid value for the
5632 # _capable op attributes
5633 if self.op.readd:
5634 old_node = self.cfg.GetNodeInfo(node)
5635 assert old_node is not None, "Can't retrieve locked node %s" % node
5636 for attr in self._NFLAGS:
5637 if getattr(self.op, attr) is None:
5638 setattr(self.op, attr, getattr(old_node, attr))
5639 else:
5640 for attr in self._NFLAGS:
5641 if getattr(self.op, attr) is None:
5642 setattr(self.op, attr, True)
5644 if self.op.readd and not self.op.vm_capable:
5645 pri, sec = cfg.GetNodeInstances(node)
5647 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
5648 " flag set to false, but it already holds"
5649 " instances" % node,
5652 # check that the type of the node (single versus dual homed) is the
5653 # same as for the master
5654 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
5655 master_singlehomed = myself.secondary_ip == myself.primary_ip
5656 newbie_singlehomed = secondary_ip == primary_ip
5657 if master_singlehomed != newbie_singlehomed:
5658 if master_singlehomed:
5659 raise errors.OpPrereqError("The master has no secondary ip but the"
5660 " new node has one",
5663 raise errors.OpPrereqError("The master has a secondary ip but the"
5664 " new node doesn't have one",
5667 # checks reachability
5668 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
5669 raise errors.OpPrereqError("Node not reachable by ping",
5670 errors.ECODE_ENVIRON)
5672 if not newbie_singlehomed:
5673 # check reachability from my secondary ip to newbie's secondary ip
5674 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
5675 source=myself.secondary_ip):
5676 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5677 " based ping to node daemon port",
5678 errors.ECODE_ENVIRON)
if self.op.readd:
  exceptions = [node]
else:
  exceptions = []

if self.op.master_capable:
  self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
else:
  self.master_candidate = False
if self.op.readd:
  self.new_node = old_node
else:
  node_group = cfg.LookupNodeGroup(self.op.group)
5694 self.new_node = objects.Node(name=node,
5695 primary_ip=primary_ip,
5696 secondary_ip=secondary_ip,
5697 master_candidate=self.master_candidate,
offline=False, drained=False,
group=node_group, ndparams={})
5701 if self.op.ndparams:
5702 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
5704 if self.op.hv_state:
5705 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
5707 if self.op.disk_state:
5708 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
5710 # TODO: If we need to have multiple DnsOnlyRunner we probably should make
5711 # it a property on the base class.
5712 result = rpc.DnsOnlyRunner().call_version([node])[node]
5713 result.Raise("Can't get version information from node %s" % node)
5714 if constants.PROTOCOL_VERSION == result.payload:
5715 logging.info("Communication to node %s fine, sw version %s match",
5716 node, result.payload)
else:
  raise errors.OpPrereqError("Version mismatch master version %s,"
5719 " node version %s" %
5720 (constants.PROTOCOL_VERSION, result.payload),
5721 errors.ECODE_ENVIRON)
5723 def Exec(self, feedback_fn):
5724 """Adds the new node to the cluster.
5727 new_node = self.new_node
5728 node = new_node.name
assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
  "Not owning BGL"

# We are adding a new node, so we assume it's powered
5734 new_node.powered = True
5736 # for re-adds, reset the offline/drained/master-candidate flags;
5737 # we need to reset here, otherwise offline would prevent RPC calls
5738 # later in the procedure; this also means that if the re-add
5739 # fails, we are left with a non-offlined, broken node
if self.op.readd:
  new_node.drained = new_node.offline = False # pylint: disable=W0201
5742 self.LogInfo("Readding a node, the offline/drained flags were reset")
5743 # if we demote the node, we do cleanup later in the procedure
5744 new_node.master_candidate = self.master_candidate
5745 if self.changed_primary_ip:
5746 new_node.primary_ip = self.op.primary_ip
5748 # copy the master/vm_capable flags
5749 for attr in self._NFLAGS:
5750 setattr(new_node, attr, getattr(self.op, attr))
5752 # notify the user about any possible mc promotion
5753 if new_node.master_candidate:
5754 self.LogInfo("Node will be a master candidate")
5756 if self.op.ndparams:
5757 new_node.ndparams = self.op.ndparams
else:
  new_node.ndparams = {}
5761 if self.op.hv_state:
5762 new_node.hv_state_static = self.new_hv_state
5764 if self.op.disk_state:
5765 new_node.disk_state_static = self.new_disk_state
5767 # Add node to our /etc/hosts, and add key to known_hosts
5768 if self.cfg.GetClusterInfo().modify_etc_hosts:
5769 master_node = self.cfg.GetMasterNode()
5770 result = self.rpc.call_etc_hosts_modify(master_node,
constants.ETC_HOSTS_ADD,
self.hostname.name, self.hostname.ip)
5774 result.Raise("Can't update hosts file with new host data")
5776 if new_node.secondary_ip != new_node.primary_ip:
_CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                         False)
5780 node_verify_list = [self.cfg.GetMasterNode()]
5781 node_verify_param = {
5782 constants.NV_NODELIST: ([node], {}),
# TODO: do a node-net-test as well?
}
5786 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
5787 self.cfg.GetClusterName())
5788 for verifier in node_verify_list:
5789 result[verifier].Raise("Cannot communicate with node %s" % verifier)
5790 nl_payload = result[verifier].payload[constants.NV_NODELIST]
if nl_payload:
  for failed in nl_payload:
5793 feedback_fn("ssh/hostname verification failed"
5794 " (checking from %s): %s" %
5795 (verifier, nl_payload[failed]))
5796 raise errors.OpExecError("ssh/hostname verification failed")
if self.op.readd:
  _RedistributeAncillaryFiles(self)
5800 self.context.ReaddNode(new_node)
5801 # make sure we redistribute the config
5802 self.cfg.Update(new_node, feedback_fn)
5803 # and make sure the new node will not have old files around
5804 if not new_node.master_candidate:
5805 result = self.rpc.call_node_demote_from_mc(new_node.name)
5806 msg = result.fail_msg
if msg:
  self.LogWarning("Node failed to demote itself from master"
                  " candidate status: %s" % msg)
else:
  _RedistributeAncillaryFiles(self, additional_nodes=[node],
5812 additional_vm=self.op.vm_capable)
5813 self.context.AddNode(new_node, self.proc.GetECId())
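# A minimal module-level sketch of the single/dual-homed compatibility rule
# enforced by LUNodeAdd.CheckPrereq above; the helper name and the IP
# addresses are hypothetical and only illustrate the check:
def _ExampleHomingMatches(master_primary, master_secondary,
                          node_primary, node_secondary):
  """Returns True iff the new node has the same homing type as the master."""
  master_singlehomed = master_secondary == master_primary
  newbie_singlehomed = node_secondary == node_primary
  return master_singlehomed == newbie_singlehomed

# E.g. a dual-homed node may not join a single-homed cluster:
assert not _ExampleHomingMatches("192.0.2.1", "192.0.2.1",
                                 "192.0.2.2", "198.51.100.2")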
5816 class LUNodeSetParams(LogicalUnit):
5817 """Modifies the parameters of a node.
5819 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5820 to the node role (as _ROLE_*)
5821 @cvar _R2F: a dictionary from node role to tuples of flags
@cvar _FLAGS: a list of attribute names corresponding to the flags

"""
5825 HPATH = "node-modify"
5826 HTYPE = constants.HTYPE_NODE
5828 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
_F2R = {
  (True, False, False): _ROLE_CANDIDATE,
  (False, True, False): _ROLE_DRAINED,
  (False, False, True): _ROLE_OFFLINE,
  (False, False, False): _ROLE_REGULAR,
  }
5835 _R2F = dict((v, k) for k, v in _F2R.items())
5836 _FLAGS = ["master_candidate", "drained", "offline"]
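  # Sanity sketch of the role tables above (illustrative assertions added
  # here; they run at class-definition time and show that the two mappings
  # are inverses of each other):
  assert _F2R[(False, True, False)] == _ROLE_DRAINED
  assert _R2F[_ROLE_DRAINED] == (False, True, False)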
5838 def CheckArguments(self):
5839 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5840 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
5841 self.op.master_capable, self.op.vm_capable,
self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
self.op.disk_state]
5844 if all_mods.count(None) == len(all_mods):
raise errors.OpPrereqError("Please pass at least one modification",
                           errors.ECODE_INVAL)
5847 if all_mods.count(True) > 1:
5848 raise errors.OpPrereqError("Can't set the node into more than one"
5849 " state at the same time",
5852 # Boolean value that tells us whether we might be demoting from MC
5853 self.might_demote = (self.op.master_candidate == False or
5854 self.op.offline == True or
5855 self.op.drained == True or
5856 self.op.master_capable == False)
5858 if self.op.secondary_ip:
5859 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
5860 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5861 " address" % self.op.secondary_ip,
5864 self.lock_all = self.op.auto_promote and self.might_demote
5865 self.lock_instances = self.op.secondary_ip is not None
5867 def _InstanceFilter(self, instance):
5868 """Filter for getting affected instances.
5871 return (instance.disk_template in constants.DTS_INT_MIRROR and
5872 self.op.node_name in instance.all_nodes)
5874 def ExpandNames(self):
if self.lock_all:
  self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
else:
  self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
5880 # Since modifying a node can have severe effects on currently running
5881 # operations the resource lock is at least acquired in shared mode
5882 self.needed_locks[locking.LEVEL_NODE_RES] = \
5883 self.needed_locks[locking.LEVEL_NODE]
5885 # Get node resource and instance locks in shared mode; they are not used
5886 # for anything but read-only access
5887 self.share_locks[locking.LEVEL_NODE_RES] = 1
5888 self.share_locks[locking.LEVEL_INSTANCE] = 1
5890 if self.lock_instances:
5891 self.needed_locks[locking.LEVEL_INSTANCE] = \
5892 frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on the master node.

  """
  return {
    "OP_TARGET": self.op.node_name,
    "MASTER_CANDIDATE": str(self.op.master_candidate),
    "OFFLINE": str(self.op.offline),
    "DRAINED": str(self.op.drained),
    "MASTER_CAPABLE": str(self.op.master_capable),
    "VM_CAPABLE": str(self.op.vm_capable),
    }
5909 def BuildHooksNodes(self):
5910 """Build hooks nodes.
5913 nl = [self.cfg.GetMasterNode(), self.op.node_name]
5916 def CheckPrereq(self):
5917 """Check prerequisites.
This only checks the instance list against the existing names.

"""
5922 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
5924 if self.lock_instances:
5925 affected_instances = \
5926 self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
5928 # Verify instance locks
5929 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
5930 wanted_instances = frozenset(affected_instances.keys())
5931 if wanted_instances - owned_instances:
5932 raise errors.OpPrereqError("Instances affected by changing node %s's"
5933 " secondary IP address have changed since"
5934 " locks were acquired, wanted '%s', have"
5935 " '%s'; retry the operation" %
5937 utils.CommaJoin(wanted_instances),
5938 utils.CommaJoin(owned_instances)),
5941 affected_instances = None
5943 if (self.op.master_candidate is not None or
5944 self.op.drained is not None or
5945 self.op.offline is not None):
5946 # we can't change the master's node flags
5947 if self.op.node_name == self.cfg.GetMasterNode():
5948 raise errors.OpPrereqError("The master role can be changed"
5949 " only via master-failover",
5952 if self.op.master_candidate and not node.master_capable:
5953 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
5954 " it a master candidate" % node.name,
5957 if self.op.vm_capable == False:
(ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
if ipri or isec:
  raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                             " the vm_capable flag" % node.name,
                             errors.ECODE_STATE)
5964 if node.master_candidate and self.might_demote and not self.lock_all:
5965 assert not self.op.auto_promote, "auto_promote set but lock_all not"
# check if after removing the current node, we're missing master
# candidates
5968 (mc_remaining, mc_should, _) = \
5969 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
5970 if mc_remaining < mc_should:
5971 raise errors.OpPrereqError("Not enough master candidates, please"
5972 " pass auto promote option to allow"
5973 " promotion (--auto-promote or RAPI"
5974 " auto_promote=True)", errors.ECODE_STATE)
5976 self.old_flags = old_flags = (node.master_candidate,
5977 node.drained, node.offline)
5978 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
5979 self.old_role = old_role = self._F2R[old_flags]
5981 # Check for ineffective changes
5982 for attr in self._FLAGS:
5983 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5984 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5985 setattr(self.op, attr, None)
5987 # Past this point, any flag change to False means a transition
5988 # away from the respective state, as only real changes are kept
5990 # TODO: We might query the real power state if it supports OOB
5991 if _SupportsOob(self.cfg, node):
5992 if self.op.offline is False and not (node.powered or
5993 self.op.powered == True):
5994 raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5995 " offline status can be reset") %
5997 elif self.op.powered is not None:
5998 raise errors.OpPrereqError(("Unable to change powered state for node %s"
5999 " as it does not support out-of-band"
6000 " handling") % self.op.node_name)
6002 # If we're being deofflined/drained, we'll MC ourself if needed
6003 if (self.op.drained == False or self.op.offline == False or
6004 (self.op.master_capable and not node.master_capable)):
6005 if _DecideSelfPromotion(self):
6006 self.op.master_candidate = True
6007 self.LogInfo("Auto-promoting node to master candidate")
6009 # If we're no longer master capable, we'll demote ourselves from MC
6010 if self.op.master_capable == False and node.master_candidate:
6011 self.LogInfo("Demoting from master candidate")
6012 self.op.master_candidate = False
6015 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
6016 if self.op.master_candidate:
6017 new_role = self._ROLE_CANDIDATE
6018 elif self.op.drained:
6019 new_role = self._ROLE_DRAINED
6020 elif self.op.offline:
6021 new_role = self._ROLE_OFFLINE
6022 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
# False is still in new flags, which means we're un-setting (the
# last) flag
6025 new_role = self._ROLE_REGULAR
else: # no new flags, nothing, keep old role
  new_role = old_role
6029 self.new_role = new_role
6031 if old_role == self._ROLE_OFFLINE and new_role != old_role:
6032 # Trying to transition out of offline status
6033 result = self.rpc.call_version([node.name])[node.name]
if result.fail_msg:
  raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                             " to report its version: %s" %
                             (node.name, result.fail_msg),
                             errors.ECODE_STATE)
else:
  self.LogWarning("Transitioning node from offline to online state"
                  " without using re-add. Please make sure the node"
                  " is healthy!")
6044 if self.op.secondary_ip:
6045 # Ok even without locking, because this can't be changed by any LU
6046 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
6047 master_singlehomed = master.secondary_ip == master.primary_ip
6048 if master_singlehomed and self.op.secondary_ip:
6049 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
6050 " homed cluster", errors.ECODE_INVAL)
6052 assert not (frozenset(affected_instances) -
6053 self.owned_locks(locking.LEVEL_INSTANCE))
if node.offline:
  if affected_instances:
    raise errors.OpPrereqError("Cannot change secondary IP address:"
                               " offline node has instances (%s)"
                               " configured to use it" %
                               utils.CommaJoin(affected_instances.keys()))
else:
  # On online nodes, check that no instances are running, and that
6063 # the node has the new ip and we can reach it.
6064 for instance in affected_instances.values():
6065 _CheckInstanceState(self, instance, INSTANCE_DOWN,
6066 msg="cannot change secondary ip")
6068 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
6069 if master.name != node.name:
6070 # check reachability from master secondary ip to new secondary ip
6071 if not netutils.TcpPing(self.op.secondary_ip,
6072 constants.DEFAULT_NODED_PORT,
6073 source=master.secondary_ip):
6074 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
6075 " based ping to node daemon port",
6076 errors.ECODE_ENVIRON)
6078 if self.op.ndparams:
6079 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
6080 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
6081 self.new_ndparams = new_ndparams
6083 if self.op.hv_state:
6084 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
6085 self.node.hv_state_static)
6087 if self.op.disk_state:
6088 self.new_disk_state = \
6089 _MergeAndVerifyDiskState(self.op.disk_state,
6090 self.node.disk_state_static)
def Exec(self, feedback_fn):
  """Modifies a node.

  """
  node = self.node
  old_role = self.old_role
  new_role = self.new_role

  result = []
6102 if self.op.ndparams:
6103 node.ndparams = self.new_ndparams
6105 if self.op.powered is not None:
6106 node.powered = self.op.powered
6108 if self.op.hv_state:
6109 node.hv_state_static = self.new_hv_state
6111 if self.op.disk_state:
6112 node.disk_state_static = self.new_disk_state
6114 for attr in ["master_capable", "vm_capable"]:
6115 val = getattr(self.op, attr)
if val is not None:
  setattr(node, attr, val)
  result.append((attr, str(val)))
6120 if new_role != old_role:
6121 # Tell the node to demote itself, if no longer MC and not offline
6122 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
6123 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
if msg:
  self.LogWarning("Node failed to demote itself: %s", msg)
6127 new_flags = self._R2F[new_role]
6128 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
if of != nf:
  result.append((desc, str(nf)))
6131 (node.master_candidate, node.drained, node.offline) = new_flags
6133 # we locked all nodes, we adjust the CP before updating this node
if self.lock_all:
  _AdjustCandidatePool(self, [node.name])
6137 if self.op.secondary_ip:
6138 node.secondary_ip = self.op.secondary_ip
6139 result.append(("secondary_ip", self.op.secondary_ip))
6141 # this will trigger configuration file update, if needed
6142 self.cfg.Update(node, feedback_fn)
# this will trigger job queue propagation or cleanup if the mc
# flag changed
6146 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
self.context.ReaddNode(node)

return result
6152 class LUNodePowercycle(NoHooksLU):
6153 """Powercycles a node.
6158 def CheckArguments(self):
6159 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6160 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
6161 raise errors.OpPrereqError("The node is the master and the force"
6162 " parameter was not set",
6165 def ExpandNames(self):
6166 """Locking for PowercycleNode.
6168 This is a last-resort option and shouldn't block on other
jobs. Therefore, we grab no locks.

"""
6172 self.needed_locks = {}
def Exec(self, feedback_fn):
  """Reboots a node.

  """
6178 result = self.rpc.call_node_powercycle(self.op.node_name,
6179 self.cfg.GetHypervisorType())
6180 result.Raise("Failed to schedule the reboot")
6181 return result.payload
6184 class LUClusterQuery(NoHooksLU):
6185 """Query cluster configuration.
6190 def ExpandNames(self):
6191 self.needed_locks = {}
6193 def Exec(self, feedback_fn):
6194 """Return cluster config.
6197 cluster = self.cfg.GetClusterInfo()
6200 # Filter just for enabled hypervisors
6201 for os_name, hv_dict in cluster.os_hvp.items():
6202 os_hvp[os_name] = {}
6203 for hv_name, hv_params in hv_dict.items():
6204 if hv_name in cluster.enabled_hypervisors:
6205 os_hvp[os_name][hv_name] = hv_params
6207 # Convert ip_family to ip_version
6208 primary_ip_version = constants.IP4_VERSION
6209 if cluster.primary_ip_family == netutils.IP6Address.family:
6210 primary_ip_version = constants.IP6_VERSION
6213 "software_version": constants.RELEASE_VERSION,
6214 "protocol_version": constants.PROTOCOL_VERSION,
6215 "config_version": constants.CONFIG_VERSION,
6216 "os_api_version": max(constants.OS_API_VERSIONS),
6217 "export_version": constants.EXPORT_VERSION,
6218 "architecture": runtime.GetArchInfo(),
6219 "name": cluster.cluster_name,
6220 "master": cluster.master_node,
6221 "default_hypervisor": cluster.primary_hypervisor,
6222 "enabled_hypervisors": cluster.enabled_hypervisors,
6223 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
6224 for hypervisor_name in cluster.enabled_hypervisors]),
6226 "beparams": cluster.beparams,
6227 "osparams": cluster.osparams,
6228 "ipolicy": cluster.ipolicy,
6229 "nicparams": cluster.nicparams,
6230 "ndparams": cluster.ndparams,
6231 "diskparams": cluster.diskparams,
6232 "candidate_pool_size": cluster.candidate_pool_size,
6233 "master_netdev": cluster.master_netdev,
6234 "master_netmask": cluster.master_netmask,
6235 "use_external_mip_script": cluster.use_external_mip_script,
6236 "volume_group_name": cluster.volume_group_name,
6237 "drbd_usermode_helper": cluster.drbd_usermode_helper,
6238 "file_storage_dir": cluster.file_storage_dir,
6239 "shared_file_storage_dir": cluster.shared_file_storage_dir,
6240 "maintain_node_health": cluster.maintain_node_health,
6241 "ctime": cluster.ctime,
6242 "mtime": cluster.mtime,
6243 "uuid": cluster.uuid,
6244 "tags": list(cluster.GetTags()),
6245 "uid_pool": cluster.uid_pool,
6246 "default_iallocator": cluster.default_iallocator,
6247 "reserved_lvs": cluster.reserved_lvs,
6248 "primary_ip_version": primary_ip_version,
6249 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
6250 "hidden_os": cluster.hidden_os,
6251 "blacklisted_os": cluster.blacklisted_os,
6257 class LUClusterConfigQuery(NoHooksLU):
6258 """Return configuration values.
6263 def CheckArguments(self):
6264 self.cq = _ClusterQuery(None, self.op.output_fields, False)
6266 def ExpandNames(self):
6267 self.cq.ExpandNames(self)
6269 def DeclareLocks(self, level):
6270 self.cq.DeclareLocks(self, level)
6272 def Exec(self, feedback_fn):
6273 result = self.cq.OldStyleQuery(self)
assert len(result) == 1
return result[0]
6280 class _ClusterQuery(_QueryBase):
6281 FIELDS = query.CLUSTER_FIELDS
#: Do not sort (there is only one item)
SORT_FIELD = None
6286 def ExpandNames(self, lu):
6287 lu.needed_locks = {}
6289 # The following variables interact with _QueryBase._GetNames
6290 self.wanted = locking.ALL_SET
6291 self.do_locking = self.use_locking
if self.do_locking:
  raise errors.OpPrereqError("Can not use locking for cluster queries",
                             errors.ECODE_INVAL)
def DeclareLocks(self, lu, level):
  pass
6300 def _GetQueryData(self, lu):
6301 """Computes the list of nodes and their attributes.
6304 # Locking is not used
6305 assert not (compat.any(lu.glm.is_owned(level)
6306 for level in locking.LEVELS
6307 if level != locking.LEVEL_CLUSTER) or
6308 self.do_locking or self.use_locking)
6310 if query.CQ_CONFIG in self.requested_data:
6311 cluster = lu.cfg.GetClusterInfo()
else:
  cluster = NotImplemented
6315 if query.CQ_QUEUE_DRAINED in self.requested_data:
6316 drain_flag = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
else:
  drain_flag = NotImplemented
6320 if query.CQ_WATCHER_PAUSE in self.requested_data:
6321 watcher_pause = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
else:
  watcher_pause = NotImplemented
6325 return query.ClusterQueryData(cluster, drain_flag, watcher_pause)
6328 class LUInstanceActivateDisks(NoHooksLU):
6329 """Bring up an instance's disks.
6334 def ExpandNames(self):
6335 self._ExpandAndLockInstance()
6336 self.needed_locks[locking.LEVEL_NODE] = []
6337 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6339 def DeclareLocks(self, level):
6340 if level == locking.LEVEL_NODE:
6341 self._LockInstancesNodes()
6343 def CheckPrereq(self):
6344 """Check prerequisites.
This checks that the instance is in the cluster.

"""
6349 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6350 assert self.instance is not None, \
6351 "Cannot retrieve locked instance %s" % self.op.instance_name
6352 _CheckNodeOnline(self, self.instance.primary_node)
6354 def Exec(self, feedback_fn):
6355 """Activate the disks.
6358 disks_ok, disks_info = \
6359 _AssembleInstanceDisks(self, self.instance,
6360 ignore_size=self.op.ignore_size)
if not disks_ok:
  raise errors.OpExecError("Cannot activate block devices")

return disks_info
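# The list returned above consists of (node, instance-visible name,
# node-visible path) tuples, as documented for _AssembleInstanceDisks
# below; a hypothetical entry could look like:
#   ("node1.example.com", "disk/0", "/dev/drbd0")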
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.
6371 This sets up the block devices on all nodes.
6373 @type lu: L{LogicalUnit}
6374 @param lu: the logical unit on whose behalf we execute
6375 @type instance: L{objects.Instance}
6376 @param instance: the instance for whose disks we assemble
6377 @type disks: list of L{objects.Disk} or None
6378 @param disks: which disks to assemble (or all, if None)
6379 @type ignore_secondaries: boolean
6380 @param ignore_secondaries: if true, errors on secondary nodes
6381 won't result in an error return from the function
6382 @type ignore_size: boolean
6383 @param ignore_size: if true, the current known size of the disk
6384 will not be used during the disk activation, useful for cases
6385 when the size is wrong
6386 @return: False if the operation failed, otherwise a list of
6387 (host, instance_visible_name, node_visible_name)
with the mapping from node devices to instance devices

"""
device_info = []
disks_ok = True
6393 iname = instance.name
6394 disks = _ExpandCheckDisks(instance, disks)
6396 # With the two passes mechanism we try to reduce the window of
6397 # opportunity for the race condition of switching DRBD to primary
6398 # before handshaking occured, but we do not eliminate it
6400 # The proper fix would be to wait (with some limits) until the
6401 # connection has been made and drbd transitions from WFConnection
# into any other network-connected state (Connected, SyncTarget,
# SyncSource, etc.)
6405 # 1st pass, assemble on all nodes in secondary mode
6406 for idx, inst_disk in enumerate(disks):
6407 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
if ignore_size:
  node_disk = node_disk.Copy()
  node_disk.UnsetSize()
6411 lu.cfg.SetDiskID(node_disk, node)
result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
                                       False, idx)
msg = result.fail_msg
if msg:
  is_offline_secondary = (node in instance.secondary_nodes and
                          result.offline)
  lu.proc.LogWarning("Could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1): %s",
                     inst_disk.iv_name, node, msg)
  if not (ignore_secondaries or is_offline_secondary):
    disks_ok = False
6424 # FIXME: race condition on drbd migration to primary
6426 # 2nd pass, do only the primary node
for idx, inst_disk in enumerate(disks):
  dev_path = None
6430 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
if node != instance.primary_node:
  continue
if ignore_size:
  node_disk = node_disk.Copy()
  node_disk.UnsetSize()
6436 lu.cfg.SetDiskID(node_disk, node)
result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
                                       True, idx)
msg = result.fail_msg
if msg:
  lu.proc.LogWarning("Could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2): %s",
                     inst_disk.iv_name, node, msg)
  disks_ok = False
else:
  dev_path = result.payload
6448 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
6450 # leave the disks configured for the primary node
6451 # this is a workaround that would be fixed better by
6452 # improving the logical/physical id handling
for disk in disks:
  lu.cfg.SetDiskID(disk, instance.primary_node)
6456 return disks_ok, device_info
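# The two-pass scheme implemented above can be summarised by the following
# illustrative generator (a hypothetical helper, not used by the code):
def _ExampleTwoPassAssembly(nodes, primary_node):
  """Yields (node, as_primary) pairs in the order used above."""
  for node in nodes: # 1st pass: every node assembles in secondary mode
    yield (node, False)
  if primary_node in nodes: # 2nd pass: only the primary, as primary
    yield (primary_node, True)

assert list(_ExampleTwoPassAssembly(["node1", "node2"], "node1")) == \
  [("node1", False), ("node2", False), ("node1", True)]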
6459 def _StartInstanceDisks(lu, instance, force):
6460 """Start the disks of an instance.
6463 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
6464 ignore_secondaries=force)
if not disks_ok:
  _ShutdownInstanceDisks(lu, instance)
6467 if force is not None and not force:
lu.proc.LogWarning("", hint="If the message above refers to a"
                   " secondary node,"
                   " you can retry the operation using '--force'.")
6471 raise errors.OpExecError("Disk consistency error")
6474 class LUInstanceDeactivateDisks(NoHooksLU):
6475 """Shutdown an instance's disks.
6480 def ExpandNames(self):
6481 self._ExpandAndLockInstance()
6482 self.needed_locks[locking.LEVEL_NODE] = []
6483 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6485 def DeclareLocks(self, level):
6486 if level == locking.LEVEL_NODE:
6487 self._LockInstancesNodes()
6489 def CheckPrereq(self):
6490 """Check prerequisites.
This checks that the instance is in the cluster.

"""
6495 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6496 assert self.instance is not None, \
6497 "Cannot retrieve locked instance %s" % self.op.instance_name
6499 def Exec(self, feedback_fn):
6500 """Deactivate the disks
6503 instance = self.instance
6505 _ShutdownInstanceDisks(self, instance)
6507 _SafeShutdownInstanceDisks(self, instance)
6510 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
6511 """Shutdown block devices of an instance.
6513 This function checks if an instance is running, before calling
_ShutdownInstanceDisks.

"""
6517 _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
6518 _ShutdownInstanceDisks(lu, instance, disks=disks)
6521 def _ExpandCheckDisks(instance, disks):
6522 """Return the instance disks selected by the disks list
6524 @type disks: list of L{objects.Disk} or None
6525 @param disks: selected disks
6526 @rtype: list of L{objects.Disk}
@return: selected instance disks to act on

"""
if disks is None:
  return instance.disks
else:
  if not set(disks).issubset(instance.disks):
    raise errors.ProgrammerError("Can only act on disks belonging to the"
                                 " target instance")
  return disks
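# E.g. (hypothetical): for an instance with disks [d0, d1], the helper above
# returns [d0, d1] when called with disks=None and [d1] when called with
# disks=[d1]; passing a disk of another instance raises ProgrammerError.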
6539 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
6540 """Shutdown block devices of an instance.
6542 This does the shutdown on all nodes of the instance.
If the ignore_primary is false, errors on the primary node are
ignored.

"""
all_result = True
disks = _ExpandCheckDisks(instance, disks)

for disk in disks:
  for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
6553 lu.cfg.SetDiskID(top_disk, node)
6554 result = lu.rpc.call_blockdev_shutdown(node, (top_disk, instance))
msg = result.fail_msg
if msg:
  lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                disk.iv_name, node, msg)
  if ((node == instance.primary_node and not ignore_primary) or
      (node != instance.primary_node and not result.offline)):
    all_result = False

return all_result
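# The error policy of _ShutdownInstanceDisks above, as an illustrative
# stand-alone predicate over plain booleans (hypothetical helper):
def _ExampleShutdownFailureCounts(is_primary, ignore_primary, node_offline):
  """Returns True iff a failed shutdown should make the result False."""
  if is_primary:
    return not ignore_primary
  return not node_offline

assert _ExampleShutdownFailureCounts(True, False, False)
assert not _ExampleShutdownFailureCounts(True, True, False)
assert not _ExampleShutdownFailureCounts(False, False, True)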
6565 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
6566 """Checks if a node has enough free memory.
This function checks if a given node has the needed amount of free
memory. In case the node has less memory or we cannot get the
information from the node, this function raises an OpPrereqError
exception.
6573 @type lu: C{LogicalUnit}
6574 @param lu: a logical unit from which we get configuration data
6576 @param node: the node to check
6577 @type reason: C{str}
6578 @param reason: string to use in the error message
6579 @type requested: C{int}
6580 @param requested: the amount of memory in MiB to check for
6581 @type hypervisor_name: C{str}
6582 @param hypervisor_name: the hypervisor to ask for memory stats
6584 @return: node current free memory
6585 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
we cannot check the node

"""
6589 nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
6590 nodeinfo[node].Raise("Can't get data from node %s" % node,
6591 prereq=True, ecode=errors.ECODE_ENVIRON)
6592 (_, _, (hv_info, )) = nodeinfo[node].payload
6594 free_mem = hv_info.get("memory_free", None)
6595 if not isinstance(free_mem, int):
6596 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
6597 " was '%s'" % (node, free_mem),
6598 errors.ECODE_ENVIRON)
6599 if requested > free_mem:
6600 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
6601 " needed %s MiB, available %s MiB" %
(node, reason, requested, free_mem),
errors.ECODE_NORES)

return free_mem
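# Minimal sketch of the free-memory rule enforced above, over plain integers
# (the MiB figures are made up):
def _ExampleHasEnoughMemory(free_mem, requested):
  """Mirrors the type and threshold checks of _CheckNodeFreeMemory."""
  return isinstance(free_mem, int) and requested <= free_mem

assert _ExampleHasEnoughMemory(4096, 1024)
assert not _ExampleHasEnoughMemory(512, 1024)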
6607 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
6608 """Checks if nodes have enough free disk space in the all VGs.
This function checks if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
information from the node, this function raises an OpPrereqError
exception.
6615 @type lu: C{LogicalUnit}
6616 @param lu: a logical unit from which we get configuration data
6617 @type nodenames: C{list}
6618 @param nodenames: the list of node names to check
6619 @type req_sizes: C{dict}
@param req_sizes: the hash of vg and corresponding amount of disk in
    MiB to check for
6622 @raise errors.OpPrereqError: if the node doesn't have enough disk,
or we cannot check the node

"""
6626 for vg, req_size in req_sizes.items():
6627 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
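# The req_sizes mapping above is keyed by VG name, e.g. (hypothetical):
#   {"xenvg": 20480, "datavg": 10240}
# would require 20 GiB free in "xenvg" and 10 GiB free in "datavg" on every
# node in nodenames.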
6630 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
6631 """Checks if nodes have enough free disk space in the specified VG.
This function checks if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
information from the node, this function raises an OpPrereqError
exception.
6638 @type lu: C{LogicalUnit}
6639 @param lu: a logical unit from which we get configuration data
6640 @type nodenames: C{list}
6641 @param nodenames: the list of node names to check
@type vg: C{str}
@param vg: the volume group to check
6644 @type requested: C{int}
6645 @param requested: the amount of disk in MiB to check for
6646 @raise errors.OpPrereqError: if the node doesn't have enough disk,
or we cannot check the node

"""
6650 nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
6651 for node in nodenames:
6652 info = nodeinfo[node]
6653 info.Raise("Cannot get current information from node %s" % node,
6654 prereq=True, ecode=errors.ECODE_ENVIRON)
6655 (_, (vg_info, ), _) = info.payload
6656 vg_free = vg_info.get("vg_free", None)
6657 if not isinstance(vg_free, int):
6658 raise errors.OpPrereqError("Can't compute free disk space on node"
6659 " %s for vg %s, result was '%s'" %
6660 (node, vg, vg_free), errors.ECODE_ENVIRON)
6661 if requested > vg_free:
6662 raise errors.OpPrereqError("Not enough disk space on target node %s"
6663 " vg %s: required %d MiB, available %d MiB" %
(node, vg, requested, vg_free),
errors.ECODE_NORES)
6668 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
6669 """Checks if nodes have enough physical CPUs
6671 This function checks if all given nodes have the needed number of
6672 physical CPUs. In case any node has less CPUs or we cannot get the
information from the node, this function raises an OpPrereqError
exception.
6676 @type lu: C{LogicalUnit}
6677 @param lu: a logical unit from which we get configuration data
6678 @type nodenames: C{list}
6679 @param nodenames: the list of node names to check
6680 @type requested: C{int}
6681 @param requested: the minimum acceptable number of physical CPUs
6682 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
or we cannot check the node

"""
6686 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
6687 for node in nodenames:
6688 info = nodeinfo[node]
6689 info.Raise("Cannot get current information from node %s" % node,
6690 prereq=True, ecode=errors.ECODE_ENVIRON)
6691 (_, _, (hv_info, )) = info.payload
6692 num_cpus = hv_info.get("cpu_total", None)
6693 if not isinstance(num_cpus, int):
6694 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
6695 " on node %s, result was '%s'" %
6696 (node, num_cpus), errors.ECODE_ENVIRON)
6697 if requested > num_cpus:
6698 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
6699 "required" % (node, num_cpus, requested),
6703 class LUInstanceStartup(LogicalUnit):
6704 """Starts an instance.
6707 HPATH = "instance-start"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
6711 def CheckArguments(self):
6713 if self.op.beparams:
6714 # fill the beparams dict
6715 objects.UpgradeBeParams(self.op.beparams)
6716 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6718 def ExpandNames(self):
6719 self._ExpandAndLockInstance()
6720 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
6722 def DeclareLocks(self, level):
6723 if level == locking.LEVEL_NODE_RES:
6724 self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on master, primary and secondary nodes of the instance.

  """
  env = {
    "FORCE": self.op.force,
    }

  env.update(_BuildInstanceHookEnvByObject(self, self.instance))

  return env
6740 def BuildHooksNodes(self):
6741 """Build hooks nodes.
6744 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6747 def CheckPrereq(self):
6748 """Check prerequisites.
This checks that the instance is in the cluster.

"""
6753 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6754 assert self.instance is not None, \
6755 "Cannot retrieve locked instance %s" % self.op.instance_name
6758 if self.op.hvparams:
6759 # check hypervisor parameter syntax (locally)
6760 cluster = self.cfg.GetClusterInfo()
6761 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6762 filled_hvp = cluster.FillHV(instance)
6763 filled_hvp.update(self.op.hvparams)
6764 hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
6765 hv_type.CheckParameterSyntax(filled_hvp)
6766 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
6768 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6770 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
6772 if self.primary_offline and self.op.ignore_offline_nodes:
6773 self.proc.LogWarning("Ignoring offline primary node")
6775 if self.op.hvparams or self.op.beparams:
6776 self.proc.LogWarning("Overridden parameters are ignored")
else:
  _CheckNodeOnline(self, instance.primary_node)
6780 bep = self.cfg.GetClusterInfo().FillBE(instance)
6781 bep.update(self.op.beparams)
6783 # check bridges existence
6784 _CheckInstanceBridgesExist(self, instance)
remote_info = self.rpc.call_instance_info(instance.primary_node,
                                          instance.name,
                                          instance.hypervisor)
6789 remote_info.Raise("Error checking node %s" % instance.primary_node,
6790 prereq=True, ecode=errors.ECODE_ENVIRON)
6791 if not remote_info.payload: # not running already
6792 _CheckNodeFreeMemory(self, instance.primary_node,
6793 "starting instance %s" % instance.name,
6794 bep[constants.BE_MINMEM], instance.hypervisor)
6796 def Exec(self, feedback_fn):
6797 """Start the instance.
6800 instance = self.instance
6801 force = self.op.force
6803 if not self.op.no_remember:
6804 self.cfg.MarkInstanceUp(instance.name)
6806 if self.primary_offline:
6807 assert self.op.ignore_offline_nodes
self.proc.LogInfo("Primary node offline, marked instance as started")
else:
  node_current = instance.primary_node
6812 _StartInstanceDisks(self, instance, force)
result = \
  self.rpc.call_instance_start(node_current,
                               (instance, self.op.hvparams,
                                self.op.beparams),
                               self.op.startup_paused)
6819 msg = result.fail_msg
if msg:
  _ShutdownInstanceDisks(self, instance)
  raise errors.OpExecError("Could not start instance: %s" % msg)
6825 class LUInstanceReboot(LogicalUnit):
6826 """Reboot an instance.
6829 HPATH = "instance-reboot"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
6833 def ExpandNames(self):
6834 self._ExpandAndLockInstance()
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on master, primary and secondary nodes of the instance.

  """
  env = {
    "IGNORE_SECONDARIES": self.op.ignore_secondaries,
    "REBOOT_TYPE": self.op.reboot_type,
    "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
    }

  env.update(_BuildInstanceHookEnvByObject(self, self.instance))

  return env
6852 def BuildHooksNodes(self):
6853 """Build hooks nodes.
6856 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6859 def CheckPrereq(self):
6860 """Check prerequisites.
This checks that the instance is in the cluster.

"""
6865 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6866 assert self.instance is not None, \
6867 "Cannot retrieve locked instance %s" % self.op.instance_name
6868 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6869 _CheckNodeOnline(self, instance.primary_node)
6871 # check bridges existence
6872 _CheckInstanceBridgesExist(self, instance)
6874 def Exec(self, feedback_fn):
6875 """Reboot the instance.
6878 instance = self.instance
6879 ignore_secondaries = self.op.ignore_secondaries
6880 reboot_type = self.op.reboot_type
remote_info = self.rpc.call_instance_info(instance.primary_node,
                                          instance.name,
                                          instance.hypervisor)
6885 remote_info.Raise("Error checking node %s" % instance.primary_node)
6886 instance_running = bool(remote_info.payload)
6888 node_current = instance.primary_node
6890 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
6891 constants.INSTANCE_REBOOT_HARD]:
6892 for disk in instance.disks:
6893 self.cfg.SetDiskID(disk, node_current)
result = self.rpc.call_instance_reboot(node_current, instance,
                                       reboot_type,
                                       self.op.shutdown_timeout)
6897 result.Raise("Could not reboot instance")
else:
  if instance_running:
6900 result = self.rpc.call_instance_shutdown(node_current, instance,
6901 self.op.shutdown_timeout)
6902 result.Raise("Could not shutdown instance for full reboot")
6903 _ShutdownInstanceDisks(self, instance)
else:
  self.LogInfo("Instance %s was already stopped, starting now",
               instance.name)
6907 _StartInstanceDisks(self, instance, ignore_secondaries)
6908 result = self.rpc.call_instance_start(node_current,
6909 (instance, None, None), False)
6910 msg = result.fail_msg
if msg:
  _ShutdownInstanceDisks(self, instance)
6913 raise errors.OpExecError("Could not start instance for"
6914 " full reboot: %s" % msg)
6916 self.cfg.MarkInstanceUp(instance.name)
6919 class LUInstanceShutdown(LogicalUnit):
6920 """Shutdown an instance.
6923 HPATH = "instance-stop"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
6927 def ExpandNames(self):
6928 self._ExpandAndLockInstance()
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on master, primary and secondary nodes of the instance.

  """
  env = _BuildInstanceHookEnvByObject(self, self.instance)
  env["TIMEOUT"] = self.op.timeout
  return env
6940 def BuildHooksNodes(self):
6941 """Build hooks nodes.
6944 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6947 def CheckPrereq(self):
6948 """Check prerequisites.
This checks that the instance is in the cluster.

"""
6953 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6954 assert self.instance is not None, \
6955 "Cannot retrieve locked instance %s" % self.op.instance_name
6957 _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
6959 self.primary_offline = \
6960 self.cfg.GetNodeInfo(self.instance.primary_node).offline
6962 if self.primary_offline and self.op.ignore_offline_nodes:
6963 self.proc.LogWarning("Ignoring offline primary node")
6965 _CheckNodeOnline(self, self.instance.primary_node)
6967 def Exec(self, feedback_fn):
6968 """Shutdown the instance.
6971 instance = self.instance
6972 node_current = instance.primary_node
6973 timeout = self.op.timeout
6975 if not self.op.no_remember:
6976 self.cfg.MarkInstanceDown(instance.name)
6978 if self.primary_offline:
6979 assert self.op.ignore_offline_nodes
6980 self.proc.LogInfo("Primary node offline, marked instance as stopped")
else:
  result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
6983 msg = result.fail_msg
if msg:
  self.proc.LogWarning("Could not shutdown instance: %s" % msg)
6987 _ShutdownInstanceDisks(self, instance)
6990 class LUInstanceReinstall(LogicalUnit):
6991 """Reinstall an instance.
6994 HPATH = "instance-reinstall"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
6998 def ExpandNames(self):
6999 self._ExpandAndLockInstance()
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on master, primary and secondary nodes of the instance.

  """
7007 return _BuildInstanceHookEnvByObject(self, self.instance)
7009 def BuildHooksNodes(self):
7010 """Build hooks nodes.
7013 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7016 def CheckPrereq(self):
7017 """Check prerequisites.
This checks that the instance is in the cluster and is not running.

"""
7022 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7023 assert instance is not None, \
7024 "Cannot retrieve locked instance %s" % self.op.instance_name
7025 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
7026 " offline, cannot reinstall")
7028 if instance.disk_template == constants.DT_DISKLESS:
7029 raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name,
errors.ECODE_INVAL)
7032 _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
7034 if self.op.os_type is not None:
7036 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
7037 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
7038 instance_os = self.op.os_type
else:
  instance_os = instance.os
7042 nodelist = list(instance.all_nodes)
7044 if self.op.osparams:
7045 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
7046 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
  self.os_inst = i_osdict # the new dict (without defaults)
else:
  self.os_inst = {}
7051 self.instance = instance
7053 def Exec(self, feedback_fn):
7054 """Reinstall the instance.
7057 inst = self.instance
7059 if self.op.os_type is not None:
7060 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
7061 inst.os = self.op.os_type
7062 # Write to configuration
7063 self.cfg.Update(inst, feedback_fn)
7065 _StartInstanceDisks(self, inst, None)
7067 feedback_fn("Running the instance OS create scripts...")
7068 # FIXME: pass debug option from opcode to backend
7069 result = self.rpc.call_instance_os_add(inst.primary_node,
7070 (inst, self.os_inst), True,
7071 self.op.debug_level)
7072 result.Raise("Could not install OS for instance %s on node %s" %
7073 (inst.name, inst.primary_node))
7075 _ShutdownInstanceDisks(self, inst)
7078 class LUInstanceRecreateDisks(LogicalUnit):
7079 """Recreate an instance's missing disks.
7082 HPATH = "instance-recreate-disks"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
7086 _MODIFYABLE = frozenset([
7087 constants.IDISK_SIZE,
constants.IDISK_MODE,
])
7091 # New or changed disk parameters may have different semantics
7092 assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
7093 constants.IDISK_ADOPT,
7095 # TODO: Implement support changing VG while recreating
constants.IDISK_METAVG,
])), "Not all disk parameters are accounted for"
7100 def CheckArguments(self):
7101 if self.op.disks and ht.TPositiveInt(self.op.disks[0]):
7102 # Normalize and convert deprecated list of disk indices
7103 self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
7105 duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
if duplicates:
  raise errors.OpPrereqError("Some disks have been specified more than"
                             " once: %s" % utils.CommaJoin(duplicates),
                             errors.ECODE_INVAL)
7111 for (idx, params) in self.op.disks:
7112 utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
7113 unsupported = frozenset(params.keys()) - self._MODIFYABLE
if unsupported:
  raise errors.OpPrereqError("Parameters for disk %s try to change"
                             " unmodifyable parameter(s): %s" %
                             (idx, utils.CommaJoin(unsupported)),
                             errors.ECODE_INVAL)
7120 def ExpandNames(self):
7121 self._ExpandAndLockInstance()
7122 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
if self.op.nodes:
  self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
  self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
else:
  self.needed_locks[locking.LEVEL_NODE] = []
7128 self.needed_locks[locking.LEVEL_NODE_RES] = []
7130 def DeclareLocks(self, level):
7131 if level == locking.LEVEL_NODE:
7132 # if we replace the nodes, we only need to lock the old primary,
7133 # otherwise we need to lock all nodes for disk re-creation
7134 primary_only = bool(self.op.nodes)
7135 self._LockInstancesNodes(primary_only=primary_only)
7136 elif level == locking.LEVEL_NODE_RES:
7138 self.needed_locks[locking.LEVEL_NODE_RES] = \
7139 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on master, primary and secondary nodes of the instance.

  """
  return _BuildInstanceHookEnvByObject(self, self.instance)
7149 def BuildHooksNodes(self):
7150 """Build hooks nodes.
7153 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7156 def CheckPrereq(self):
7157 """Check prerequisites.
This checks that the instance is in the cluster and is not running.

"""
7162 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7163 assert instance is not None, \
7164 "Cannot retrieve locked instance %s" % self.op.instance_name
if self.op.nodes:
  if len(self.op.nodes) != len(instance.all_nodes):
7167 raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
7168 " %d replacement nodes were specified" %
7169 (instance.name, len(instance.all_nodes),
len(self.op.nodes)),
errors.ECODE_INVAL)
7172 assert instance.disk_template != constants.DT_DRBD8 or \
7173 len(self.op.nodes) == 2
7174 assert instance.disk_template != constants.DT_PLAIN or \
7175 len(self.op.nodes) == 1
7176 primary_node = self.op.nodes[0]
else:
  primary_node = instance.primary_node
7179 _CheckNodeOnline(self, primary_node)
7181 if instance.disk_template == constants.DT_DISKLESS:
7182 raise errors.OpPrereqError("Instance '%s' has no disks" %
7183 self.op.instance_name, errors.ECODE_INVAL)
# if we replace nodes *and* the old primary is offline, we don't
# check
7187 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
7188 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE_RES)
7189 old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
7190 if not (self.op.nodes and old_pnode.offline):
7191 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7192 msg="cannot recreate disks")
if self.op.disks:
  self.disks = dict(self.op.disks)
else:
  self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
7199 maxidx = max(self.disks.keys())
7200 if maxidx >= len(instance.disks):
raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
                           errors.ECODE_PARAMS)
7204 if (self.op.nodes and
7205 sorted(self.disks.keys()) != range(len(instance.disks))):
7206 raise errors.OpPrereqError("Can't recreate disks partially and"
7207 " change the nodes at the same time",
7210 self.instance = instance
7212 def Exec(self, feedback_fn):
7213 """Recreate the disks.
7216 instance = self.instance
7218 assert (self.owned_locks(locking.LEVEL_NODE) ==
7219 self.owned_locks(locking.LEVEL_NODE_RES))
to_skip = []
mods = [] # keeps track of needed changes

for idx, disk in enumerate(instance.disks):
  try:
    changes = self.disks[idx]
  except KeyError:
    # Disk should not be recreated
    to_skip.append(idx)
    continue
7232 # update secondaries for disks, if needed
7233 if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
7234 # need to update the nodes and minors
7235 assert len(self.op.nodes) == 2
7236 assert len(disk.logical_id) == 6 # otherwise disk internals
7238 (_, _, old_port, _, _, old_secret) = disk.logical_id
7239 new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
7240 new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
7241 new_minors[0], new_minors[1], old_secret)
  assert len(disk.logical_id) == len(new_id)
else:
  new_id = None

mods.append((idx, new_id, changes))
7248 # now that we have passed all asserts above, we can apply the mods
7249 # in a single run (to avoid partial changes)
7250 for idx, new_id, changes in mods:
7251 disk = instance.disks[idx]
7252 if new_id is not None:
7253 assert disk.dev_type == constants.LD_DRBD8
7254 disk.logical_id = new_id
if changes:
  disk.Update(size=changes.get(constants.IDISK_SIZE, None),
              mode=changes.get(constants.IDISK_MODE, None))
7259 # change primary node, if needed
if self.op.nodes:
  instance.primary_node = self.op.nodes[0]
7262 self.LogWarning("Changing the instance's nodes, you will have to"
7263 " remove any disks left on the older nodes manually")
7266 self.cfg.Update(instance, feedback_fn)
7268 _CreateDisks(self, instance, to_skip=to_skip)
7271 class LUInstanceRename(LogicalUnit):
7272 """Rename an instance.
7275 HPATH = "instance-rename"
7276 HTYPE = constants.HTYPE_INSTANCE
def CheckArguments(self):
  """Check arguments.

  """
7282 if self.op.ip_check and not self.op.name_check:
7283 # TODO: make the ip check more flexible and not depend on the name check
raise errors.OpPrereqError("IP address check requires a name check",
                           errors.ECODE_INVAL)
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on master, primary and secondary nodes of the instance.

  """
7293 env = _BuildInstanceHookEnvByObject(self, self.instance)
env["INSTANCE_NEW_NAME"] = self.op.new_name
return env
7297 def BuildHooksNodes(self):
7298 """Build hooks nodes.
7301 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7304 def CheckPrereq(self):
7305 """Check prerequisites.
This checks that the instance is in the cluster and is not running.

"""
7310 self.op.instance_name = _ExpandInstanceName(self.cfg,
7311 self.op.instance_name)
7312 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7313 assert instance is not None
7314 _CheckNodeOnline(self, instance.primary_node)
7315 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7316 msg="cannot rename")
7317 self.instance = instance
7319 new_name = self.op.new_name
7320 if self.op.name_check:
7321 hostname = _CheckHostnameSane(self, new_name)
7322 new_name = self.op.new_name = hostname.name
7323 if (self.op.ip_check and
7324 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
7325 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7326 (hostname.ip, new_name),
7327 errors.ECODE_NOTUNIQUE)
7329 instance_list = self.cfg.GetInstanceList()
7330 if new_name in instance_list and new_name != instance.name:
7331 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7332 new_name, errors.ECODE_EXISTS)
7334 def Exec(self, feedback_fn):
7335 """Rename the instance.
7338 inst = self.instance
7339 old_name = inst.name
7341 rename_file_storage = False
7342 if (inst.disk_template in constants.DTS_FILEBASED and
7343 self.op.new_name != inst.name):
7344 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7345 rename_file_storage = True
7347 self.cfg.RenameInstance(inst.name, self.op.new_name)
7348 # Change the instance lock. This is definitely safe while we hold the BGL.
7349 # Otherwise the new lock would have to be added in acquired mode.
7351 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
7352 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
7354 # re-read the instance from the configuration after rename
7355 inst = self.cfg.GetInstanceInfo(self.op.new_name)
7357 if rename_file_storage:
7358 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7359 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
7360 old_file_storage_dir,
7361 new_file_storage_dir)
7362 result.Raise("Could not rename on node %s directory '%s' to '%s'"
7363 " (but the instance has been renamed in Ganeti)" %
7364 (inst.primary_node, old_file_storage_dir,
7365 new_file_storage_dir))
7367 _StartInstanceDisks(self, inst, None)
try:
  result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                             old_name, self.op.debug_level)
  msg = result.fail_msg
  if msg:
    msg = ("Could not run OS rename script for instance %s on node %s"
           " (but the instance has been renamed in Ganeti): %s" %
           (inst.name, inst.primary_node, msg))
    self.proc.LogWarning(msg)
finally:
  _ShutdownInstanceDisks(self, inst)

return inst.name
7383 class LUInstanceRemove(LogicalUnit):
7384 """Remove an instance.
7387 HPATH = "instance-remove"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
7391 def ExpandNames(self):
7392 self._ExpandAndLockInstance()
7393 self.needed_locks[locking.LEVEL_NODE] = []
7394 self.needed_locks[locking.LEVEL_NODE_RES] = []
7395 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7397 def DeclareLocks(self, level):
7398 if level == locking.LEVEL_NODE:
7399 self._LockInstancesNodes()
7400 elif level == locking.LEVEL_NODE_RES:
7402 self.needed_locks[locking.LEVEL_NODE_RES] = \
7403 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on master, primary and secondary nodes of the instance.

  """
  env = _BuildInstanceHookEnvByObject(self, self.instance)
  env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
  return env
7415 def BuildHooksNodes(self):
7416 """Build hooks nodes.
7419 nl = [self.cfg.GetMasterNode()]
7420 nl_post = list(self.instance.all_nodes) + nl
7421 return (nl, nl_post)
7423 def CheckPrereq(self):
7424 """Check prerequisites.
This checks that the instance is in the cluster.

"""
7429 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7430 assert self.instance is not None, \
7431 "Cannot retrieve locked instance %s" % self.op.instance_name
7433 def Exec(self, feedback_fn):
7434 """Remove the instance.
7437 instance = self.instance
7438 logging.info("Shutting down instance %s on node %s",
7439 instance.name, instance.primary_node)
7441 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
7442 self.op.shutdown_timeout)
msg = result.fail_msg
if msg:
  if self.op.ignore_failures:
    feedback_fn("Warning: can't shutdown instance: %s" % msg)
  else:
    raise errors.OpExecError("Could not shutdown instance %s on"
                             " node %s: %s" %
                             (instance.name, instance.primary_node, msg))
7452 assert (self.owned_locks(locking.LEVEL_NODE) ==
7453 self.owned_locks(locking.LEVEL_NODE_RES))
7454 assert not (set(instance.all_nodes) -
7455 self.owned_locks(locking.LEVEL_NODE)), \
7456 "Not owning correct locks"
7458 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
7461 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
7462 """Utility function to remove an instance.
7465 logging.info("Removing block devices for instance %s", instance.name)
7467 if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
7468 if not ignore_failures:
7469 raise errors.OpExecError("Can't remove instance's disks")
7470 feedback_fn("Warning: can't remove instance's disks")
7472 logging.info("Removing instance %s out of cluster config", instance.name)
7474 lu.cfg.RemoveInstance(instance.name)
7476 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
7477 "Instance lock removal conflict"
7479 # Remove lock for the instance
7480 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
7483 class LUInstanceQuery(NoHooksLU):
7484 """Logical unit for querying instances.
7487 # pylint: disable=W0142
7490 def CheckArguments(self):
7491 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
7492 self.op.output_fields, self.op.use_locking)
7494 def ExpandNames(self):
7495 self.iq.ExpandNames(self)
7497 def DeclareLocks(self, level):
7498 self.iq.DeclareLocks(self, level)
7500 def Exec(self, feedback_fn):
7501 return self.iq.OldStyleQuery(self)
7504 class LUInstanceFailover(LogicalUnit):
7505 """Failover an instance.
7508 HPATH = "instance-failover"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
7512 def CheckArguments(self):
7513 """Check the arguments.
7516 self.iallocator = getattr(self.op, "iallocator", None)
7517 self.target_node = getattr(self.op, "target_node", None)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

    ignore_consistency = self.op.ignore_consistency
    shutdown_timeout = self.op.shutdown_timeout
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=False,
                                       failover=True,
                                       ignore_consistency=ignore_consistency,
                                       shutdown_timeout=shutdown_timeout,
                                       ignore_ipolicy=self.op.ignore_ipolicy)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      }

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""

    env.update(_BuildInstanceHookEnvByObject(self, instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])


class LUInstanceMigrate(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

    self._migrater = \
      TLMigrateInstance(self, self.op.instance_name,
                        cleanup=self.op.cleanup,
                        failover=False,
                        fallback=self.op.allow_failover,
                        allow_runtime_changes=self.op.allow_runtime_changes,
                        ignore_ipolicy=self.op.ignore_ipolicy)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = _BuildInstanceHookEnvByObject(self, instance)
    env.update({
      "MIGRATE_LIVE": self._migrater.live,
      "MIGRATE_CLEANUP": self.op.cleanup,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
      })

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = target_node
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])
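

# Unlike failover/migration, an instance move works on non-mirrored disks:
# the instance is shut down, new disks are created on the target node and
# filled via blockdev assemble/export RPCs, and the instance is then
# restarted there (see Exec below).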
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    _CheckNodeVmCapable(self, target_node)
    ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
                                     self.cfg.GetNodeGroup(node.group))
    _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
                            ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MAXMEM],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        # delete the created disks on the target
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUNodeMigrate(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False

  def CheckArguments(self):
    pass

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    return {
      "NODE_NAME": self.op.node_name,
      "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    return (nl, nl)

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    # Prepare jobs for migration instances
    allow_runtime_changes = self.op.allow_runtime_changes
    jobs = [
      [opcodes.OpInstanceMigrate(instance_name=inst.name,
                                 mode=self.op.mode,
                                 live=self.op.live,
                                 iallocator=self.op.iallocator,
                                 target_node=self.op.target_node,
                                 allow_runtime_changes=allow_runtime_changes,
                                 ignore_ipolicy=self.op.ignore_ipolicy)]
      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
      ]

    # TODO: Run iallocator in this opcode and pass correct placement options to
    # OpInstanceMigrate. Since other jobs can modify the cluster between
    # running the iallocator and the actual migration, a good consistency model
    # will have to be found.

    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset([self.op.node_name]))

    return ResultWithJobs(jobs)
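

# TLMigrateInstance implements both failover and migration: CheckPrereq
# decides which one is actually performed (possibly falling back from
# migration to failover), and Exec then dispatches to _ExecFailover,
# _ExecCleanup or _ExecMigration.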
class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run
  @type cleanup: boolean
  @ivar cleanup: Whether we cleanup from a failed migration
  @type iallocator: string
  @ivar iallocator: The iallocator used to determine target_node
  @type target_node: string
  @ivar target_node: If given, the target_node to reallocate the instance to
  @type failover: boolean
  @ivar failover: Whether operation results in failover or migration
  @type fallback: boolean
  @ivar fallback: Whether fallback to failover is allowed if migration not
                  possible
  @type ignore_consistency: boolean
  @ivar ignore_consistency: Whether we should ignore consistency between source
                            and target node
  @type shutdown_timeout: int
  @ivar shutdown_timeout: In case of failover timeout of the shutdown
  @type ignore_ipolicy: bool
  @ivar ignore_ipolicy: If true, we can ignore instance policy when migrating

  """
  _MIGRATION_POLL_INTERVAL = 1      # seconds
  _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds

  def __init__(self, lu, instance_name, cleanup=False,
               failover=False, fallback=False,
               ignore_consistency=False,
               allow_runtime_changes=True,
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
               ignore_ipolicy=False):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later
    self.failover = failover
    self.fallback = fallback
    self.ignore_consistency = ignore_consistency
    self.shutdown_timeout = shutdown_timeout
    self.ignore_ipolicy = ignore_ipolicy
    self.allow_runtime_changes = allow_runtime_changes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None
    self.instance = instance
    cluster = self.cfg.GetClusterInfo()

    if (not self.cleanup and
        not instance.admin_state == constants.ADMINST_UP and
        not self.failover and self.fallback):
      self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
                      " switching to failover")
      self.failover = True

    if instance.disk_template not in constants.DTS_MIRRORED:
      if self.failover:
        text = "failovers"
      else:
        text = "migrations"
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                                 " %s" % (instance.disk_template, text),
                                 errors.ECODE_STATE)

    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

      if self.lu.op.iallocator:
        self._RunAllocator()
      else:
        # We set self.target_node as it is required by
        # BuildHooksEnv
        self.target_node = self.lu.op.target_node

      # Check that the target node is correct in terms of instance policy
      nodeinfo = self.cfg.GetNodeInfo(self.target_node)
      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
      ipolicy = _CalculateGroupIPolicy(cluster, group_info)
      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
                              ignore=self.ignore_ipolicy)

      # self.target_node is already populated, either directly or by the
      # iallocator run
      target_node = self.target_node
      if self.target_node == instance.primary_node:
        raise errors.OpPrereqError("Cannot migrate instance %s"
                                   " to its primary (%s)" %
                                   (instance.name, instance.primary_node))

      if len(self.lu.tasklets) == 1:
        # It is safe to release locks only when we're the only tasklet
        # in the LU
        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                      keep=[instance.primary_node, self.target_node])

    else:
      secondary_nodes = instance.secondary_nodes
      if not secondary_nodes:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk template" %
                                        instance.disk_template)
      target_node = secondary_nodes[0]
      if self.lu.op.iallocator or (self.lu.op.target_node and
                                   self.lu.op.target_node != target_node):
        if self.failover:
          text = "failed over"
        else:
          text = "migrated"
        raise errors.OpPrereqError("Instances with disk template %s cannot"
                                   " be %s to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   (instance.disk_template, text),
                                   errors.ECODE_INVAL)
      nodeinfo = self.cfg.GetNodeInfo(target_node)
      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
      ipolicy = _CalculateGroupIPolicy(cluster, group_info)
      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
                              ignore=self.ignore_ipolicy)

    i_be = cluster.FillBE(instance)

    # check memory requirements on the secondary node
    if (not self.cleanup and
        (not self.failover or instance.admin_state == constants.ADMINST_UP)):
      self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
                                               "migrating instance %s" %
                                               instance.name,
                                               i_be[constants.BE_MINMEM],
                                               instance.hypervisor)
    else:
      self.lu.LogInfo("Not checking memory on the secondary node as"
                      " instance will not be started")

    # check if failover must be forced instead of migration
    if (not self.cleanup and not self.failover and
        i_be[constants.BE_ALWAYS_FAILOVER]):
      self.lu.LogInfo("Instance configured to always failover; fallback"
                      " to failover")
      self.failover = True

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      if not self.failover:
        result = self.rpc.call_instance_migratable(instance.primary_node,
                                                   instance)
        if result.fail_msg and self.fallback:
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
                          " failover")
          self.failover = True
        else:
          result.Raise("Can't migrate, please use failover",
                       prereq=True, ecode=errors.ECODE_STATE)

    assert not (self.failover and self.cleanup)

    if not self.failover:
      if self.lu.op.live is not None and self.lu.op.mode is not None:
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                   " parameters are accepted",
                                   errors.ECODE_INVAL)
      if self.lu.op.live is not None:
        if self.lu.op.live:
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
        else:
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
        # reset the 'live' parameter to None so that repeated
        # invocations of CheckPrereq do not raise an exception
        self.lu.op.live = None
      elif self.lu.op.mode is None:
        # read the default value from the hypervisor
        i_hv = cluster.FillHV(self.instance, skip_globals=False)
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
    else:
      # Failover is never live
      self.live = False

    if not (self.failover or self.cleanup):
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking instance on node %s" %
                        instance.primary_node)
      instance_running = bool(remote_info.payload)
      if instance_running:
        self.current_mem = int(remote_info.payload["memory"])
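
  # A relocation request asks the configured iallocator plugin for a new node
  # for the instance, relocating from its current primary; the first returned
  # node becomes self.target_node.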
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    # FIXME: add a self.ignore_ipolicy option
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.instance_name,
                     relocate_from=[self.instance.primary_node],
                     )

    ial.Run(self.lu.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.lu.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.lu.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.target_node = ial.result[0]
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                    self.instance_name, self.lu.op.iallocator,
                    utils.CommaJoin(ial.result))

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            (self.instance.disks,
                                             self.instance))
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)
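
  # The next three helpers drive the DRBD state changes used during
  # migration and cleanup: demote a node's disks to secondary, disconnect
  # them from the network (standalone), and reconnect them in single- or
  # dual-master mode.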
  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           (self.instance.disks, self.instance),
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)
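
  # Cleanup runs after an interrupted migration, when the configuration may
  # disagree with where the instance actually runs: it first establishes the
  # truth via an instance list RPC and only then repairs the DRBD state.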
  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused; you will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all;"
                               " in this case it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    if instance.disk_template in constants.DTS_INT_MIRROR:
      self._EnsureSecondary(demoted_node)
      try:
        self._WaitUntilSync()
      except errors.OpExecError:
        # we ignore here errors, since if the device is standalone, it
        # won't be able to sync
        pass
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
      return

    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
                         " please try to recover the instance manually;"
                         " error '%s'" % str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
                                                                 instance,
                                                                 migration_info,
                                                                 False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

    abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
                                                                 instance,
                                                                 False,
                                                                 self.live)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on source node %s: %s",
                    source_node, abort_msg)
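
  # On a failed migration the error handling is two-staged: _AbortMigration
  # asks the hypervisor on both nodes to abandon the transfer, after which
  # _RevertDiskStatus (for internally mirrored templates) puts the disks back
  # into single-master mode on the source node.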
  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # Check for hypervisor version mismatch and warn the user.
    nodeinfo = self.rpc.call_node_info([source_node, target_node],
                                       None, [self.instance.hypervisor])
    for ninfo in nodeinfo.values():
      ninfo.Raise("Unable to retrieve node information from node '%s'" %
                  ninfo.node)
    (_, _, (src_info, )) = nodeinfo[source_node].payload
    (_, _, (dst_info, )) = nodeinfo[target_node].payload

    if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
        (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
      src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
      dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
      if src_version != dst_version:
        self.feedback_fn("* warning: hypervisor version mismatch between"
                         " source (%s) and target (%s) node" %
                         (src_version, dst_version))

    self.feedback_fn("* checking disk consistency between source and target")
    for (idx, dev) in enumerate(instance.disks):
      if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migration" % idx)

    if self.current_mem > self.tgt_free_mem:
      if not self.allow_runtime_changes:
        raise errors.OpExecError("Memory ballooning not allowed and not enough"
                                 " free memory to fit instance %s on target"
                                 " node %s (have %dMB, need %dMB)" %
                                 (instance.name, target_node,
                                  self.tgt_free_mem, self.current_mem))
      self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.tgt_free_mem)
      rpcres.Raise("Cannot modify instance runtime memory")

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
      # Then switch the disks to master/master mode
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(True)
      self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* starting memory transfer")
    last_feedback = time.time()
    while True:
      result = self.rpc.call_instance_get_migration_status(source_node,
                                                           instance)
      msg = result.fail_msg
      ms = result.payload   # MigrationStatus instance
      if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
        logging.error("Instance migration failed, trying to revert"
                      " disk status: %s", msg)
        self.feedback_fn("Migration failed, aborting")
        self._AbortMigration()
        self._RevertDiskStatus()
        if not msg:
          msg = "hypervisor returned failure"
        raise errors.OpExecError("Could not migrate instance %s: %s" %
                                 (instance.name, msg))

      if result.payload.status != constants.HV_MIGRATION_ACTIVE:
        self.feedback_fn("* memory transfer complete")
        break

      if (utils.TimeoutExpired(last_feedback,
                               self._MIGRATION_FEEDBACK_INTERVAL) and
          ms.transferred_ram is not None):
        mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
        self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
        last_feedback = time.time()

      time.sleep(self._MIGRATION_POLL_INTERVAL)

    result = self.rpc.call_instance_finalize_migration_src(source_node,
                                                           instance,
                                                           True,
                                                           self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed"
                    " on the source node: %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    instance.primary_node = target_node

    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_instance_finalize_migration_dst(target_node,
                                                           instance,
                                                           migration_info,
                                                           True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed"
                    " on the target node: %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
      self._EnsureSecondary(source_node)
      self._WaitUntilSync()
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()

    # If the instance's disk template is `rbd' and there was a successful
    # migration, unmap the device from the source node.
    if self.instance.disk_template == constants.DT_RBD:
      disks = _ExpandCheckDisks(instance, instance.disks)
      self.feedback_fn("* unmapping instance's disks from %s" % source_node)
      for disk in disks:
        result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
        msg = result.fail_msg
        if msg:
          logging.error("Migration was successful, but couldn't unmap the"
                        " block device %s on source node %s: %s",
                        disk.iv_name, source_node, msg)
          logging.error("You need to unmap the device %s manually on %s",
                        disk.iv_name, source_node)

    self.feedback_fn("* done")
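
  # Failover is the non-live variant of the above: shut the instance down on
  # the source node (tolerating failures when the node is offline or
  # ignore_consistency is set), flip primary_node in the configuration and
  # restart the instance on the target node if it was marked up.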
  def _ExecFailover(self):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)

    source_node = instance.primary_node
    target_node = self.target_node

    if instance.admin_state == constants.ADMINST_UP:
      self.feedback_fn("* checking disk consistency between source and target")
      for (idx, dev) in enumerate(instance.disks):
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
                                     False):
          if primary_node.offline:
            self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
                             " target node %s" %
                             (primary_node.name, idx, target_node))
          elif not self.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover" % idx)
    else:
      self.feedback_fn("* not checking disk consistency as instance is not"
                       " running")

    self.feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.ignore_consistency or primary_node.offline:
        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
                           " proceeding anyway; please make sure node"
                           " %s is down; error details: %s",
                           instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    self.feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_state == constants.ADMINST_UP:
      self.feedback_fn("* activating the instance's disks on target node %s" %
                       target_node)
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self.lu, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      self.feedback_fn("* starting the instance on the target node %s" %
                       target_node)
      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self.lu, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn
    self.source_node = self.instance.primary_node

    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
    if self.instance.disk_template in constants.DTS_INT_MIRROR:
      self.target_node = self.instance.secondary_nodes[0]
      # Otherwise self.target_node has been populated either
      # directly, or through an iallocator.

    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
                         in self.cfg.GetMultiNodeInfo(self.all_nodes))

    if self.failover:
      feedback_fn("Failover instance %s" % self.instance.name)
      self._ExecFailover()
    else:
      feedback_fn("Migrating instance %s" % self.instance.name)

      if self.cleanup:
        return self._ExecCleanup()
      else:
        return self._ExecMigration()
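

# Rough sketch of the device tree the creation helpers below recurse over,
# using DRBD8 as an example:
#
#   drbd8 device (LD_DRBD8)
#     +- data LV (LD_LV)
#     +- metadata LV (LD_LV)
#
# _CreateBlockDevInner walks such a tree depth-first and forces creation of
# a whole subtree once a device reports CreateOnSecondary().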
def _CreateBlockDev(lu, node, instance, device, force_create, info,
                    force_open):
  """Wrapper around L{_CreateBlockDevInner}.

  This method annotates the root device first.

  """
  (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
  return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
                              force_open)


def _CreateBlockDevInner(lu, node, instance, device, force_create,
                         info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @attention: The device has to be annotated already.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDevInner(lu, node, instance, child, force_create,
                           info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created separately.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())

  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgnames[0], names[0]),
                          params={})
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
                          logical_id=(vgnames[1], names[1]),
                          params={})
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name, params={})
  return drbd_dev
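

# For reference, the DRBD8 logical_id assembled above is the 6-tuple
# (primary, secondary, port, p_minor, s_minor, shared_secret); a purely
# illustrative value would be
# ("node1.example.com", "node2.example.com", 11000, 0, 1, "secret").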
_DISK_TEMPLATE_NAME_PREFIX = {
  constants.DT_PLAIN: "",
  constants.DT_RBD: ".rbd",
  }


_DISK_TEMPLATE_DEVICE_TYPE = {
  constants.DT_PLAIN: constants.LD_LV,
  constants.DT_FILE: constants.LD_FILE,
  constants.DT_SHARED_FILE: constants.LD_FILE,
  constants.DT_BLOCK: constants.LD_BLOCKDEV,
  constants.DT_RBD: constants.LD_RBD,
  }


def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
    secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
    feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
    _req_shr_file_storage=opcodes.RequireSharedFileStorage):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                       full_disk_params)
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disks.append(disk_dev)
  else:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    if template_name == constants.DT_FILE:
      _req_file_storage()
    elif template_name == constants.DT_SHARED_FILE:
      _req_shr_file_storage()

    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
    if name_prefix is None:
      names = None
    else:
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                        (name_prefix, base_index + i)
                                        for i in range(disk_count)])

    if template_name == constants.DT_PLAIN:
      def logical_id_fn(idx, _, disk):
        vg = disk.get(constants.IDISK_VG, vgname)
        return (vg, names[idx])
    elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
      logical_id_fn = \
        lambda _, disk_index, disk: (file_driver,
                                     "%s/disk%d" % (file_storage_dir,
                                                    disk_index))
    elif template_name == constants.DT_BLOCK:
      logical_id_fn = \
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                       disk[constants.IDISK_ADOPT])
    elif template_name == constants.DT_RBD:
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
    else:
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)

    dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      size = disk[constants.IDISK_SIZE]
      feedback_fn("* disk %s, size %s" %
                  (disk_index, utils.FormatUnit(size, "h")))
      disks.append(objects.Disk(dev_type=dev_type, size=size,
                                logical_id=logical_id_fn(idx, disk_index, disk),
                                iv_name="disk/%d" % disk_index,
                                mode=disk[constants.IDISK_MODE],
                                params={}))

  return disks


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CalcEta(time_taken, written, total_size):
  """Calculates the ETA based on size written and total size.

  @param time_taken: The time taken so far
  @param written: amount written so far
  @param total_size: The total size of data to be written
  @return: The remaining time in seconds

  """
  avg_time = time_taken / float(written)
  return (total_size - written) * avg_time
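

# Worked example for _CalcEta: if 256 MB out of 1024 MB were written in 30
# seconds, the average is 30.0 / 256 seconds per MB, so the remaining
# (1024 - 256) MB are estimated at (1024 - 256) * (30.0 / 256) = 90.0 seconds.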
def _WipeDisks(lu, instance):
  """Wipes instance disks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @return: the success of the wipe

  """
  node = instance.primary_node

  for device in instance.disks:
    lu.cfg.SetDiskID(device, node)

  logging.info("Pause sync of instance %s disks", instance.name)
  result = lu.rpc.call_blockdev_pause_resume_sync(node,
                                                  (instance.disks, instance),
                                                  True)
  result.Raise("Failed RPC to node %s for pausing the disk syncing" % node)

  for idx, success in enumerate(result.payload):
    if not success:
      logging.warn("pause-sync of instance %s for disks %d failed",
                   instance.name, idx)

  try:
    for idx, device in enumerate(instance.disks):
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
                            constants.MIN_WIPE_CHUNK_PERCENT)
      # we _must_ make this an int, otherwise rounding errors will
      # occur
      wipe_chunk_size = int(wipe_chunk_size)

      lu.LogInfo("* Wiping disk %d", idx)
      logging.info("Wiping disk %d for instance %s, node %s using"
                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)

      offset = 0
      size = device.size
      last_output = 0
      start_time = time.time()

      while offset < size:
        wipe_size = min(wipe_chunk_size, size - offset)
        logging.debug("Wiping disk %d, offset %s, chunk %s",
                      idx, offset, wipe_size)
        result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
                                           wipe_size)
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
                     (idx, offset, wipe_size))
        now = time.time()
        offset += wipe_size
        if now - last_output >= 60:
          eta = _CalcEta(now - start_time, offset, size)
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
          last_output = now
  finally:
    logging.info("Resume sync of instance %s disks", instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node,
                                                    (instance.disks, instance),
                                                    False)

    if result.fail_msg:
      lu.LogWarning("RPC call to %s for resuming disk syncing failed,"
                    " please have a look at the status and troubleshoot"
                    " the issue: %s", node, result.fail_msg)
    else:
      for idx, success in enumerate(result.payload):
        if not success:
          lu.LogWarning("Resume sync of disk %d failed, please have a"
                        " look at the status and troubleshoot the issue", idx)
          logging.warn("resume-sync of instance %s for disks %d failed",
                       instance.name, idx)


def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUInstanceSetParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
    #HARDCODE
    for node in all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = _AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      result = lu.rpc.call_blockdev_remove(node, disk)
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx, node, result.fail_msg)
        if not (result.offline and node != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.LDS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, result.fail_msg)
      all_result = False

  return all_result
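

# Example (illustrative): for two 1024 MB disks in volume group "xenvg", the
# plain template below yields {"xenvg": 2048}, while drbd additionally
# reserves DRBD_META_SIZE MB of metadata per disk.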
def _ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  def _compute(disks, payload):
    """Universal algorithm.

    """
    vgs = {}
    for disk in disks:
      vgs[disk[constants.IDISK_VG]] = \
        vgs.get(disk[constants.IDISK_VG], 0) + \
        disk[constants.IDISK_SIZE] + payload

    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, DRBD_META_SIZE),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
    }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements according to disk template

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8:
      sum(d[constants.IDISK_SIZE] + DRBD_META_SIZE for d in disks),
    constants.DT_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
    constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
    constants.DT_BLOCK: 0,
    constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
    }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]


def _FilterVmNodes(lu, nodenames):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [name for name in nodenames if name not in vm_nodes]


def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  nodenames = _FilterVmNodes(lu, nodenames)

  cluster = lu.cfg.GetClusterInfo()
  hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)

  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvfull)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" % node)


def _CheckOSParams(lu, required, nodenames, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  nodenames = _FilterVmNodes(lu, nodenames)
  result = lu.rpc.call_os_validate(nodenames, required, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" % node)
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, node)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    self._cds = _GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")
9316 elif self.op.mode == constants.INSTANCE_CREATE:
9317 if self.op.os_type is None:
9318 raise errors.OpPrereqError("No guest OS specified",
9320 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
9321 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
9322 " installation" % self.op.os_type,
9324 if self.op.disk_template is None:
9325 raise errors.OpPrereqError("No disk template specified",
9328 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9329 # Check handshake to ensure both clusters have the same domain secret
9330 src_handshake = self.op.source_handshake
9331 if not src_handshake:
9332 raise errors.OpPrereqError("Missing source handshake",
9335 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
9338 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
9341 # Load and check source CA
9342 self.source_x509_ca_pem = self.op.source_x509_ca
9343 if not self.source_x509_ca_pem:
9344 raise errors.OpPrereqError("Missing source X509 CA",
9348 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
9350 except OpenSSL.crypto.Error, err:
9351 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
9352 (err, ), errors.ECODE_INVAL)
9354 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9355 if errcode is not None:
9356 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
9359 self.source_x509_ca = cert
9361 src_instance_name = self.op.source_instance_name
9362 if not src_instance_name:
9363 raise errors.OpPrereqError("Missing source instance name",
9366 self.source_instance_name = \
9367 netutils.GetHostname(name=src_instance_name).name
9370 raise errors.OpPrereqError("Invalid instance creation mode %r" %
9371 self.op.mode, errors.ECODE_INVAL)
9373 def ExpandNames(self):
9374 """ExpandNames for CreateInstance.
9376 Figure out the right locks for instance creation.
9378 """
9379 self.needed_locks = {}
9381 instance_name = self.op.instance_name
9382 # this is just a preventive check, but someone might still add this
9383 # instance in the meantime, and creation will fail at lock-add time
9384 if instance_name in self.cfg.GetInstanceList():
9385 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
9386 instance_name, errors.ECODE_EXISTS)
9388 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
9390 if self.op.iallocator:
9391 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
9392 # specifying a group on instance creation and then selecting nodes from
9393 # that group
9394 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9395 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
9396 else:
9397 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
9398 nodelist = [self.op.pnode]
9399 if self.op.snode is not None:
9400 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
9401 nodelist.append(self.op.snode)
9402 self.needed_locks[locking.LEVEL_NODE] = nodelist
9403 # Lock resources of instance's primary and secondary nodes (copy to
9404 # prevent accidental modification)
9405 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodelist)
9407 # in case of import lock the source node too
9408 if self.op.mode == constants.INSTANCE_IMPORT:
9409 src_node = self.op.src_node
9410 src_path = self.op.src_path
9412 if src_path is None:
9413 self.op.src_path = src_path = self.op.instance_name
9415 if src_node is None:
9416 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9417 self.op.src_node = None
9418 if os.path.isabs(src_path):
9419 raise errors.OpPrereqError("Importing an instance from a path"
9420 " requires a source node option",
9423 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
9424 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
9425 self.needed_locks[locking.LEVEL_NODE].append(src_node)
9426 if not os.path.isabs(src_path):
9427 self.op.src_path = src_path = \
9428 utils.PathJoin(constants.EXPORT_DIR, src_path)
9430 def _RunAllocator(self):
9431 """Run the allocator based on input opcode.
9434 nics = [n.ToDict() for n in self.nics]
9435 ial = IAllocator(self.cfg, self.rpc,
9436 mode=constants.IALLOCATOR_MODE_ALLOC,
9437 name=self.op.instance_name,
9438 disk_template=self.op.disk_template,
9441 vcpus=self.be_full[constants.BE_VCPUS],
9442 memory=self.be_full[constants.BE_MAXMEM],
9443 spindle_use=self.be_full[constants.BE_SPINDLE_USE],
9446 hypervisor=self.op.hypervisor,
9447 )
9449 ial.Run(self.op.iallocator)
9451 if not ial.success:
9452 raise errors.OpPrereqError("Can't compute nodes using"
9453 " iallocator '%s': %s" %
9454 (self.op.iallocator, ial.info),
9455 errors.ECODE_NORES)
9456 if len(ial.result) != ial.required_nodes:
9457 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
9458 " of nodes (%s), required %s" %
9459 (self.op.iallocator, len(ial.result),
9460 ial.required_nodes), errors.ECODE_FAULT)
9461 self.op.pnode = ial.result[0]
9462 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
9463 self.op.instance_name, self.op.iallocator,
9464 utils.CommaJoin(ial.result))
9465 if ial.required_nodes == 2:
9466 self.op.snode = ial.result[1]
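# Illustrative usage (hypothetical command line, not from this module):
# letting an allocator place the instance could look like
#   gnt-instance add -I hail -t drbd -s 10G -o debian-image instance1.example.com
# after which pnode/snode are filled in from ial.result as above.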
9468 def BuildHooksEnv(self):
9469 """Build hooks env.
9471 This runs on master, primary and secondary nodes of the instance.
9473 """
9474 env = {
9475 "ADD_MODE": self.op.mode,
9477 if self.op.mode == constants.INSTANCE_IMPORT:
9478 env["SRC_NODE"] = self.op.src_node
9479 env["SRC_PATH"] = self.op.src_path
9480 env["SRC_IMAGES"] = self.src_images
9482 env.update(_BuildInstanceHookEnv(
9483 name=self.op.instance_name,
9484 primary_node=self.op.pnode,
9485 secondary_nodes=self.secondaries,
9486 status=self.op.start,
9487 os_type=self.op.os_type,
9488 minmem=self.be_full[constants.BE_MINMEM],
9489 maxmem=self.be_full[constants.BE_MAXMEM],
9490 vcpus=self.be_full[constants.BE_VCPUS],
9491 nics=_NICListToTuple(self, self.nics),
9492 disk_template=self.op.disk_template,
9493 disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
9494 for d in self.disks],
9497 hypervisor_name=self.op.hypervisor,
9499 ))
9501 return env
9503 def BuildHooksNodes(self):
9504 """Build hooks nodes.
9506 """
9507 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
9508 return nl, nl
9510 def _ReadExportInfo(self):
9511 """Reads the export information from disk.
9513 It will override the opcode source node and path with the actual
9514 information, if these two were not specified before.
9516 @return: the export information
9518 """
9519 assert self.op.mode == constants.INSTANCE_IMPORT
9521 src_node = self.op.src_node
9522 src_path = self.op.src_path
9524 if src_node is None:
9525 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
9526 exp_list = self.rpc.call_export_list(locked_nodes)
9527 found = False
9528 for node in exp_list:
9529 if exp_list[node].fail_msg:
9530 continue
9531 if src_path in exp_list[node].payload:
9532 found = True
9533 self.op.src_node = src_node = node
9534 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
9535 src_path)
9536 break
9537 if not found:
9538 raise errors.OpPrereqError("No export found for relative path %s" %
9539 src_path, errors.ECODE_INVAL)
9541 _CheckNodeOnline(self, src_node)
9542 result = self.rpc.call_export_info(src_node, src_path)
9543 result.Raise("No export or invalid export found in dir %s" % src_path)
9545 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
9546 if not export_info.has_section(constants.INISECT_EXP):
9547 raise errors.ProgrammerError("Corrupted export config",
9548 errors.ECODE_ENVIRON)
9550 ei_version = export_info.get(constants.INISECT_EXP, "version")
9551 if (int(ei_version) != constants.EXPORT_VERSION):
9552 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
9553 (ei_version, constants.EXPORT_VERSION),
9554 errors.ECODE_ENVIRON)
9556 return export_info
9557 def _ReadExportParams(self, einfo):
9558 """Use export parameters as defaults.
9560 In case the opcode doesn't specify (as in override) some instance
9561 parameters, then try to use them from the export information, if
9562 that declares them.
9564 """
9565 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
9567 if self.op.disk_template is None:
9568 if einfo.has_option(constants.INISECT_INS, "disk_template"):
9569 self.op.disk_template = einfo.get(constants.INISECT_INS,
9570 "disk_template")
9571 if self.op.disk_template not in constants.DISK_TEMPLATES:
9572 raise errors.OpPrereqError("Disk template specified in configuration"
9573 " file is not one of the allowed values:"
9574 " %s" % " ".join(constants.DISK_TEMPLATES))
9576 raise errors.OpPrereqError("No disk template specified and the export"
9577 " is missing the disk_template information",
9580 if not self.op.disks:
9581 disks = []
9582 # TODO: import the disk iv_name too
9583 for idx in range(constants.MAX_DISKS):
9584 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
9585 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
9586 disks.append({constants.IDISK_SIZE: disk_sz})
9587 self.op.disks = disks
9588 if not disks and self.op.disk_template != constants.DT_DISKLESS:
9589 raise errors.OpPrereqError("No disk info specified and the export"
9590 " is missing the disk information",
9593 if not self.op.nics:
9594 nics = []
9595 for idx in range(constants.MAX_NICS):
9596 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
9597 ndict = {}
9598 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
9599 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
9600 ndict[name] = v
9601 nics.append(ndict)
9602 else:
9603 break
9605 self.op.nics = nics
9606 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
9607 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
9609 if (self.op.hypervisor is None and
9610 einfo.has_option(constants.INISECT_INS, "hypervisor")):
9611 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
9613 if einfo.has_section(constants.INISECT_HYP):
9614 # use the export parameters but do not override the ones
9615 # specified by the user
9616 for name, value in einfo.items(constants.INISECT_HYP):
9617 if name not in self.op.hvparams:
9618 self.op.hvparams[name] = value
9620 if einfo.has_section(constants.INISECT_BEP):
9621 # use the parameters, without overriding
9622 for name, value in einfo.items(constants.INISECT_BEP):
9623 if name not in self.op.beparams:
9624 self.op.beparams[name] = value
9625 # Compatibility for the old "memory" be param
9626 if name == constants.BE_MEMORY:
9627 if constants.BE_MAXMEM not in self.op.beparams:
9628 self.op.beparams[constants.BE_MAXMEM] = value
9629 if constants.BE_MINMEM not in self.op.beparams:
9630 self.op.beparams[constants.BE_MINMEM] = value
9631 else:
9632 # try to read the parameters old style, from the main section
9633 for name in constants.BES_PARAMETERS:
9634 if (name not in self.op.beparams and
9635 einfo.has_option(constants.INISECT_INS, name)):
9636 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
9638 if einfo.has_section(constants.INISECT_OSP):
9639 # use the parameters, without overriding
9640 for name, value in einfo.items(constants.INISECT_OSP):
9641 if name not in self.op.osparams:
9642 self.op.osparams[name] = value
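# A rough, illustrative sketch (all values invented) of the export file
# layout that the reads above assume; the real section and option set
# depends on the exporting cluster's version:
#
#   [export]
#   version = 0
#   os = debian-image
#
#   [instance]
#   disk_template = drbd
#   disk0_size = 10240
#   nic0_mac = aa:00:00:fa:3a:3f
#
#   [hypervisor]
#   kernel_path = /boot/vmlinuz
#
#   [backend]
#   memory = 1024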
9644 def _RevertToDefaults(self, cluster):
9645 """Revert the instance parameters to the default values.
9649 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
9650 for name in self.op.hvparams.keys():
9651 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
9652 del self.op.hvparams[name]
9654 be_defs = cluster.SimpleFillBE({})
9655 for name in self.op.beparams.keys():
9656 if name in be_defs and be_defs[name] == self.op.beparams[name]:
9657 del self.op.beparams[name]
9659 nic_defs = cluster.SimpleFillNIC({})
9660 for nic in self.op.nics:
9661 for name in constants.NICS_PARAMETERS:
9662 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
9663 del nic[name]
9665 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
9666 for name in self.op.osparams.keys():
9667 if name in os_defs and os_defs[name] == self.op.osparams[name]:
9668 del self.op.osparams[name]
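# Example of the effect (illustrative): if the cluster-wide default for
# a backend parameter is 1024 and the opcode also specifies 1024, the
# value is dropped here, so the instance keeps tracking future changes
# to the cluster default instead of pinning the value forever.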
9670 def _CalculateFileStorageDir(self):
9671 """Calculate final instance file storage dir.
9674 # file storage dir calculation/check
9675 self.instance_file_storage_dir = None
9676 if self.op.disk_template in constants.DTS_FILEBASED:
9677 # build the full file storage dir path
9678 joinargs = []
9680 if self.op.disk_template == constants.DT_SHARED_FILE:
9681 get_fsd_fn = self.cfg.GetSharedFileStorageDir
9682 else:
9683 get_fsd_fn = self.cfg.GetFileStorageDir
9685 cfg_storagedir = get_fsd_fn()
9686 if not cfg_storagedir:
9687 raise errors.OpPrereqError("Cluster file storage dir not defined")
9688 joinargs.append(cfg_storagedir)
9690 if self.op.file_storage_dir is not None:
9691 joinargs.append(self.op.file_storage_dir)
9693 joinargs.append(self.op.instance_name)
9695 # pylint: disable=W0142
9696 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
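# The resulting path is therefore
#   <cluster storage dir>[/<file_storage_dir from the opcode>]/<instance name>
# e.g. (illustrative) /srv/ganeti/file-storage/mysubdir/instance1.example.com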
9698 def CheckPrereq(self): # pylint: disable=R0914
9699 """Check prerequisites.
9702 self._CalculateFileStorageDir()
9704 if self.op.mode == constants.INSTANCE_IMPORT:
9705 export_info = self._ReadExportInfo()
9706 self._ReadExportParams(export_info)
9707 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
9708 else:
9709 self._old_instance_name = None
9711 if (not self.cfg.GetVGName() and
9712 self.op.disk_template not in constants.DTS_NOT_LVM):
9713 raise errors.OpPrereqError("Cluster does not support lvm-based"
9714 " instances", errors.ECODE_STATE)
9716 if (self.op.hypervisor is None or
9717 self.op.hypervisor == constants.VALUE_AUTO):
9718 self.op.hypervisor = self.cfg.GetHypervisorType()
9720 cluster = self.cfg.GetClusterInfo()
9721 enabled_hvs = cluster.enabled_hypervisors
9722 if self.op.hypervisor not in enabled_hvs:
9723 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
9724 " cluster (%s)" % (self.op.hypervisor,
9725 ",".join(enabled_hvs)),
9728 # Check tag validity
9729 for tag in self.op.tags:
9730 objects.TaggableObject.ValidateTag(tag)
9732 # check hypervisor parameter syntax (locally)
9733 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
9734 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
9735 self.op.hvparams)
9736 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
9737 hv_type.CheckParameterSyntax(filled_hvp)
9738 self.hv_full = filled_hvp
9739 # check that we don't specify global parameters on an instance
9740 _CheckGlobalHvParams(self.op.hvparams)
9742 # fill and remember the beparams dict
9743 default_beparams = cluster.beparams[constants.PP_DEFAULT]
9744 for param, value in self.op.beparams.iteritems():
9745 if value == constants.VALUE_AUTO:
9746 self.op.beparams[param] = default_beparams[param]
9747 objects.UpgradeBeParams(self.op.beparams)
9748 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
9749 self.be_full = cluster.SimpleFillBE(self.op.beparams)
9751 # build os parameters
9752 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
9754 # now that hvp/bep are in final format, let's reset to defaults,
9755 # if requested
9756 if self.op.identify_defaults:
9757 self._RevertToDefaults(cluster)
9759 # NIC buildup
9760 self.nics = []
9761 for idx, nic in enumerate(self.op.nics):
9762 nic_mode_req = nic.get(constants.INIC_MODE, None)
9763 nic_mode = nic_mode_req
9764 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
9765 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
9767 # in routed mode, for the first nic, the default ip is 'auto'
9768 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
9769 default_ip_mode = constants.VALUE_AUTO
9771 default_ip_mode = constants.VALUE_NONE
9773 # ip validity checks
9774 ip = nic.get(constants.INIC_IP, default_ip_mode)
9775 if ip is None or ip.lower() == constants.VALUE_NONE:
9776 nic_ip = None
9777 elif ip.lower() == constants.VALUE_AUTO:
9778 if not self.op.name_check:
9779 raise errors.OpPrereqError("IP address set to auto but name checks"
9780 " have been skipped",
9782 nic_ip = self.hostname1.ip
9783 else:
9784 if not netutils.IPAddress.IsValid(ip):
9785 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
9786 errors.ECODE_INVAL)
9787 nic_ip = ip
9789 # TODO: check the ip address for uniqueness
9790 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
9791 raise errors.OpPrereqError("Routed nic mode requires an ip address",
9794 # MAC address verification
9795 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
9796 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9797 mac = utils.NormalizeAndValidateMac(mac)
9799 try:
9800 self.cfg.ReserveMAC(mac, self.proc.GetECId())
9801 except errors.ReservationError:
9802 raise errors.OpPrereqError("MAC address %s already in use"
9803 " in cluster" % mac,
9804 errors.ECODE_NOTUNIQUE)
9806 # Build nic parameters
9807 link = nic.get(constants.INIC_LINK, None)
9808 if link == constants.VALUE_AUTO:
9809 link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
9810 nicparams = {}
9811 if nic_mode_req:
9812 nicparams[constants.NIC_MODE] = nic_mode
9813 if link:
9814 nicparams[constants.NIC_LINK] = link
9816 check_params = cluster.SimpleFillNIC(nicparams)
9817 objects.NIC.CheckParameterSyntax(check_params)
9818 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
9820 # disk checks/pre-build
9821 default_vg = self.cfg.GetVGName()
9822 self.disks = []
9823 for disk in self.op.disks:
9824 mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
9825 if mode not in constants.DISK_ACCESS_SET:
9826 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
9827 mode, errors.ECODE_INVAL)
9828 size = disk.get(constants.IDISK_SIZE, None)
9830 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
9833 except (TypeError, ValueError):
9834 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
9837 data_vg = disk.get(constants.IDISK_VG, default_vg)
9838 new_disk = {
9839 constants.IDISK_SIZE: size,
9840 constants.IDISK_MODE: mode,
9841 constants.IDISK_VG: data_vg,
9842 }
9843 if constants.IDISK_METAVG in disk:
9844 new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
9845 if constants.IDISK_ADOPT in disk:
9846 new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
9847 self.disks.append(new_disk)
9849 if self.op.mode == constants.INSTANCE_IMPORT:
9850 disk_images = []
9851 for idx in range(len(self.disks)):
9852 option = "disk%d_dump" % idx
9853 if export_info.has_option(constants.INISECT_INS, option):
9854 # FIXME: are the old os-es, disk sizes, etc. useful?
9855 export_name = export_info.get(constants.INISECT_INS, option)
9856 image = utils.PathJoin(self.op.src_path, export_name)
9857 disk_images.append(image)
9858 else:
9859 disk_images.append(False)
9861 self.src_images = disk_images
9863 if self.op.instance_name == self._old_instance_name:
9864 for idx, nic in enumerate(self.nics):
9865 if nic.mac == constants.VALUE_AUTO:
9866 nic_mac_ini = "nic%d_mac" % idx
9867 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
9869 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
9871 # ip ping checks (we use the same ip that was resolved in ExpandNames)
9872 if self.op.ip_check:
9873 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
9874 raise errors.OpPrereqError("IP %s of instance %s already in use" %
9875 (self.check_ip, self.op.instance_name),
9876 errors.ECODE_NOTUNIQUE)
9878 #### mac address generation
9879 # By generating here the mac address both the allocator and the hooks get
9880 # the real final mac address rather than the 'auto' or 'generate' value.
9881 # There is a race condition between the generation and the instance object
9882 # creation, which means that we know the mac is valid now, but we're not
9883 # sure it will be when we actually add the instance. If things go bad
9884 # adding the instance will abort because of a duplicate mac, and the
9885 # creation job will fail.
9886 for nic in self.nics:
9887 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9888 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
9892 if self.op.iallocator is not None:
9893 self._RunAllocator()
9895 # Release all unneeded node locks
9896 _ReleaseLocks(self, locking.LEVEL_NODE,
9897 keep=filter(None, [self.op.pnode, self.op.snode,
9898 self.op.src_node]))
9899 _ReleaseLocks(self, locking.LEVEL_NODE_RES,
9900 keep=filter(None, [self.op.pnode, self.op.snode,
9901 self.op.src_node]))
9903 #### node related checks
9905 # check primary node
9906 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
9907 assert self.pnode is not None, \
9908 "Cannot retrieve locked node %s" % self.op.pnode
9910 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
9911 pnode.name, errors.ECODE_STATE)
9913 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
9914 pnode.name, errors.ECODE_STATE)
9915 if not pnode.vm_capable:
9916 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
9917 " '%s'" % pnode.name, errors.ECODE_STATE)
9919 self.secondaries = []
9921 # mirror node verification
9922 if self.op.disk_template in constants.DTS_INT_MIRROR:
9923 if self.op.snode == pnode.name:
9924 raise errors.OpPrereqError("The secondary node cannot be the"
9925 " primary node", errors.ECODE_INVAL)
9926 _CheckNodeOnline(self, self.op.snode)
9927 _CheckNodeNotDrained(self, self.op.snode)
9928 _CheckNodeVmCapable(self, self.op.snode)
9929 self.secondaries.append(self.op.snode)
9931 snode = self.cfg.GetNodeInfo(self.op.snode)
9932 if pnode.group != snode.group:
9933 self.LogWarning("The primary and secondary nodes are in two"
9934 " different node groups; the disk parameters"
9935 " from the first disk's node group will be"
9938 nodenames = [pnode.name] + self.secondaries
9940 if not self.adopt_disks:
9941 if self.op.disk_template == constants.DT_RBD:
9942 # _CheckRADOSFreeSpace() is just a placeholder.
9943 # Any function that checks prerequisites can be placed here.
9944 # Check if there is enough space on the RADOS cluster.
9945 _CheckRADOSFreeSpace()
9947 # Check lv size requirements, if not adopting
9948 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
9949 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
9951 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
9952 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
9953 disk[constants.IDISK_ADOPT])
9954 for disk in self.disks])
9955 if len(all_lvs) != len(self.disks):
9956 raise errors.OpPrereqError("Duplicate volume names given for adoption",
9958 for lv_name in all_lvs:
9960 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
9961 # to ReserveLV uses the same syntax
9962 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
9963 except errors.ReservationError:
9964 raise errors.OpPrereqError("LV named %s used by another instance" %
9965 lv_name, errors.ECODE_NOTUNIQUE)
9967 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
9968 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
9970 node_lvs = self.rpc.call_lv_list([pnode.name],
9971 vg_names.payload.keys())[pnode.name]
9972 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
9973 node_lvs = node_lvs.payload
9975 delta = all_lvs.difference(node_lvs.keys())
9977 raise errors.OpPrereqError("Missing logical volume(s): %s" %
9978 utils.CommaJoin(delta),
9980 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
9982 raise errors.OpPrereqError("Online logical volumes found, cannot"
9983 " adopt: %s" % utils.CommaJoin(online_lvs),
9985 # update the size of disk based on what is found
9986 for dsk in self.disks:
9987 dsk[constants.IDISK_SIZE] = \
9988 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
9989 dsk[constants.IDISK_ADOPT])][0]))
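# Note (added for clarity): judging by the uses above, the lv_list
# payload maps "vg/lv" names to tuples whose first element is the size
# in MiB and whose third element is the "online" flag checked earlier.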
9991 elif self.op.disk_template == constants.DT_BLOCK:
9992 # Normalize and de-duplicate device paths
9993 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
9994 for disk in self.disks])
9995 if len(all_disks) != len(self.disks):
9996 raise errors.OpPrereqError("Duplicate disk names given for adoption",
9998 baddisks = [d for d in all_disks
9999 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
10001 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
10002 " cannot be adopted" %
10003 (", ".join(baddisks),
10004 constants.ADOPTABLE_BLOCKDEV_ROOT),
10005 errors.ECODE_INVAL)
10007 node_disks = self.rpc.call_bdev_sizes([pnode.name],
10008 list(all_disks))[pnode.name]
10009 node_disks.Raise("Cannot get block device information from node %s" %
10011 node_disks = node_disks.payload
10012 delta = all_disks.difference(node_disks.keys())
10014 raise errors.OpPrereqError("Missing block device(s): %s" %
10015 utils.CommaJoin(delta),
10016 errors.ECODE_INVAL)
10017 for dsk in self.disks:
10018 dsk[constants.IDISK_SIZE] = \
10019 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
10021 # Verify instance specs
10022 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
10023 ispec = {
10024 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
10025 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
10026 constants.ISPEC_DISK_COUNT: len(self.disks),
10027 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
10028 for disk in self.disks],
10029 constants.ISPEC_NIC_COUNT: len(self.nics),
10030 constants.ISPEC_SPINDLE_USE: spindle_use,
10031 }
10033 group_info = self.cfg.GetNodeGroup(pnode.group)
10034 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
10035 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
10036 if not self.op.ignore_ipolicy and res:
10037 raise errors.OpPrereqError(("Instance allocation to group %s violates"
10038 " policy: %s") % (pnode.group,
10039 utils.CommaJoin(res)),
10040 errors.ECODE_INVAL)
10042 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
10044 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
10045 # check OS parameters (remotely)
10046 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
10048 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
10050 # memory check on primary node
10051 #TODO(dynmem): use MINMEM for checking
10052 if self.op.start:
10053 _CheckNodeFreeMemory(self, self.pnode.name,
10054 "creating instance %s" % self.op.instance_name,
10055 self.be_full[constants.BE_MAXMEM],
10056 self.op.hypervisor)
10058 self.dry_run_result = list(nodenames)
10060 def Exec(self, feedback_fn):
10061 """Create and add the instance to the cluster.
10064 instance = self.op.instance_name
10065 pnode_name = self.pnode.name
10067 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
10068 self.owned_locks(locking.LEVEL_NODE)), \
10069 "Node locks differ from node resource locks"
10071 ht_kind = self.op.hypervisor
10072 if ht_kind in constants.HTS_REQ_PORT:
10073 network_port = self.cfg.AllocatePort()
10075 network_port = None
10077 # This is ugly but we got a chicken-egg problem here
10078 # We can only take the group disk parameters, as the instance
10079 # has no disks yet (we are generating them right here).
10080 node = self.cfg.GetNodeInfo(pnode_name)
10081 nodegroup = self.cfg.GetNodeGroup(node.group)
10082 disks = _GenerateDiskTemplate(self,
10083 self.op.disk_template,
10084 instance, pnode_name,
10085 self.secondaries,
10086 self.disks,
10087 self.instance_file_storage_dir,
10088 self.op.file_driver,
10089 0,
10090 feedback_fn,
10091 self.cfg.GetGroupDiskParams(nodegroup))
10093 iobj = objects.Instance(name=instance, os=self.op.os_type,
10094 primary_node=pnode_name,
10095 nics=self.nics, disks=disks,
10096 disk_template=self.op.disk_template,
10097 admin_state=constants.ADMINST_DOWN,
10098 network_port=network_port,
10099 beparams=self.op.beparams,
10100 hvparams=self.op.hvparams,
10101 hypervisor=self.op.hypervisor,
10102 osparams=self.op.osparams,
10103 )
10105 if self.op.tags:
10106 for tag in self.op.tags:
10107 iobj.AddTag(tag)
10109 if self.adopt_disks:
10110 if self.op.disk_template == constants.DT_PLAIN:
10111 # rename LVs to the newly-generated names; we need to construct
10112 # 'fake' LV disks with the old data, plus the new unique_id
10113 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
10114 rename_to = []
10115 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
10116 rename_to.append(t_dsk.logical_id)
10117 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
10118 self.cfg.SetDiskID(t_dsk, pnode_name)
10119 result = self.rpc.call_blockdev_rename(pnode_name,
10120 zip(tmp_disks, rename_to))
10121 result.Raise("Failed to rename adoped LVs")
10123 feedback_fn("* creating instance disks...")
10124 try:
10125 _CreateDisks(self, iobj)
10126 except errors.OpExecError:
10127 self.LogWarning("Device creation failed, reverting...")
10128 try:
10129 _RemoveDisks(self, iobj)
10130 finally:
10131 self.cfg.ReleaseDRBDMinors(instance)
10132 raise
10134 feedback_fn("adding instance %s to cluster config" % instance)
10136 self.cfg.AddInstance(iobj, self.proc.GetECId())
10138 # Declare that we don't want to remove the instance lock anymore, as we've
10139 # added the instance to the config
10140 del self.remove_locks[locking.LEVEL_INSTANCE]
10142 if self.op.mode == constants.INSTANCE_IMPORT:
10143 # Release unused nodes
10144 _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
10146 # Release all nodes
10147 _ReleaseLocks(self, locking.LEVEL_NODE)
10149 disk_abort = False
10150 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
10151 feedback_fn("* wiping instance disks...")
10152 try:
10153 _WipeDisks(self, iobj)
10154 except errors.OpExecError, err:
10155 logging.exception("Wiping disks failed")
10156 self.LogWarning("Wiping instance disks failed (%s)", err)
10160 # Something is already wrong with the disks, don't do anything else
10162 elif self.op.wait_for_sync:
10163 disk_abort = not _WaitForSync(self, iobj)
10164 elif iobj.disk_template in constants.DTS_INT_MIRROR:
10165 # make sure the disks are not degraded (still sync-ing is ok)
10166 feedback_fn("* checking mirrors status")
10167 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
10168 else:
10169 disk_abort = False
10171 if disk_abort:
10172 _RemoveDisks(self, iobj)
10173 self.cfg.RemoveInstance(iobj.name)
10174 # Make sure the instance lock gets removed
10175 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
10176 raise errors.OpExecError("There are some degraded disks for"
10179 # Release all node resource locks
10180 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
10182 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
10183 # we need to set the disks ID to the primary node, since the
10184 # preceding code might or might not have done it, depending on
10185 # disk template and other options
10186 for disk in iobj.disks:
10187 self.cfg.SetDiskID(disk, pnode_name)
10188 if self.op.mode == constants.INSTANCE_CREATE:
10189 if not self.op.no_install:
10190 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
10191 not self.op.wait_for_sync)
10193 feedback_fn("* pausing disk sync to install instance OS")
10194 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10197 for idx, success in enumerate(result.payload):
10199 logging.warn("pause-sync of instance %s for disk %d failed",
10202 feedback_fn("* running the instance OS create scripts...")
10203 # FIXME: pass debug option from opcode to backend
10205 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
10206 self.op.debug_level)
10208 feedback_fn("* resuming disk sync")
10209 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10212 for idx, success in enumerate(result.payload):
10214 logging.warn("resume-sync of instance %s for disk %d failed",
10217 os_add_result.Raise("Could not add os for instance %s"
10218 " on node %s" % (instance, pnode_name))
10221 elif self.op.mode == constants.INSTANCE_IMPORT:
10222 feedback_fn("* running the instance OS import scripts...")
10224 transfers = []
10226 for idx, image in enumerate(self.src_images):
10227 if not image:
10228 continue
10230 # FIXME: pass debug option from opcode to backend
10231 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
10232 constants.IEIO_FILE, (image, ),
10233 constants.IEIO_SCRIPT,
10234 (iobj.disks[idx], idx),
10235 None)
10236 transfers.append(dt)
10238 import_result = \
10239 masterd.instance.TransferInstanceData(self, feedback_fn,
10240 self.op.src_node, pnode_name,
10241 self.pnode.secondary_ip,
10242 iobj, transfers)
10243 if not compat.all(import_result):
10244 self.LogWarning("Some disks for instance %s on node %s were not"
10245 " imported successfully" % (instance, pnode_name))
10247 rename_from = self._old_instance_name
10249 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
10250 feedback_fn("* preparing remote import...")
10251 # The source cluster will stop the instance before attempting to make
10252 # a connection. In some cases stopping an instance can take a long
10253 # time, hence the shutdown timeout is added to the connection
10254 # timeout.
10255 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
10256 self.op.source_shutdown_timeout)
10257 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
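# Illustrative arithmetic (values invented): with a connect-timeout
# constant of 60s and a source_shutdown_timeout of 120s, the importing
# side would wait up to 180s for the source cluster to connect.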
10259 assert iobj.primary_node == self.pnode.name
10260 disk_results = \
10261 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
10262 self.source_x509_ca,
10263 self._cds, timeouts)
10264 if not compat.all(disk_results):
10265 # TODO: Should the instance still be started, even if some disks
10266 # failed to import (valid for local imports, too)?
10267 self.LogWarning("Some disks for instance %s on node %s were not"
10268 " imported successfully" % (instance, pnode_name))
10270 rename_from = self.source_instance_name
10272 else:
10273 # also checked in the prereq part
10274 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
10277 # Run rename script on newly imported instance
10278 assert iobj.name == instance
10279 feedback_fn("Running rename script for %s" % instance)
10280 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
10281 rename_from,
10282 self.op.debug_level)
10283 if result.fail_msg:
10284 self.LogWarning("Failed to run rename script for %s on node"
10285 " %s: %s" % (instance, pnode_name, result.fail_msg))
10287 assert not self.owned_locks(locking.LEVEL_NODE_RES)
10289 if self.op.start:
10290 iobj.admin_state = constants.ADMINST_UP
10291 self.cfg.Update(iobj, feedback_fn)
10292 logging.info("Starting instance %s on node %s", instance, pnode_name)
10293 feedback_fn("* starting instance...")
10294 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
10295 False)
10296 result.Raise("Could not start instance")
10298 return list(iobj.all_nodes)
10301 def _CheckRADOSFreeSpace():
10302 """Compute disk size requirements inside the RADOS cluster.
10304 """
10305 # For the RADOS cluster we assume there is always enough space.
10306 pass
10309 class LUInstanceConsole(NoHooksLU):
10310 """Connect to an instance's console.
10312 This is somewhat special in that it returns the command line that
10313 you need to run on the master node in order to connect to the
10314 console.
10316 """
10319 def ExpandNames(self):
10320 self.share_locks = _ShareAll()
10321 self._ExpandAndLockInstance()
10323 def CheckPrereq(self):
10324 """Check prerequisites.
10326 This checks that the instance is in the cluster.
10328 """
10329 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10330 assert self.instance is not None, \
10331 "Cannot retrieve locked instance %s" % self.op.instance_name
10332 _CheckNodeOnline(self, self.instance.primary_node)
10334 def Exec(self, feedback_fn):
10335 """Connect to the console of an instance
10338 instance = self.instance
10339 node = instance.primary_node
10341 node_insts = self.rpc.call_instance_list([node],
10342 [instance.hypervisor])[node]
10343 node_insts.Raise("Can't get node information from %s" % node)
10345 if instance.name not in node_insts.payload:
10346 if instance.admin_state == constants.ADMINST_UP:
10347 state = constants.INSTST_ERRORDOWN
10348 elif instance.admin_state == constants.ADMINST_DOWN:
10349 state = constants.INSTST_ADMINDOWN
10351 state = constants.INSTST_ADMINOFFLINE
10352 raise errors.OpExecError("Instance %s is not running (state %s)" %
10353 (instance.name, state))
10355 logging.debug("Connecting to console of %s on %s", instance.name, node)
10357 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
10360 def _GetInstanceConsole(cluster, instance):
10361 """Returns console information for an instance.
10363 @type cluster: L{objects.Cluster}
10364 @type instance: L{objects.Instance}
10366 """
10368 hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
10369 # beparams and hvparams are passed separately, to avoid editing the
10370 # instance and then saving the defaults in the instance itself.
10371 hvparams = cluster.FillHV(instance)
10372 beparams = cluster.FillBE(instance)
10373 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
10375 assert console.instance == instance.name
10376 assert console.Validate()
10378 return console.ToDict()
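# Illustrative shape of the returned dictionary (field set varies by
# console kind, e.g. ssh vs. vnc; values invented):
#   {"instance": "instance1.example.com", "kind": "ssh",
#    "host": "node1.example.com", "user": "root", "command": [...]}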
10381 class LUInstanceReplaceDisks(LogicalUnit):
10382 """Replace the disks of an instance.
10385 HPATH = "mirrors-replace"
10386 HTYPE = constants.HTYPE_INSTANCE
10389 def CheckArguments(self):
10390 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
10391 self.op.iallocator)
10393 def ExpandNames(self):
10394 self._ExpandAndLockInstance()
10396 assert locking.LEVEL_NODE not in self.needed_locks
10397 assert locking.LEVEL_NODE_RES not in self.needed_locks
10398 assert locking.LEVEL_NODEGROUP not in self.needed_locks
10400 assert self.op.iallocator is None or self.op.remote_node is None, \
10401 "Conflicting options"
10403 if self.op.remote_node is not None:
10404 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10406 # Warning: do not remove the locking of the new secondary here
10407 # unless DRBD8.AddChildren is changed to work in parallel;
10408 # currently it doesn't since parallel invocations of
10409 # FindUnusedMinor will conflict
10410 self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
10411 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
10412 else:
10413 self.needed_locks[locking.LEVEL_NODE] = []
10414 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10416 if self.op.iallocator is not None:
10417 # iallocator will select a new node in the same group
10418 self.needed_locks[locking.LEVEL_NODEGROUP] = []
10420 self.needed_locks[locking.LEVEL_NODE_RES] = []
10422 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
10423 self.op.iallocator, self.op.remote_node,
10424 self.op.disks, False, self.op.early_release,
10425 self.op.ignore_ipolicy)
10427 self.tasklets = [self.replacer]
10429 def DeclareLocks(self, level):
10430 if level == locking.LEVEL_NODEGROUP:
10431 assert self.op.remote_node is None
10432 assert self.op.iallocator is not None
10433 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
10435 self.share_locks[locking.LEVEL_NODEGROUP] = 1
10436 # Lock all groups used by instance optimistically; this requires going
10437 # via the node before it's locked, requiring verification later on
10438 self.needed_locks[locking.LEVEL_NODEGROUP] = \
10439 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
10441 elif level == locking.LEVEL_NODE:
10442 if self.op.iallocator is not None:
10443 assert self.op.remote_node is None
10444 assert not self.needed_locks[locking.LEVEL_NODE]
10446 # Lock member nodes of all locked groups
10447 self.needed_locks[locking.LEVEL_NODE] = [node_name
10448 for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
10449 for node_name in self.cfg.GetNodeGroup(group_uuid).members]
10450 else:
10451 self._LockInstancesNodes()
10452 elif level == locking.LEVEL_NODE_RES:
10454 self.needed_locks[locking.LEVEL_NODE_RES] = \
10455 self.needed_locks[locking.LEVEL_NODE]
10457 def BuildHooksEnv(self):
10458 """Build hooks env.
10460 This runs on the master, the primary and all the secondaries.
10462 """
10463 instance = self.replacer.instance
10465 "MODE": self.op.mode,
10466 "NEW_SECONDARY": self.op.remote_node,
10467 "OLD_SECONDARY": instance.secondary_nodes[0],
10469 env.update(_BuildInstanceHookEnvByObject(self, instance))
10470 return env
10472 def BuildHooksNodes(self):
10473 """Build hooks nodes.
10476 instance = self.replacer.instance
10477 nl = [
10478 self.cfg.GetMasterNode(),
10479 instance.primary_node,
10480 ]
10481 if self.op.remote_node is not None:
10482 nl.append(self.op.remote_node)
10484 return nl, nl
10485 def CheckPrereq(self):
10486 """Check prerequisites.
10489 assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
10490 self.op.iallocator is None)
10492 # Verify if node group locks are still correct
10493 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
10494 if owned_groups:
10495 _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
10497 return LogicalUnit.CheckPrereq(self)
10500 class TLReplaceDisks(Tasklet):
10501 """Replaces disks for an instance.
10503 Note: Locking is not within the scope of this class.
10505 """
10506 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
10507 disks, delay_iallocator, early_release, ignore_ipolicy):
10508 """Initializes this class.
10511 Tasklet.__init__(self, lu)
10514 self.instance_name = instance_name
10515 self.mode = mode
10516 self.iallocator_name = iallocator_name
10517 self.remote_node = remote_node
10518 self.disks = disks
10519 self.delay_iallocator = delay_iallocator
10520 self.early_release = early_release
10521 self.ignore_ipolicy = ignore_ipolicy
10524 self.instance = None
10525 self.new_node = None
10526 self.target_node = None
10527 self.other_node = None
10528 self.remote_node_info = None
10529 self.node_secondary_ip = None
10532 def CheckArguments(mode, remote_node, iallocator):
10533 """Helper function for users of this class.
10536 # check for valid parameter combination
10537 if mode == constants.REPLACE_DISK_CHG:
10538 if remote_node is None and iallocator is None:
10539 raise errors.OpPrereqError("When changing the secondary either an"
10540 " iallocator script must be used or the"
10541 " new node given", errors.ECODE_INVAL)
10543 if remote_node is not None and iallocator is not None:
10544 raise errors.OpPrereqError("Give either the iallocator or the new"
10545 " secondary, not both", errors.ECODE_INVAL)
10547 elif remote_node is not None or iallocator is not None:
10548 # Not replacing the secondary
10549 raise errors.OpPrereqError("The iallocator and new node options can"
10550 " only be used when changing the"
10551 " secondary node", errors.ECODE_INVAL)
10554 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
10555 """Compute a new secondary node using an IAllocator.
10558 ial = IAllocator(lu.cfg, lu.rpc,
10559 mode=constants.IALLOCATOR_MODE_RELOC,
10560 name=instance_name,
10561 relocate_from=list(relocate_from))
10563 ial.Run(iallocator_name)
10565 if not ial.success:
10566 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
10567 " %s" % (iallocator_name, ial.info),
10568 errors.ECODE_NORES)
10570 if len(ial.result) != ial.required_nodes:
10571 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
10572 " of nodes (%s), required %s" %
10573 (iallocator_name,
10574 len(ial.result), ial.required_nodes),
10575 errors.ECODE_FAULT)
10577 remote_node_name = ial.result[0]
10579 lu.LogInfo("Selected new secondary for instance '%s': %s",
10580 instance_name, remote_node_name)
10582 return remote_node_name
10584 def _FindFaultyDisks(self, node_name):
10585 """Wrapper for L{_FindFaultyInstanceDisks}.
10588 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
10591 def _CheckDisksActivated(self, instance):
10592 """Checks if the instance disks are activated.
10594 @param instance: The instance to check disks
10595 @return: True if they are activated, False otherwise
10597 """
10598 nodes = instance.all_nodes
10600 for idx, dev in enumerate(instance.disks):
10602 self.lu.LogInfo("Checking disk/%d on %s", idx, node)
10603 self.cfg.SetDiskID(dev, node)
10605 result = _BlockdevFind(self, node, dev, instance)
10609 elif result.fail_msg or not result.payload:
10614 def CheckPrereq(self):
10615 """Check prerequisites.
10617 This checks that the instance is in the cluster.
10619 """
10620 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
10621 assert instance is not None, \
10622 "Cannot retrieve locked instance %s" % self.instance_name
10624 if instance.disk_template != constants.DT_DRBD8:
10625 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
10626 " instances", errors.ECODE_INVAL)
10628 if len(instance.secondary_nodes) != 1:
10629 raise errors.OpPrereqError("The instance has a strange layout,"
10630 " expected one secondary but found %d" %
10631 len(instance.secondary_nodes),
10632 errors.ECODE_FAULT)
10634 if not self.delay_iallocator:
10635 self._CheckPrereq2()
10637 def _CheckPrereq2(self):
10638 """Check prerequisites, second part.
10640 This function should always be part of CheckPrereq. It was separated and is
10641 now called from Exec because during node evacuation iallocator was only
10642 called with an unmodified cluster model, not taking planned changes into
10643 account.
10645 """
10646 instance = self.instance
10647 secondary_node = instance.secondary_nodes[0]
10649 if self.iallocator_name is None:
10650 remote_node = self.remote_node
10652 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
10653 instance.name, instance.secondary_nodes)
10655 if remote_node is None:
10656 self.remote_node_info = None
10657 else:
10658 assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
10659 "Remote node '%s' is not locked" % remote_node
10661 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
10662 assert self.remote_node_info is not None, \
10663 "Cannot retrieve locked node %s" % remote_node
10665 if remote_node == self.instance.primary_node:
10666 raise errors.OpPrereqError("The specified node is the primary node of"
10667 " the instance", errors.ECODE_INVAL)
10669 if remote_node == secondary_node:
10670 raise errors.OpPrereqError("The specified node is already the"
10671 " secondary node of the instance",
10672 errors.ECODE_INVAL)
10674 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
10675 constants.REPLACE_DISK_CHG):
10676 raise errors.OpPrereqError("Cannot specify disks to be replaced",
10677 errors.ECODE_INVAL)
10679 if self.mode == constants.REPLACE_DISK_AUTO:
10680 if not self._CheckDisksActivated(instance):
10681 raise errors.OpPrereqError("Please run activate-disks on instance %s"
10682 " first" % self.instance_name,
10683 errors.ECODE_STATE)
10684 faulty_primary = self._FindFaultyDisks(instance.primary_node)
10685 faulty_secondary = self._FindFaultyDisks(secondary_node)
10687 if faulty_primary and faulty_secondary:
10688 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
10689 " one node and can not be repaired"
10690 " automatically" % self.instance_name,
10691 errors.ECODE_STATE)
10693 if faulty_primary:
10694 self.disks = faulty_primary
10695 self.target_node = instance.primary_node
10696 self.other_node = secondary_node
10697 check_nodes = [self.target_node, self.other_node]
10698 elif faulty_secondary:
10699 self.disks = faulty_secondary
10700 self.target_node = secondary_node
10701 self.other_node = instance.primary_node
10702 check_nodes = [self.target_node, self.other_node]
10703 else:
10704 self.disks = []
10705 check_nodes = []
10707 else:
10708 # Non-automatic modes
10709 if self.mode == constants.REPLACE_DISK_PRI:
10710 self.target_node = instance.primary_node
10711 self.other_node = secondary_node
10712 check_nodes = [self.target_node, self.other_node]
10714 elif self.mode == constants.REPLACE_DISK_SEC:
10715 self.target_node = secondary_node
10716 self.other_node = instance.primary_node
10717 check_nodes = [self.target_node, self.other_node]
10719 elif self.mode == constants.REPLACE_DISK_CHG:
10720 self.new_node = remote_node
10721 self.other_node = instance.primary_node
10722 self.target_node = secondary_node
10723 check_nodes = [self.new_node, self.other_node]
10725 _CheckNodeNotDrained(self.lu, remote_node)
10726 _CheckNodeVmCapable(self.lu, remote_node)
10728 old_node_info = self.cfg.GetNodeInfo(secondary_node)
10729 assert old_node_info is not None
10730 if old_node_info.offline and not self.early_release:
10731 # doesn't make sense to delay the release
10732 self.early_release = True
10733 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
10734 " early-release mode", secondary_node)
10737 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
10740 # If not specified all disks should be replaced
10741 if not self.disks:
10742 self.disks = range(len(self.instance.disks))
10744 # TODO: This is ugly, but right now we can't distinguish between internal
10745 # submitted opcode and external one. We should fix that.
10746 if self.remote_node_info:
10747 # We change the node, lets verify it still meets instance policy
10748 new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
10749 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
10750 new_group_info)
10751 _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
10752 ignore=self.ignore_ipolicy)
10754 for node in check_nodes:
10755 _CheckNodeOnline(self.lu, node)
10757 touched_nodes = frozenset(node_name for node_name in [self.new_node,
10758 self.other_node,
10759 self.target_node]
10760 if node_name is not None)
10762 # Release unneeded node and node resource locks
10763 _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
10764 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
10766 # Release any owned node group
10767 if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
10768 _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
10770 # Check whether disks are valid
10771 for disk_idx in self.disks:
10772 instance.FindDisk(disk_idx)
10774 # Get secondary node IP addresses
10775 self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
10776 in self.cfg.GetMultiNodeInfo(touched_nodes))
10778 def Exec(self, feedback_fn):
10779 """Execute disk replacement.
10781 This dispatches the disk replacement to the appropriate handler.
10783 """
10784 if self.delay_iallocator:
10785 self._CheckPrereq2()
10788 # Verify owned locks before starting operation
10789 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
10790 assert set(owned_nodes) == set(self.node_secondary_ip), \
10791 ("Incorrect node locks, owning %s, expected %s" %
10792 (owned_nodes, self.node_secondary_ip.keys()))
10793 assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
10794 self.lu.owned_locks(locking.LEVEL_NODE_RES))
10796 owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
10797 assert list(owned_instances) == [self.instance_name], \
10798 "Instance '%s' not locked" % self.instance_name
10800 assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
10801 "Should not own any node group lock at this point"
10804 feedback_fn("No disks need replacement for instance '%s'" %
10805 self.instance.name)
10808 feedback_fn("Replacing disk(s) %s for instance '%s'" %
10809 (utils.CommaJoin(self.disks), self.instance.name))
10810 feedback_fn("Current primary node: %s", self.instance.primary_node)
10811 feedback_fn("Current seconary node: %s",
10812 utils.CommaJoin(self.instance.secondary_nodes))
10814 activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
10816 # Activate the instance disks if we're replacing them on a down instance
10817 if activate_disks:
10818 _StartInstanceDisks(self.lu, self.instance, True)
10820 try:
10821 # Should we replace the secondary node?
10822 if self.new_node is not None:
10823 fn = self._ExecDrbd8Secondary
10824 else:
10825 fn = self._ExecDrbd8DiskOnly
10827 result = fn(feedback_fn)
10828 finally:
10829 # Deactivate the instance disks if we're replacing them on a
10830 # down instance
10831 if activate_disks:
10832 _SafeShutdownInstanceDisks(self.lu, self.instance)
10834 assert not self.lu.owned_locks(locking.LEVEL_NODE)
10836 if __debug__:
10837 # Verify owned locks
10838 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
10839 nodes = frozenset(self.node_secondary_ip)
10840 assert ((self.early_release and not owned_nodes) or
10841 (not self.early_release and not (set(owned_nodes) - nodes))), \
10842 ("Not owning the correct locks, early_release=%s, owned=%r,"
10843 " nodes=%r" % (self.early_release, owned_nodes, nodes))
10847 def _CheckVolumeGroup(self, nodes):
10848 self.lu.LogInfo("Checking volume groups")
10850 vgname = self.cfg.GetVGName()
10852 # Make sure volume group exists on all involved nodes
10853 results = self.rpc.call_vg_list(nodes)
10855 raise errors.OpExecError("Can't list volume groups on the nodes")
10858 res = results[node]
10859 res.Raise("Error checking node %s" % node)
10860 if vgname not in res.payload:
10861 raise errors.OpExecError("Volume group '%s' not found on node %s" %
10864 def _CheckDisksExistence(self, nodes):
10865 # Check disk existence
10866 for idx, dev in enumerate(self.instance.disks):
10867 if idx not in self.disks:
10868 continue
10870 for node in nodes:
10871 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
10872 self.cfg.SetDiskID(dev, node)
10874 result = _BlockdevFind(self, node, dev, self.instance)
10876 msg = result.fail_msg
10877 if msg or not result.payload:
10878 if not msg:
10879 msg = "disk not found"
10880 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
10881 (idx, node, msg))
10883 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
10884 for idx, dev in enumerate(self.instance.disks):
10885 if idx not in self.disks:
10886 continue
10888 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
10889 (idx, node_name))
10891 if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
10892 on_primary, ldisk=ldisk):
10893 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
10894 " replace disks for instance %s" %
10895 (node_name, self.instance.name))
10897 def _CreateNewStorage(self, node_name):
10898 """Create new storage on the primary or secondary node.
10900 This is only used for same-node replaces, not for changing the
10901 secondary node, hence we don't want to modify the existing disk.
10903 """
10904 iv_names = {}
10906 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
10907 for idx, dev in enumerate(disks):
10908 if idx not in self.disks:
10909 continue
10911 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
10913 self.cfg.SetDiskID(dev, node_name)
10915 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
10916 names = _GenerateUniqueNames(self.lu, lv_names)
10918 (data_disk, meta_disk) = dev.children
10919 vg_data = data_disk.logical_id[0]
10920 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
10921 logical_id=(vg_data, names[0]),
10922 params=data_disk.params)
10923 vg_meta = meta_disk.logical_id[0]
10924 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
10925 logical_id=(vg_meta, names[1]),
10926 params=meta_disk.params)
10928 new_lvs = [lv_data, lv_meta]
10929 old_lvs = [child.Copy() for child in dev.children]
10930 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
10932 # we pass force_create=True to force the LVM creation
10933 for new_lv in new_lvs:
10934 _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
10935 _GetInstanceInfoText(self.instance), False)
10937 return iv_names
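# Note (added for clarity): iv_names maps each disk's iv_name (e.g.
# "disk/0") to a (drbd_dev, old_lvs, new_lvs) triple, which drives the
# detach/rename/attach sequence and the final cleanup in the callers.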
10939 def _CheckDevices(self, node_name, iv_names):
10940 for name, (dev, _, _) in iv_names.iteritems():
10941 self.cfg.SetDiskID(dev, node_name)
10943 result = _BlockdevFind(self, node_name, dev, self.instance)
10945 msg = result.fail_msg
10946 if msg or not result.payload:
10947 if not msg:
10948 msg = "disk not found"
10949 raise errors.OpExecError("Can't find DRBD device %s: %s" %
10950 (name, msg))
10952 if result.payload.is_degraded:
10953 raise errors.OpExecError("DRBD device %s is degraded!" % name)
10955 def _RemoveOldStorage(self, node_name, iv_names):
10956 for name, (_, old_lvs, _) in iv_names.iteritems():
10957 self.lu.LogInfo("Remove logical volumes for %s" % name)
10959 for lv in old_lvs:
10960 self.cfg.SetDiskID(lv, node_name)
10962 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
10963 if msg:
10964 self.lu.LogWarning("Can't remove old LV: %s" % msg,
10965 hint="remove unused LVs manually")
10967 def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
10968 """Replace a disk on the primary or secondary for DRBD 8.
10970 The algorithm for replace is quite complicated:
10972 1. for each disk to be replaced:
10974 1. create new LVs on the target node with unique names
10975 1. detach old LVs from the drbd device
10976 1. rename old LVs to name_replaced.<time_t>
10977 1. rename new LVs to old LVs
10978 1. attach the new LVs (with the old names now) to the drbd device
10980 1. wait for sync across all devices
10982 1. for each modified disk:
10984 1. remove old LVs (which have the name name_replaced.<time_t>)
10986 Failures are not very well handled.
10988 """
10990 steps_total = 6
10991 # Step: check device activation
10992 self.lu.LogStep(1, steps_total, "Check device existence")
10993 self._CheckDisksExistence([self.other_node, self.target_node])
10994 self._CheckVolumeGroup([self.target_node, self.other_node])
10996 # Step: check other node consistency
10997 self.lu.LogStep(2, steps_total, "Check peer consistency")
10998 self._CheckDisksConsistency(self.other_node,
10999 self.other_node == self.instance.primary_node,
11000 False)
11002 # Step: create new storage
11003 self.lu.LogStep(3, steps_total, "Allocate new storage")
11004 iv_names = self._CreateNewStorage(self.target_node)
11006 # Step: for each lv, detach+rename*2+attach
11007 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
11008 for dev, old_lvs, new_lvs in iv_names.itervalues():
11009 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
11011 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
11012 old_lvs)
11013 result.Raise("Can't detach drbd from local storage on node"
11014 " %s for device %s" % (self.target_node, dev.iv_name))
11016 #cfg.Update(instance)
11018 # ok, we created the new LVs, so now we know we have the needed
11019 # storage; as such, we proceed on the target node to rename
11020 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
11021 # using the assumption that logical_id == physical_id (which in
11022 # turn is the unique_id on that node)
11024 # FIXME(iustin): use a better name for the replaced LVs
11025 temp_suffix = int(time.time())
11026 ren_fn = lambda d, suff: (d.physical_id[0],
11027 d.physical_id[1] + "_replaced-%s" % suff)
11029 # Build the rename list based on what LVs exist on the node
11030 rename_old_to_new = []
11031 for to_ren in old_lvs:
11032 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
11033 if not result.fail_msg and result.payload:
11035 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
11037 self.lu.LogInfo("Renaming the old LVs on the target node")
11038 result = self.rpc.call_blockdev_rename(self.target_node,
11039 rename_old_to_new)
11040 result.Raise("Can't rename old LVs on node %s" % self.target_node)
11042 # Now we rename the new LVs to the old LVs
11043 self.lu.LogInfo("Renaming the new LVs on the target node")
11044 rename_new_to_old = [(new, old.physical_id)
11045 for old, new in zip(old_lvs, new_lvs)]
11046 result = self.rpc.call_blockdev_rename(self.target_node,
11047 rename_new_to_old)
11048 result.Raise("Can't rename new LVs on node %s" % self.target_node)
11050 # Intermediate steps of in memory modifications
11051 for old, new in zip(old_lvs, new_lvs):
11052 new.logical_id = old.logical_id
11053 self.cfg.SetDiskID(new, self.target_node)
11055 # We need to modify old_lvs so that removal later removes the
11056 # right LVs, not the newly added ones; note that old_lvs is a
11057 # copy here
11058 for disk in old_lvs:
11059 disk.logical_id = ren_fn(disk, temp_suffix)
11060 self.cfg.SetDiskID(disk, self.target_node)
11062 # Now that the new lvs have the old name, we can add them to the device
11063 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
11064 result = self.rpc.call_blockdev_addchildren(self.target_node,
11065 (dev, self.instance), new_lvs)
11066 msg = result.fail_msg
11067 if msg:
11068 for new_lv in new_lvs:
11069 msg2 = self.rpc.call_blockdev_remove(self.target_node,
11070 new_lv).fail_msg
11071 if msg2:
11072 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
11073 hint=("cleanup manually the unused logical"
11074 " volumes"))
11075 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
11077 cstep = itertools.count(5)
11079 if self.early_release:
11080 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11081 self._RemoveOldStorage(self.target_node, iv_names)
11082 # TODO: Check if releasing locks early still makes sense
11083 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11084 else:
11085 # Release all resource locks except those used by the instance
11086 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11087 keep=self.node_secondary_ip.keys())
11089 # Release all node locks while waiting for sync
11090 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11092 # TODO: Can the instance lock be downgraded here? Take the optional disk
11093 # shutdown in the caller into consideration.
11096 # This can fail as the old devices are degraded and _WaitForSync
11097 # does a combined result over all disks, so we don't check its return value
11098 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11099 _WaitForSync(self.lu, self.instance)
11101 # Check all devices manually
11102 self._CheckDevices(self.instance.primary_node, iv_names)
11104 # Step: remove old storage
11105 if not self.early_release:
11106 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11107 self._RemoveOldStorage(self.target_node, iv_names)
11109 def _ExecDrbd8Secondary(self, feedback_fn):
11110 """Replace the secondary node for DRBD 8.
11112 The algorithm for replace is quite complicated:
11113 - for all disks of the instance:
11114 - create new LVs on the new node with same names
11115 - shutdown the drbd device on the old secondary
11116 - disconnect the drbd network on the primary
11117 - create the drbd device on the new secondary
11118 - network attach the drbd on the primary, using an artifice:
11119 the drbd code for Attach() will connect to the network if it
11120 finds a device which is connected to the good local disks but
11121 not network enabled
11122 - wait for sync across all devices
11123 - remove all disks from the old secondary
11125 Failures are not very well handled.
11127 """
11128 steps_total = 6
11130 pnode = self.instance.primary_node
11132 # Step: check device activation
11133 self.lu.LogStep(1, steps_total, "Check device existence")
11134 self._CheckDisksExistence([self.instance.primary_node])
11135 self._CheckVolumeGroup([self.instance.primary_node])
11137 # Step: check other node consistency
11138 self.lu.LogStep(2, steps_total, "Check peer consistency")
11139 self._CheckDisksConsistency(self.instance.primary_node, True, True)
11141 # Step: create new storage
11142 self.lu.LogStep(3, steps_total, "Allocate new storage")
11143 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
11144 for idx, dev in enumerate(disks):
11145 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
11146 (self.new_node, idx))
11147 # we pass force_create=True to force LVM creation
11148 for new_lv in dev.children:
11149 _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
11150 True, _GetInstanceInfoText(self.instance), False)
11152 # Step 4: drbd minors and drbd setup changes
11153 # after this, we must manually remove the drbd minors on both the
11154 # error and the success paths
11155 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
11156 minors = self.cfg.AllocateDRBDMinor([self.new_node
11157 for dev in self.instance.disks],
11158 self.instance.name)
11159 logging.debug("Allocated minors %r", minors)
11162 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
11163 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
11164 (self.new_node, idx))
11165 # create new devices on new_node; note that we create two IDs:
11166 # one without port, so the drbd will be activated without
11167 # networking information on the new node at this stage, and one
11168 # with network, for the latter activation in step 4
11169 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
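# Illustrative sketch (assumption, values made up): the logical_id unpacked
# above is a 6-tuple such as
#   ("node1.example.com", "node2.example.com", 11000, 0, 1, "6b8b4567")
# i.e. (nodeA, nodeB, TCP port, minor on nodeA, minor on nodeB, secret).
# The code below builds two variants pointing at the new secondary: one
# with port=None so the device comes up without networking, and one with
# the original port for the later network attach.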
11170 if self.instance.primary_node == o_node1:
11171 p_minor = o_minor1
11172 else:
11173 assert self.instance.primary_node == o_node2, "Three-node instance?"
11174 p_minor = o_minor2
11176 new_alone_id = (self.instance.primary_node, self.new_node, None,
11177 p_minor, new_minor, o_secret)
11178 new_net_id = (self.instance.primary_node, self.new_node, o_port,
11179 p_minor, new_minor, o_secret)
11181 iv_names[idx] = (dev, dev.children, new_net_id)
11182 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
11184 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
11185 logical_id=new_alone_id,
11186 children=dev.children,
11187 size=dev.size,
11188 params={})
11189 (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
11190 self.cfg)
11191 try:
11192 _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
11193 anno_new_drbd,
11194 _GetInstanceInfoText(self.instance), False)
11195 except errors.GenericError:
11196 self.cfg.ReleaseDRBDMinors(self.instance.name)
11197 raise
11199 # We have new devices, shutdown the drbd on the old secondary
11200 for idx, dev in enumerate(self.instance.disks):
11201 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
11202 self.cfg.SetDiskID(dev, self.target_node)
11203 msg = self.rpc.call_blockdev_shutdown(self.target_node,
11204 (dev, self.instance)).fail_msg
11205 if msg:
11206 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
11207 " node: %s" % (idx, msg),
11208 hint=("Please cleanup this device manually as"
11209 " soon as possible"))
11211 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
11212 result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
11213 self.instance.disks)[pnode]
11215 msg = result.fail_msg
11216 if msg:
11217 # detaches didn't succeed (unlikely)
11218 self.cfg.ReleaseDRBDMinors(self.instance.name)
11219 raise errors.OpExecError("Can't detach the disks from the network on"
11220 " old node: %s" % (msg,))
11222 # if we managed to detach at least one, we update all the disks of
11223 # the instance to point to the new secondary
11224 self.lu.LogInfo("Updating instance configuration")
11225 for dev, _, new_logical_id in iv_names.itervalues():
11226 dev.logical_id = new_logical_id
11227 self.cfg.SetDiskID(dev, self.instance.primary_node)
11229 self.cfg.Update(self.instance, feedback_fn)
11231 # Release all node locks (the configuration has been updated)
11232 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11234 # and now perform the drbd attach
11235 self.lu.LogInfo("Attaching primary drbds to new secondary"
11236 " (standalone => connected)")
11237 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
11238 self.new_node],
11239 self.node_secondary_ip,
11240 (self.instance.disks, self.instance),
11241 self.instance.name,
11242 False)
11243 for to_node, to_result in result.items():
11244 msg = to_result.fail_msg
11245 if msg:
11246 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
11247 to_node, msg,
11248 hint=("please do a gnt-instance info to see the"
11249 " status of disks"))
11251 cstep = itertools.count(5)
11253 if self.early_release:
11254 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11255 self._RemoveOldStorage(self.target_node, iv_names)
11256 # TODO: Check if releasing locks early still makes sense
11257 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11258 else:
11259 # Release all resource locks except those used by the instance
11260 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11261 keep=self.node_secondary_ip.keys())
11263 # TODO: Can the instance lock be downgraded here? Take the optional disk
11264 # shutdown in the caller into consideration.
11267 # This can fail as the old devices are degraded and _WaitForSync
11268 # does a combined result over all disks, so we don't check its return value
11269 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11270 _WaitForSync(self.lu, self.instance)
11272 # Check all devices manually
11273 self._CheckDevices(self.instance.primary_node, iv_names)
11275 # Step: remove old storage
11276 if not self.early_release:
11277 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11278 self._RemoveOldStorage(self.target_node, iv_names)
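# Illustrative CLI mapping (assumption, not from the original source): the
# code path above is what serves a secondary-node change such as
#   gnt-instance replace-disks --new-secondary node4.example.com instance1
# which arrives here as an OpInstanceReplaceDisks opcode with
# mode=constants.REPLACE_DISK_CHG.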
11281 class LURepairNodeStorage(NoHooksLU):
11282 """Repairs the volume group on a node.
11287 def CheckArguments(self):
11288 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11290 storage_type = self.op.storage_type
11292 if (constants.SO_FIX_CONSISTENCY not in
11293 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
11294 raise errors.OpPrereqError("Storage units of type '%s' can not be"
11295 " repaired" % storage_type,
11296 errors.ECODE_INVAL)
11298 def ExpandNames(self):
11299 self.needed_locks = {
11300 locking.LEVEL_NODE: [self.op.node_name],
11301 }
11303 def _CheckFaultyDisks(self, instance, node_name):
11304 """Ensure faulty disks abort the opcode or at least warn."""
11305 try:
11306 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
11307 node_name, True):
11308 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
11309 " node '%s'" % (instance.name, node_name),
11310 errors.ECODE_STATE)
11311 except errors.OpPrereqError, err:
11312 if self.op.ignore_consistency:
11313 self.proc.LogWarning(str(err.args[0]))
11314 else:
11315 raise
11317 def CheckPrereq(self):
11318 """Check prerequisites.
11321 # Check whether any instance on this node has faulty disks
11322 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
11323 if inst.admin_state != constants.ADMINST_UP:
11324 continue
11325 check_nodes = set(inst.all_nodes)
11326 check_nodes.discard(self.op.node_name)
11327 for inst_node_name in check_nodes:
11328 self._CheckFaultyDisks(inst, inst_node_name)
11330 def Exec(self, feedback_fn):
11331 feedback_fn("Repairing storage unit '%s' on %s ..." %
11332 (self.op.name, self.op.node_name))
11334 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
11335 result = self.rpc.call_storage_execute(self.op.node_name,
11336 self.op.storage_type, st_args,
11337 self.op.name,
11338 constants.SO_FIX_CONSISTENCY)
11339 result.Raise("Failed to repair storage unit '%s' on %s" %
11340 (self.op.name, self.op.node_name))
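# Illustrative usage (assumption): repairing an LVM volume group on a node
# corresponds to an opcode roughly like
#   opcodes.OpRepairNodeStorage(node_name="node2.example.com",
#                               storage_type=constants.ST_LVM_VG,
#                               name="xenvg",
#                               ignore_consistency=False)
# CheckArguments above rejects storage types whose VALID_STORAGE_OPERATIONS
# entry does not include constants.SO_FIX_CONSISTENCY.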
11343 class LUNodeEvacuate(NoHooksLU):
11344 """Evacuates instances off a list of nodes.
11349 _MODE2IALLOCATOR = {
11350 constants.NODE_EVAC_PRI: constants.IALLOCATOR_NEVAC_PRI,
11351 constants.NODE_EVAC_SEC: constants.IALLOCATOR_NEVAC_SEC,
11352 constants.NODE_EVAC_ALL: constants.IALLOCATOR_NEVAC_ALL,
11353 }
11354 assert frozenset(_MODE2IALLOCATOR.keys()) == constants.NODE_EVAC_MODES
11355 assert (frozenset(_MODE2IALLOCATOR.values()) ==
11356 constants.IALLOCATOR_NEVAC_MODES)
11358 def CheckArguments(self):
11359 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
11361 def ExpandNames(self):
11362 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11364 if self.op.remote_node is not None:
11365 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
11366 assert self.op.remote_node
11368 if self.op.remote_node == self.op.node_name:
11369 raise errors.OpPrereqError("Can not use evacuated node as a new"
11370 " secondary node", errors.ECODE_INVAL)
11372 if self.op.mode != constants.NODE_EVAC_SEC:
11373 raise errors.OpPrereqError("Without the use of an iallocator only"
11374 " secondary instances can be evacuated",
11375 errors.ECODE_INVAL)
11378 self.share_locks = _ShareAll()
11379 self.needed_locks = {
11380 locking.LEVEL_INSTANCE: [],
11381 locking.LEVEL_NODEGROUP: [],
11382 locking.LEVEL_NODE: [],
11383 }
11385 # Determine nodes (via group) optimistically, needs verification once locks
11386 # have been acquired
11387 self.lock_nodes = self._DetermineNodes()
11389 def _DetermineNodes(self):
11390 """Gets the list of nodes to operate on.
11393 if self.op.remote_node is None:
11394 # Iallocator will choose any node(s) in the same group
11395 group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
11396 else:
11397 group_nodes = frozenset([self.op.remote_node])
11399 # Determine nodes to be locked
11400 return set([self.op.node_name]) | group_nodes
11402 def _DetermineInstances(self):
11403 """Builds list of instances to operate on.
11406 assert self.op.mode in constants.NODE_EVAC_MODES
11408 if self.op.mode == constants.NODE_EVAC_PRI:
11409 # Primary instances only
11410 inst_fn = _GetNodePrimaryInstances
11411 assert self.op.remote_node is None, \
11412 "Evacuating primary instances requires iallocator"
11413 elif self.op.mode == constants.NODE_EVAC_SEC:
11414 # Secondary instances only
11415 inst_fn = _GetNodeSecondaryInstances
11416 else:
11417 # All instances
11418 assert self.op.mode == constants.NODE_EVAC_ALL
11419 inst_fn = _GetNodeInstances
11420 # TODO: In 2.6, change the iallocator interface to take an evacuation mode
11421 if self.op.iallocator is None:
11422 raise errors.OpPrereqError("Due to an issue with the iallocator"
11423 " interface it is not possible to evacuate"
11424 " all instances at once; specify explicitly"
11425 " whether to evacuate primary or secondary"
11427 errors.ECODE_INVAL)
11429 return inst_fn(self.cfg, self.op.node_name)
11431 def DeclareLocks(self, level):
11432 if level == locking.LEVEL_INSTANCE:
11433 # Lock instances optimistically, needs verification once node and group
11434 # locks have been acquired
11435 self.needed_locks[locking.LEVEL_INSTANCE] = \
11436 set(i.name for i in self._DetermineInstances())
11438 elif level == locking.LEVEL_NODEGROUP:
11439 # Lock node groups for all potential target nodes optimistically, needs
11440 # verification once nodes have been acquired
11441 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11442 self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
11444 elif level == locking.LEVEL_NODE:
11445 self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
11447 def CheckPrereq(self):
11448 # Verify locks
11449 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11450 owned_nodes = self.owned_locks(locking.LEVEL_NODE)
11451 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
11453 need_nodes = self._DetermineNodes()
11455 if not owned_nodes.issuperset(need_nodes):
11456 raise errors.OpPrereqError("Nodes in same group as '%s' changed since"
11457 " locks were acquired, current nodes are"
11458 " are '%s', used to be '%s'; retry the"
11460 (self.op.node_name,
11461 utils.CommaJoin(need_nodes),
11462 utils.CommaJoin(owned_nodes)),
11463 errors.ECODE_STATE)
11465 wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
11466 if owned_groups != wanted_groups:
11467 raise errors.OpExecError("Node groups changed since locks were acquired,"
11468 " current groups are '%s', used to be '%s';"
11469 " retry the operation" %
11470 (utils.CommaJoin(wanted_groups),
11471 utils.CommaJoin(owned_groups)))
11473 # Determine affected instances
11474 self.instances = self._DetermineInstances()
11475 self.instance_names = [i.name for i in self.instances]
11477 if set(self.instance_names) != owned_instances:
11478 raise errors.OpExecError("Instances on node '%s' changed since locks"
11479 " were acquired, current instances are '%s',"
11480 " used to be '%s'; retry the operation" %
11481 (self.op.node_name,
11482 utils.CommaJoin(self.instance_names),
11483 utils.CommaJoin(owned_instances)))
11485 if self.instance_names:
11486 self.LogInfo("Evacuating instances from node '%s': %s",
11488 utils.CommaJoin(utils.NiceSort(self.instance_names)))
11490 self.LogInfo("No instances to evacuate from node '%s'",
11493 if self.op.remote_node is not None:
11494 for i in self.instances:
11495 if i.primary_node == self.op.remote_node:
11496 raise errors.OpPrereqError("Node %s is the primary node of"
11497 " instance %s, cannot use it as"
11499 (self.op.remote_node, i.name),
11500 errors.ECODE_INVAL)
11502 def Exec(self, feedback_fn):
11503 assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
11505 if not self.instance_names:
11506 # No instances to evacuate
11507 jobs = []
11509 elif self.op.iallocator is not None:
11510 # TODO: Implement relocation to other group
11511 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
11512 evac_mode=self._MODE2IALLOCATOR[self.op.mode],
11513 instances=list(self.instance_names))
11515 ial.Run(self.op.iallocator)
11517 if not ial.success:
11518 raise errors.OpPrereqError("Can't compute node evacuation using"
11519 " iallocator '%s': %s" %
11520 (self.op.iallocator, ial.info),
11521 errors.ECODE_NORES)
11523 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
11525 elif self.op.remote_node is not None:
11526 assert self.op.mode == constants.NODE_EVAC_SEC
11527 jobs = [
11528 [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
11529 remote_node=self.op.remote_node,
11530 disks=[],
11531 mode=constants.REPLACE_DISK_CHG,
11532 early_release=self.op.early_release)]
11533 for instance_name in self.instance_names
11534 ]
11536 else:
11537 raise errors.ProgrammerError("No iallocator or remote node")
11539 return ResultWithJobs(jobs)
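# Illustrative usage (assumption): evacuating all secondary instances of a
# node with an iallocator boils down to an opcode like
#   opcodes.OpNodeEvacuate(node_name="node3.example.com",
#                          mode=constants.NODE_EVAC_SEC,
#                          iallocator="hail")
# Exec() then returns a ResultWithJobs whose job IDs the caller can poll.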
11542 def _SetOpEarlyRelease(early_release, op):
11543 """Sets C{early_release} flag on opcodes if available.
11547 op.early_release = early_release
11548 except AttributeError:
11549 assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
11554 def _NodeEvacDest(use_nodes, group, nodes):
11555 """Returns group or nodes depending on caller's choice.
11559 return utils.CommaJoin(nodes)
11564 def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
11565 """Unpacks the result of change-group and node-evacuate iallocator requests.
11567 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
11568 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
11570 @type lu: L{LogicalUnit}
11571 @param lu: Logical unit instance
11572 @type alloc_result: tuple/list
11573 @param alloc_result: Result from iallocator
11574 @type early_release: bool
11575 @param early_release: Whether to release locks early if possible
11576 @type use_nodes: bool
11577 @param use_nodes: Whether to display node names instead of groups
11579 """
11580 (moved, failed, jobs) = alloc_result
11582 if failed:
11583 failreason = utils.CommaJoin("%s (%s)" % (name, reason)
11584 for (name, reason) in failed)
11585 lu.LogWarning("Unable to evacuate instances %s", failreason)
11586 raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
11589 lu.LogInfo("Instances to be moved: %s",
11590 utils.CommaJoin("%s (to %s)" %
11591 (name, _NodeEvacDest(use_nodes, group, nodes))
11592 for (name, group, nodes) in moved))
11594 return [map(compat.partial(_SetOpEarlyRelease, early_release),
11595 map(opcodes.OpCode.LoadOpCode, ops))
11596 for ops in jobs]
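# Illustrative shape of alloc_result (assumption derived from the unpacking
# above):
#   moved  = [("instance1", "group1", ["node2.example.com"]), ...]
#   failed = [("instance2", "insufficient memory"), ...]
#   jobs   = [[op1_dict, op2_dict], ...]
# where each inner list of serialized opcodes becomes one submitted job.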
11599 class LUInstanceGrowDisk(LogicalUnit):
11600 """Grow a disk of an instance.
11603 HPATH = "disk-grow"
11604 HTYPE = constants.HTYPE_INSTANCE
11607 def ExpandNames(self):
11608 self._ExpandAndLockInstance()
11609 self.needed_locks[locking.LEVEL_NODE] = []
11610 self.needed_locks[locking.LEVEL_NODE_RES] = []
11611 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11612 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
11614 def DeclareLocks(self, level):
11615 if level == locking.LEVEL_NODE:
11616 self._LockInstancesNodes()
11617 elif level == locking.LEVEL_NODE_RES:
11618 # Copy node locks
11619 self.needed_locks[locking.LEVEL_NODE_RES] = \
11620 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
11622 def BuildHooksEnv(self):
11623 """Build hooks env.
11625 This runs on the master, the primary and all the secondaries.
11627 """
11628 env = {
11629 "DISK": self.op.disk,
11630 "AMOUNT": self.op.amount,
11631 "ABSOLUTE": self.op.absolute,
11632 }
11633 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11634 return env
11636 def BuildHooksNodes(self):
11637 """Build hooks nodes.
11640 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
11643 def CheckPrereq(self):
11644 """Check prerequisites.
11646 This checks that the instance is in the cluster.
11648 """
11649 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11650 assert instance is not None, \
11651 "Cannot retrieve locked instance %s" % self.op.instance_name
11652 nodenames = list(instance.all_nodes)
11653 for node in nodenames:
11654 _CheckNodeOnline(self, node)
11656 self.instance = instance
11658 if instance.disk_template not in constants.DTS_GROWABLE:
11659 raise errors.OpPrereqError("Instance's disk layout does not support"
11660 " growing", errors.ECODE_INVAL)
11662 self.disk = instance.FindDisk(self.op.disk)
11664 if self.op.absolute:
11665 self.target = self.op.amount
11666 self.delta = self.target - self.disk.size
11667 if self.delta < 0:
11668 raise errors.OpPrereqError("Requested size (%s) is smaller than "
11669 "current disk size (%s)" %
11670 (utils.FormatUnit(self.target, "h"),
11671 utils.FormatUnit(self.disk.size, "h")),
11672 errors.ECODE_STATE)
11673 else:
11674 self.delta = self.op.amount
11675 self.target = self.disk.size + self.delta
11676 if self.delta < 0:
11677 raise errors.OpPrereqError("Requested increment (%s) is negative" %
11678 utils.FormatUnit(self.delta, "h"),
11679 errors.ECODE_INVAL)
11681 if instance.disk_template not in (constants.DT_FILE,
11682 constants.DT_SHARED_FILE,
11683 constants.DT_RBD):
11684 # TODO: check the free disk space for file, when that feature will be
11685 # supported
11686 _CheckNodesFreeDiskPerVG(self, nodenames,
11687 self.disk.ComputeGrowth(self.delta))
11689 def Exec(self, feedback_fn):
11690 """Execute disk grow.
11693 instance = self.instance
11696 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11697 assert (self.owned_locks(locking.LEVEL_NODE) ==
11698 self.owned_locks(locking.LEVEL_NODE_RES))
11700 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
11702 raise errors.OpExecError("Cannot activate block device to grow")
11704 feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
11705 (self.op.disk, instance.name,
11706 utils.FormatUnit(self.delta, "h"),
11707 utils.FormatUnit(self.target, "h")))
11709 # First run all grow ops in dry-run mode
11710 for node in instance.all_nodes:
11711 self.cfg.SetDiskID(disk, node)
11712 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11713 True)
11714 result.Raise("Grow request failed to node %s" % node)
11716 # We know that (as far as we can test) operations across different
11717 # nodes will succeed, time to run it for real
11718 for node in instance.all_nodes:
11719 self.cfg.SetDiskID(disk, node)
11720 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11721 False)
11722 result.Raise("Grow request failed to node %s" % node)
11724 # TODO: Rewrite code to work properly
11725 # DRBD goes into sync mode for a short amount of time after executing the
11726 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
11727 # calling "resize" in sync mode fails. Sleeping for a short amount of
11728 # time is a work-around.
11730 time.sleep(5)
11731 disk.RecordGrow(self.delta)
11732 self.cfg.Update(instance, feedback_fn)
11734 # Changes have been recorded, release node lock
11735 _ReleaseLocks(self, locking.LEVEL_NODE)
11737 # Downgrade lock while waiting for sync
11738 self.glm.downgrade(locking.LEVEL_INSTANCE)
11740 if self.op.wait_for_sync:
11741 disk_abort = not _WaitForSync(self, instance, disks=[disk])
11742 if disk_abort:
11743 self.proc.LogWarning("Disk sync-ing has not returned a good"
11744 " status; please check the instance")
11745 if instance.admin_state != constants.ADMINST_UP:
11746 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
11747 elif instance.admin_state != constants.ADMINST_UP:
11748 self.proc.LogWarning("Not shutting down the disk even if the instance is"
11749 " not supposed to be running because no wait for"
11750 " sync mode was requested")
11752 assert self.owned_locks(locking.LEVEL_NODE_RES)
11753 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
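# Illustrative usage (assumption): growing disk 0 of an instance by 2 GiB
# maps to an opcode roughly like
#   opcodes.OpInstanceGrowDisk(instance_name="instance1.example.com",
#                              disk=0, amount=2048, absolute=False,
#                              wait_for_sync=True)
# With absolute=True, "amount" is instead the desired final size in MiB
# (see the delta/target computation in CheckPrereq above).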
11756 class LUInstanceQueryData(NoHooksLU):
11757 """Query runtime instance data.
11762 def ExpandNames(self):
11763 self.needed_locks = {}
11765 # Use locking if requested or when non-static information is wanted
11766 if not (self.op.static or self.op.use_locking):
11767 self.LogWarning("Non-static data requested, locks need to be acquired")
11768 self.op.use_locking = True
11770 if self.op.instances or not self.op.use_locking:
11771 # Expand instance names right here
11772 self.wanted_names = _GetWantedInstances(self, self.op.instances)
11773 else:
11774 # Will use acquired locks
11775 self.wanted_names = None
11777 if self.op.use_locking:
11778 self.share_locks = _ShareAll()
11780 if self.wanted_names is None:
11781 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
11782 else:
11783 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
11785 self.needed_locks[locking.LEVEL_NODEGROUP] = []
11786 self.needed_locks[locking.LEVEL_NODE] = []
11787 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11789 def DeclareLocks(self, level):
11790 if self.op.use_locking:
11791 if level == locking.LEVEL_NODEGROUP:
11792 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11794 # Lock all groups used by instances optimistically; this requires going
11795 # via the node before it's locked, requiring verification later on
11796 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11797 frozenset(group_uuid
11798 for instance_name in owned_instances
11799 for group_uuid in
11800 self.cfg.GetInstanceNodeGroups(instance_name))
11802 elif level == locking.LEVEL_NODE:
11803 self._LockInstancesNodes()
11805 def CheckPrereq(self):
11806 """Check prerequisites.
11808 This only checks the optional instance list against the existing names.
11810 """
11811 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
11812 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
11813 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
11815 if self.wanted_names is None:
11816 assert self.op.use_locking, "Locking was not used"
11817 self.wanted_names = owned_instances
11819 instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
11821 if self.op.use_locking:
11822 _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
11823 None)
11824 else:
11825 assert not (owned_instances or owned_groups or owned_nodes)
11827 self.wanted_instances = instances.values()
11829 def _ComputeBlockdevStatus(self, node, instance, dev):
11830 """Returns the status of a block device
11833 if self.op.static or not node:
11836 self.cfg.SetDiskID(dev, node)
11838 result = self.rpc.call_blockdev_find(node, dev)
11842 result.Raise("Can't compute disk status for %s" % instance.name)
11844 status = result.payload
11848 return (status.dev_path, status.major, status.minor,
11849 status.sync_percent, status.estimated_time,
11850 status.is_degraded, status.ldisk_status)
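# Illustrative return value (assumption, values made up):
#   ("/dev/drbd0", 147, 0, 99.5, 3, False, "ok")
# i.e. (dev_path, major, minor, sync_percent, estimated_time, is_degraded,
# ldisk_status), mirroring the tuple built above.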
11852 def _ComputeDiskStatus(self, instance, snode, dev):
11853 """Compute block device status.
11856 (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
11858 return self._ComputeDiskStatusInner(instance, snode, anno_dev)
11860 def _ComputeDiskStatusInner(self, instance, snode, dev):
11861 """Compute block device status.
11863 @attention: The device has to be annotated already.
11865 """
11866 if dev.dev_type in constants.LDS_DRBD:
11867 # we change the snode then (otherwise we use the one passed in)
11868 if dev.logical_id[0] == instance.primary_node:
11869 snode = dev.logical_id[1]
11870 else:
11871 snode = dev.logical_id[0]
11873 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
11874 instance, dev)
11875 dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
11877 if dev.children:
11878 dev_children = map(compat.partial(self._ComputeDiskStatusInner,
11879 instance, snode),
11880 dev.children)
11881 else:
11882 dev_children = []
11884 return {
11885 "iv_name": dev.iv_name,
11886 "dev_type": dev.dev_type,
11887 "logical_id": dev.logical_id,
11888 "physical_id": dev.physical_id,
11889 "pstatus": dev_pstatus,
11890 "sstatus": dev_sstatus,
11891 "children": dev_children,
11896 def Exec(self, feedback_fn):
11897 """Gather and return data"""
11900 cluster = self.cfg.GetClusterInfo()
11902 node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
11903 nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
11905 groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
11906 for node in nodes.values()))
11908 group2name_fn = lambda uuid: groups[uuid].name
11910 for instance in self.wanted_instances:
11911 pnode = nodes[instance.primary_node]
11913 if self.op.static or pnode.offline:
11914 remote_state = None
11915 if pnode.offline:
11916 self.LogWarning("Primary node %s is marked offline, returning static"
11917 " information only for instance %s" %
11918 (pnode.name, instance.name))
11919 else:
11920 remote_info = self.rpc.call_instance_info(instance.primary_node,
11921 instance.name,
11922 instance.hypervisor)
11923 remote_info.Raise("Error checking node %s" % instance.primary_node)
11924 remote_info = remote_info.payload
11925 if remote_info and "state" in remote_info:
11926 remote_state = "up"
11928 if instance.admin_state == constants.ADMINST_UP:
11929 remote_state = "down"
11931 remote_state = instance.admin_state
11933 disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
11934 instance.disks)
11936 snodes_group_uuids = [nodes[snode_name].group
11937 for snode_name in instance.secondary_nodes]
11939 result[instance.name] = {
11940 "name": instance.name,
11941 "config_state": instance.admin_state,
11942 "run_state": remote_state,
11943 "pnode": instance.primary_node,
11944 "pnode_group_uuid": pnode.group,
11945 "pnode_group_name": group2name_fn(pnode.group),
11946 "snodes": instance.secondary_nodes,
11947 "snodes_group_uuids": snodes_group_uuids,
11948 "snodes_group_names": map(group2name_fn, snodes_group_uuids),
11950 # this happens to be the same format used for hooks
11951 "nics": _NICListToTuple(self, instance.nics),
11952 "disk_template": instance.disk_template,
11954 "hypervisor": instance.hypervisor,
11955 "network_port": instance.network_port,
11956 "hv_instance": instance.hvparams,
11957 "hv_actual": cluster.FillHV(instance, skip_globals=True),
11958 "be_instance": instance.beparams,
11959 "be_actual": cluster.FillBE(instance),
11960 "os_instance": instance.osparams,
11961 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
11962 "serial_no": instance.serial_no,
11963 "mtime": instance.mtime,
11964 "ctime": instance.ctime,
11965 "uuid": instance.uuid,
11971 def PrepareContainerMods(mods, private_fn):
11972 """Prepares a list of container modifications by adding a private data field.
11974 @type mods: list of tuples; (operation, index, parameters)
11975 @param mods: List of modifications
11976 @type private_fn: callable or None
11977 @param private_fn: Callable for constructing a private data field for a
11978 modification
11979 @rtype: list
11981 """
11982 if private_fn is None:
11983 fn = lambda: None
11984 else:
11985 fn = private_fn
11987 return [(op, idx, params, fn()) for (op, idx, params) in mods]
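# Worked example (illustrative): with
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
# and private_fn=_InstNicModPrivate, this returns
#   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, <private obj>)]
# With private_fn=None the fourth element is simply None.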
11990 #: Type description for changes as returned by L{ApplyContainerMods}'s
11991 #: callbacks
11992 _TApplyContModsCbChanges = \
11993 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
11994 ht.TNonEmptyString,
11995 ht.TAny,
11996 ])))
11999 def ApplyContainerMods(kind, container, chgdesc, mods,
12000 create_fn, modify_fn, remove_fn):
12001 """Applies descriptions in C{mods} to C{container}.
12004 @param kind: One-word item description
12005 @type container: list
12006 @param container: Container to modify
12007 @type chgdesc: None or list
12008 @param chgdesc: List of applied changes
12009 @type mods: list
12010 @param mods: Modifications as returned by L{PrepareContainerMods}
12011 @type create_fn: callable
12012 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
12013 receives absolute item index, parameters and private data object as added
12014 by L{PrepareContainerMods}, returns tuple containing new item and changes
12015 applied to it
12016 @type modify_fn: callable
12017 @param modify_fn: Callback for modifying an existing item
12018 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
12019 and private data object as added by L{PrepareContainerMods}, returns
12020 changes applied to it
12021 @type remove_fn: callable
12022 @param remove_fn: Callback on removing item; receives absolute item index,
12023 item and private data object as added by L{PrepareContainerMods}
12025 """
12026 for (op, idx, params, private) in mods:
12027 if idx == -1:
12028 # Append
12029 absidx = len(container) - 1
12030 elif idx < 0:
12031 raise IndexError("Not accepting negative indices other than -1")
12032 elif idx > len(container):
12033 raise IndexError("Got %s index %s, but there are only %s" %
12034 (kind, idx, len(container)))
12035 else:
12036 absidx = idx
12038 changes = None
12040 if op == constants.DDM_ADD:
12041 # Calculate where item will be added
12042 if idx == -1:
12043 addidx = len(container)
12044 else:
12045 addidx = idx
12047 if create_fn is None:
12048 item = params
12049 else:
12050 (item, changes) = create_fn(addidx, params, private)
12052 if idx == -1:
12053 container.append(item)
12054 else:
12056 assert idx <= len(container)
12057 # list.insert does so before the specified index
12058 container.insert(idx, item)
12059 else:
12060 # Retrieve existing item
12061 try:
12062 item = container[absidx]
12063 except IndexError:
12064 raise IndexError("Invalid %s index %s" % (kind, idx))
12066 if op == constants.DDM_REMOVE:
12067 assert not params
12069 if remove_fn is not None:
12070 remove_fn(absidx, item, private)
12072 changes = [("%s/%s" % (kind, absidx), "remove")]
12074 assert container[absidx] == item
12075 del container[absidx]
12076 elif op == constants.DDM_MODIFY:
12077 if modify_fn is not None:
12078 changes = modify_fn(absidx, item, params, private)
12079 else:
12080 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12082 assert _TApplyContModsCbChanges(changes)
12084 if not (chgdesc is None or changes is None):
12085 chgdesc.extend(changes)
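# Worked example (illustrative): applying
#   ApplyContainerMods("disk", container, chgdesc,
#                      PrepareContainerMods([(constants.DDM_REMOVE, 0, {})],
#                                           None),
#                      None, None, None)
# deletes container[0] and extends chgdesc with [("disk/0", "remove")].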
12088 def _UpdateIvNames(base_index, disks):
12089 """Updates the C{iv_name} attribute of disks.
12091 @type disks: list of L{objects.Disk}
12093 """
12094 for (idx, disk) in enumerate(disks):
12095 disk.iv_name = "disk/%s" % (base_index + idx, )
12098 class _InstNicModPrivate:
12099 """Data structure for network interface modifications.
12101 Used by L{LUInstanceSetParams}.
12103 """
12104 def __init__(self):
12105 self.params = None
12106 self.filled = None
12109 class LUInstanceSetParams(LogicalUnit):
12110 """Modifies an instances's parameters.
12113 HPATH = "instance-modify"
12114 HTYPE = constants.HTYPE_INSTANCE
12118 def _UpgradeDiskNicMods(kind, mods, verify_fn):
12119 assert ht.TList(mods)
12120 assert not mods or len(mods[0]) in (2, 3)
12122 if mods and len(mods[0]) == 2:
12123 result = []
12124 addremove = 0
12126 for op, params in mods:
12127 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
12128 result.append((op, -1, params))
12129 addremove += 1
12131 if addremove > 1:
12132 raise errors.OpPrereqError("Only one %s add or remove operation is"
12133 " supported at a time" % kind,
12134 errors.ECODE_INVAL)
12135 else:
12136 result.append((constants.DDM_MODIFY, op, params))
12138 assert verify_fn(result)
12139 else:
12140 result = mods
12142 return result
12144 @staticmethod
12145 def _CheckMods(kind, mods, key_types, item_fn):
12146 """Ensures requested disk/NIC modifications are valid.
12149 for (op, _, params) in mods:
12150 assert ht.TDict(params)
12152 utils.ForceDictType(params, key_types)
12154 if op == constants.DDM_REMOVE:
12155 if params:
12156 raise errors.OpPrereqError("No settings should be passed when"
12157 " removing a %s" % kind,
12158 errors.ECODE_INVAL)
12159 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
12160 item_fn(op, params)
12162 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12164 @staticmethod
12165 def _VerifyDiskModification(op, params):
12166 """Verifies a disk modification.
12169 if op == constants.DDM_ADD:
12170 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
12171 if mode not in constants.DISK_ACCESS_SET:
12172 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
12173 errors.ECODE_INVAL)
12175 size = params.get(constants.IDISK_SIZE, None)
12176 if size is None:
12177 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
12178 constants.IDISK_SIZE, errors.ECODE_INVAL)
12180 try:
12181 size = int(size)
12182 except (TypeError, ValueError), err:
12183 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
12184 errors.ECODE_INVAL)
12186 params[constants.IDISK_SIZE] = size
12188 elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
12189 raise errors.OpPrereqError("Disk size change not possible, use"
12190 " grow-disk", errors.ECODE_INVAL)
12192 @staticmethod
12193 def _VerifyNicModification(op, params):
12194 """Verifies a network interface modification.
12197 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
12198 ip = params.get(constants.INIC_IP, None)
12199 if ip is None:
12200 pass
12201 elif ip.lower() == constants.VALUE_NONE:
12202 params[constants.INIC_IP] = None
12203 elif not netutils.IPAddress.IsValid(ip):
12204 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
12205 errors.ECODE_INVAL)
12207 bridge = params.get("bridge", None)
12208 link = params.get(constants.INIC_LINK, None)
12209 if bridge and link:
12210 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
12211 " at the same time", errors.ECODE_INVAL)
12212 elif bridge and bridge.lower() == constants.VALUE_NONE:
12213 params["bridge"] = None
12214 elif link and link.lower() == constants.VALUE_NONE:
12215 params[constants.INIC_LINK] = None
12217 if op == constants.DDM_ADD:
12218 macaddr = params.get(constants.INIC_MAC, None)
12219 if macaddr is None:
12220 params[constants.INIC_MAC] = constants.VALUE_AUTO
12221 else:
12222 if constants.INIC_MAC in params:
12223 macaddr = params[constants.INIC_MAC]
12224 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12225 macaddr = utils.NormalizeAndValidateMac(macaddr)
12227 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
12228 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
12229 " modifying an existing NIC",
12230 errors.ECODE_INVAL)
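# Illustrative modifications accepted above (assumption, values made up):
#   (constants.DDM_ADD, -1, {constants.INIC_IP: "198.51.100.10",
#                            constants.INIC_LINK: "br0"})
#   (constants.DDM_MODIFY, 0, {constants.INIC_MAC: "aa:00:00:12:34:56"})
# "bridge" is accepted as a legacy alias for the link parameter, and the
# string "none" clears the IP or link.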
12232 def CheckArguments(self):
12233 if not (self.op.nics or self.op.disks or self.op.disk_template or
12234 self.op.hvparams or self.op.beparams or self.op.os_name or
12235 self.op.offline is not None or self.op.runtime_mem):
12236 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
12238 if self.op.hvparams:
12239 _CheckGlobalHvParams(self.op.hvparams)
12242 self._UpgradeDiskNicMods("disk", self.op.disks,
12243 opcodes.OpInstanceSetParams.TestDiskModifications)
12245 self._UpgradeDiskNicMods("NIC", self.op.nics,
12246 opcodes.OpInstanceSetParams.TestNicModifications)
12248 # Check disk modifications
12249 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
12250 self._VerifyDiskModification)
12252 if self.op.disks and self.op.disk_template is not None:
12253 raise errors.OpPrereqError("Disk template conversion and other disk"
12254 " changes not supported at the same time",
12255 errors.ECODE_INVAL)
12257 if (self.op.disk_template and
12258 self.op.disk_template in constants.DTS_INT_MIRROR and
12259 self.op.remote_node is None):
12260 raise errors.OpPrereqError("Changing the disk template to a mirrored"
12261 " one requires specifying a secondary node",
12262 errors.ECODE_INVAL)
12264 # Check NIC modifications
12265 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
12266 self._VerifyNicModification)
12268 def ExpandNames(self):
12269 self._ExpandAndLockInstance()
12270 # Can't even acquire node locks in shared mode as upcoming changes in
12271 # Ganeti 2.6 will start to modify the node object on disk conversion
12272 self.needed_locks[locking.LEVEL_NODE] = []
12273 self.needed_locks[locking.LEVEL_NODE_RES] = []
12274 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
12276 def DeclareLocks(self, level):
12277 # TODO: Acquire group lock in shared mode (disk parameters)
12278 if level == locking.LEVEL_NODE:
12279 self._LockInstancesNodes()
12280 if self.op.disk_template and self.op.remote_node:
12281 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
12282 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
12283 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
12284 # Copy node locks
12285 self.needed_locks[locking.LEVEL_NODE_RES] = \
12286 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
12288 def BuildHooksEnv(self):
12289 """Build hooks env.
12291 This runs on the master, primary and secondaries.
12293 """
12294 args = {}
12295 if constants.BE_MINMEM in self.be_new:
12296 args["minmem"] = self.be_new[constants.BE_MINMEM]
12297 if constants.BE_MAXMEM in self.be_new:
12298 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
12299 if constants.BE_VCPUS in self.be_new:
12300 args["vcpus"] = self.be_new[constants.BE_VCPUS]
12301 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
12302 # information at all.
12304 if self._new_nics is not None:
12305 nics = []
12307 for nic in self._new_nics:
12308 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
12309 mode = nicparams[constants.NIC_MODE]
12310 link = nicparams[constants.NIC_LINK]
12311 nics.append((nic.ip, nic.mac, mode, link))
12313 args["nics"] = nics
12315 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
12316 if self.op.disk_template:
12317 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
12318 if self.op.runtime_mem:
12319 env["RUNTIME_MEMORY"] = self.op.runtime_mem
12323 def BuildHooksNodes(self):
12324 """Build hooks nodes.
12327 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
12330 def _PrepareNicModification(self, params, private, old_ip, old_params,
12331 cluster, pnode):
12332 update_params_dict = dict([(key, params[key])
12333 for key in constants.NICS_PARAMETERS
12334 if key in params])
12336 if "bridge" in params:
12337 update_params_dict[constants.NIC_LINK] = params["bridge"]
12339 new_params = _GetUpdatedParams(old_params, update_params_dict)
12340 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
12342 new_filled_params = cluster.SimpleFillNIC(new_params)
12343 objects.NIC.CheckParameterSyntax(new_filled_params)
12345 new_mode = new_filled_params[constants.NIC_MODE]
12346 if new_mode == constants.NIC_MODE_BRIDGED:
12347 bridge = new_filled_params[constants.NIC_LINK]
12348 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
12349 if msg:
12350 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
12351 if self.op.force:
12352 self.warn.append(msg)
12353 else:
12354 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
12356 elif new_mode == constants.NIC_MODE_ROUTED:
12357 ip = params.get(constants.INIC_IP, old_ip)
12358 if ip is None:
12359 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
12360 " on a routed NIC", errors.ECODE_INVAL)
12362 if constants.INIC_MAC in params:
12363 mac = params[constants.INIC_MAC]
12364 if mac is None:
12365 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
12366 errors.ECODE_INVAL)
12367 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12368 # otherwise generate the MAC address
12369 params[constants.INIC_MAC] = \
12370 self.cfg.GenerateMAC(self.proc.GetECId())
12371 else:
12372 # or validate/reserve the current one
12373 try:
12374 self.cfg.ReserveMAC(mac, self.proc.GetECId())
12375 except errors.ReservationError:
12376 raise errors.OpPrereqError("MAC address '%s' already in use"
12377 " in cluster" % mac,
12378 errors.ECODE_NOTUNIQUE)
12380 private.params = new_params
12381 private.filled = new_filled_params
12383 def CheckPrereq(self):
12384 """Check prerequisites.
12386 This only checks the instance list against the existing names.
12388 """
12389 # checking the new params on the primary/secondary nodes
12391 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
12392 cluster = self.cluster = self.cfg.GetClusterInfo()
12393 assert self.instance is not None, \
12394 "Cannot retrieve locked instance %s" % self.op.instance_name
12395 pnode = instance.primary_node
12396 nodelist = list(instance.all_nodes)
12397 pnode_info = self.cfg.GetNodeInfo(pnode)
12398 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
12400 # Prepare disk/NIC modifications
12401 self.diskmod = PrepareContainerMods(self.op.disks, None)
12402 self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
12405 if self.op.os_name and not self.op.force:
12406 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
12407 self.op.force_variant)
12408 instance_os = self.op.os_name
12409 else:
12410 instance_os = instance.os
12412 assert not (self.op.disk_template and self.op.disks), \
12413 "Can't modify disk template and apply disk changes at the same time"
12415 if self.op.disk_template:
12416 if instance.disk_template == self.op.disk_template:
12417 raise errors.OpPrereqError("Instance already has disk template %s" %
12418 instance.disk_template, errors.ECODE_INVAL)
12420 if (instance.disk_template,
12421 self.op.disk_template) not in self._DISK_CONVERSIONS:
12422 raise errors.OpPrereqError("Unsupported disk template conversion from"
12423 " %s to %s" % (instance.disk_template,
12424 self.op.disk_template),
12425 errors.ECODE_INVAL)
12426 _CheckInstanceState(self, instance, INSTANCE_DOWN,
12427 msg="cannot change disk template")
12428 if self.op.disk_template in constants.DTS_INT_MIRROR:
12429 if self.op.remote_node == pnode:
12430 raise errors.OpPrereqError("Given new secondary node %s is the same"
12431 " as the primary node of the instance" %
12432 self.op.remote_node, errors.ECODE_STATE)
12433 _CheckNodeOnline(self, self.op.remote_node)
12434 _CheckNodeNotDrained(self, self.op.remote_node)
12435 # FIXME: here we assume that the old instance type is DT_PLAIN
12436 assert instance.disk_template == constants.DT_PLAIN
12437 disks = [{constants.IDISK_SIZE: d.size,
12438 constants.IDISK_VG: d.logical_id[0]}
12439 for d in instance.disks]
12440 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
12441 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
12443 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
12444 snode_group = self.cfg.GetNodeGroup(snode_info.group)
12445 ipolicy = _CalculateGroupIPolicy(cluster, snode_group)
12446 _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
12447 ignore=self.op.ignore_ipolicy)
12448 if pnode_info.group != snode_info.group:
12449 self.LogWarning("The primary and secondary nodes are in two"
12450 " different node groups; the disk parameters"
12451 " from the first disk's node group will be"
12454 # hvparams processing
12455 if self.op.hvparams:
12456 hv_type = instance.hypervisor
12457 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
12458 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
12459 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
12462 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
12463 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
12464 self.hv_proposed = self.hv_new = hv_new # the new actual values
12465 self.hv_inst = i_hvdict # the new dict (without defaults)
12466 else:
12467 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
12468 instance.hvparams)
12469 self.hv_new = self.hv_inst = {}
12471 # beparams processing
12472 if self.op.beparams:
12473 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
12474 use_none=True)
12475 objects.UpgradeBeParams(i_bedict)
12476 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
12477 be_new = cluster.SimpleFillBE(i_bedict)
12478 self.be_proposed = self.be_new = be_new # the new actual values
12479 self.be_inst = i_bedict # the new dict (without defaults)
12480 else:
12481 self.be_new = self.be_inst = {}
12482 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
12483 be_old = cluster.FillBE(instance)
12485 # CPU param validation -- checking every time a parameter is
12486 # changed to cover all cases where either CPU mask or vcpus have
12487 # changed
12488 if (constants.BE_VCPUS in self.be_proposed and
12489 constants.HV_CPU_MASK in self.hv_proposed):
12490 cpu_list = \
12491 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
12492 # Verify mask is consistent with number of vCPUs. Can skip this
12493 # test if only 1 entry in the CPU mask, which means same mask
12494 # is applied to all vCPUs.
12495 if (len(cpu_list) > 1 and
12496 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
12497 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
12499 (self.be_proposed[constants.BE_VCPUS],
12500 self.hv_proposed[constants.HV_CPU_MASK]),
12501 errors.ECODE_INVAL)
12503 # Only perform this test if a new CPU mask is given
12504 if constants.HV_CPU_MASK in self.hv_new:
12505 # Calculate the largest CPU number requested
12506 max_requested_cpu = max(map(max, cpu_list))
12507 # Check that all of the instance's nodes have enough physical CPUs to
12508 # satisfy the requested CPU mask
12509 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
12510 max_requested_cpu + 1, instance.hypervisor)
12512 # osparams processing
12513 if self.op.osparams:
12514 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
12515 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
12516 self.os_inst = i_osdict # the new dict (without defaults)
12517 else:
12518 self.os_inst = {}
12520 self.warn = []
12522 #TODO(dynmem): do the appropriate check involving MINMEM
12523 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
12524 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
12525 mem_check_list = [pnode]
12526 if be_new[constants.BE_AUTO_BALANCE]:
12527 # either we changed auto_balance to yes or it was from before
12528 mem_check_list.extend(instance.secondary_nodes)
12529 instance_info = self.rpc.call_instance_info(pnode, instance.name,
12530 instance.hypervisor)
12531 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
12532 [instance.hypervisor])
12533 pninfo = nodeinfo[pnode]
12534 msg = pninfo.fail_msg
12535 if msg:
12536 # Assume the primary node is unreachable and go ahead
12537 self.warn.append("Can't get info from primary node %s: %s" %
12538 (pnode, msg))
12539 else:
12540 (_, _, (pnhvinfo, )) = pninfo.payload
12541 if not isinstance(pnhvinfo.get("memory_free", None), int):
12542 self.warn.append("Node data from primary node %s doesn't contain"
12543 " free memory information" % pnode)
12544 elif instance_info.fail_msg:
12545 self.warn.append("Can't get instance runtime information: %s" %
12546 instance_info.fail_msg)
12547 else:
12548 if instance_info.payload:
12549 current_mem = int(instance_info.payload["memory"])
12550 else:
12551 # Assume instance not running
12552 # (there is a slight race condition here, but it's not very
12553 # probable, and we have no other way to check)
12554 # TODO: Describe race condition
12555 current_mem = 0
12556 #TODO(dynmem): do the appropriate check involving MINMEM
12557 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
12558 pnhvinfo["memory_free"])
12560 raise errors.OpPrereqError("This change will prevent the instance"
12561 " from starting, due to %d MB of memory"
12562 " missing on its primary node" %
12564 errors.ECODE_NORES)
12566 if be_new[constants.BE_AUTO_BALANCE]:
12567 for node, nres in nodeinfo.items():
12568 if node not in instance.secondary_nodes:
12569 continue
12570 nres.Raise("Can't get info from secondary node %s" % node,
12571 prereq=True, ecode=errors.ECODE_STATE)
12572 (_, _, (nhvinfo, )) = nres.payload
12573 if not isinstance(nhvinfo.get("memory_free", None), int):
12574 raise errors.OpPrereqError("Secondary node %s didn't return free"
12575 " memory information" % node,
12576 errors.ECODE_STATE)
12577 #TODO(dynmem): do the appropriate check involving MINMEM
12578 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
12579 raise errors.OpPrereqError("This change will prevent the instance"
12580 " from failover to its secondary node"
12581 " %s, due to not enough memory" % node,
12582 errors.ECODE_STATE)
12584 if self.op.runtime_mem:
12585 remote_info = self.rpc.call_instance_info(instance.primary_node,
12586 instance.name,
12587 instance.hypervisor)
12588 remote_info.Raise("Error checking node %s" % instance.primary_node)
12589 if not remote_info.payload: # not running already
12590 raise errors.OpPrereqError("Instance %s is not running" % instance.name,
12591 errors.ECODE_STATE)
12593 current_memory = remote_info.payload["memory"]
12594 if (not self.op.force and
12595 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
12596 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
12597 raise errors.OpPrereqError("Instance %s must have memory between %d"
12598 " and %d MB of memory unless --force is"
12599 " given" % (instance.name,
12600 self.be_proposed[constants.BE_MINMEM],
12601 self.be_proposed[constants.BE_MAXMEM]),
12602 errors.ECODE_INVAL)
12604 delta = self.op.runtime_mem - current_memory
12605 if delta > 0:
12606 _CheckNodeFreeMemory(self, instance.primary_node,
12607 "ballooning memory for instance %s" %
12608 instance.name, delta, instance.hypervisor)
12610 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
12611 raise errors.OpPrereqError("Disk operations not supported for"
12612 " diskless instances",
12613 errors.ECODE_INVAL)
12615 def _PrepareNicCreate(_, params, private):
12616 self._PrepareNicModification(params, private, None, {}, cluster, pnode)
12617 return (None, None)
12619 def _PrepareNicMod(_, nic, params, private):
12620 self._PrepareNicModification(params, private, nic.ip,
12621 nic.nicparams, cluster, pnode)
12622 return None
12624 # Verify NIC changes (operating on copy)
12625 nics = instance.nics[:]
12626 ApplyContainerMods("NIC", nics, None, self.nicmod,
12627 _PrepareNicCreate, _PrepareNicMod, None)
12628 if len(nics) > constants.MAX_NICS:
12629 raise errors.OpPrereqError("Instance has too many network interfaces"
12630 " (%d), cannot add more" % constants.MAX_NICS,
12631 errors.ECODE_STATE)
12633 # Verify disk changes (operating on a copy)
12634 disks = instance.disks[:]
12635 ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
12636 if len(disks) > constants.MAX_DISKS:
12637 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
12638 " more" % constants.MAX_DISKS,
12639 errors.ECODE_STATE)
12641 if self.op.offline is not None:
12642 if self.op.offline:
12643 msg = "can't change to offline"
12645 msg = "can't change to online"
12646 _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
12648 # Pre-compute NIC changes (necessary to use result in hooks)
12649 self._nic_chgdesc = []
12650 if self.nicmod:
12651 # Operate on copies as this is still in prereq
12652 nics = [nic.Copy() for nic in instance.nics]
12653 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
12654 self._CreateNewNic, self._ApplyNicMods, None)
12655 self._new_nics = nics
12656 else:
12657 self._new_nics = None
12659 def _ConvertPlainToDrbd(self, feedback_fn):
12660 """Converts an instance from plain to drbd.
12663 feedback_fn("Converting template to drbd")
12664 instance = self.instance
12665 pnode = instance.primary_node
12666 snode = self.op.remote_node
12668 assert instance.disk_template == constants.DT_PLAIN
12670 # create a fake disk info for _GenerateDiskTemplate
12671 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
12672 constants.IDISK_VG: d.logical_id[0]}
12673 for d in instance.disks]
12674 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
12675 instance.name, pnode, [snode],
12676 disk_info, None, None, 0, feedback_fn,
12677 self.diskparams)
12678 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
12679 self.diskparams)
12680 info = _GetInstanceInfoText(instance)
12681 feedback_fn("Creating additional volumes...")
12682 # first, create the missing data and meta devices
12683 for disk in anno_disks:
12684 # unfortunately this is... not too nice
12685 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
12686 info, True)
12687 for child in disk.children:
12688 _CreateSingleBlockDev(self, snode, instance, child, info, True)
12689 # at this stage, all new LVs have been created, we can rename the
12691 feedback_fn("Renaming original volumes...")
12692 rename_list = [(o, n.children[0].logical_id)
12693 for (o, n) in zip(instance.disks, new_disks)]
12694 result = self.rpc.call_blockdev_rename(pnode, rename_list)
12695 result.Raise("Failed to rename original LVs")
12697 feedback_fn("Initializing DRBD devices...")
12698 # all child devices are in place, we can now create the DRBD devices
12699 for disk in anno_disks:
12700 for node in [pnode, snode]:
12701 f_create = node == pnode
12702 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
12704 # at this point, the instance has been modified
12705 instance.disk_template = constants.DT_DRBD8
12706 instance.disks = new_disks
12707 self.cfg.Update(instance, feedback_fn)
12709 # Release node locks while waiting for sync
12710 _ReleaseLocks(self, locking.LEVEL_NODE)
12712 # disks are created, waiting for sync
12713 disk_abort = not _WaitForSync(self, instance,
12714 oneshot=not self.op.wait_for_sync)
12715 if disk_abort:
12716 raise errors.OpExecError("There are some degraded disks for"
12717 " this instance, please cleanup manually")
12719 # Node resource locks will be released by caller
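# Rough sketch of the result of the conversion above for a single-disk
# instance (layout only, names illustrative): each original plain LV ends up
# as the data child of a new DRBD8 device, next to a freshly created meta LV:
#
#   before:  disk/0 = LV
#   after:   disk/0 = DRBD8
#              children[0] = data LV (the renamed original volume)
#              children[1] = meta LV (created above)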
12721 def _ConvertDrbdToPlain(self, feedback_fn):
12722 """Converts an instance from drbd to plain.
12725 instance = self.instance
12727 assert len(instance.secondary_nodes) == 1
12728 assert instance.disk_template == constants.DT_DRBD8
12730 pnode = instance.primary_node
12731 snode = instance.secondary_nodes[0]
12732 feedback_fn("Converting template to plain")
12734 old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
12735 new_disks = [d.children[0] for d in instance.disks]
12737 # copy over size and mode
12738 for parent, child in zip(old_disks, new_disks):
12739 child.size = parent.size
12740 child.mode = parent.mode
12742 # this is a DRBD disk, return its port to the pool
12743 # NOTE: this must be done right before the call to cfg.Update!
12744 for disk in old_disks:
12745 tcp_port = disk.logical_id[2]
12746 self.cfg.AddTcpUdpPort(tcp_port)
12748 # update instance structure
12749 instance.disks = new_disks
12750 instance.disk_template = constants.DT_PLAIN
12751 self.cfg.Update(instance, feedback_fn)
12753 # Release locks in case removing disks takes a while
12754 _ReleaseLocks(self, locking.LEVEL_NODE)
12756 feedback_fn("Removing volumes on the secondary node...")
12757 for disk in old_disks:
12758 self.cfg.SetDiskID(disk, snode)
12759 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
12760 if msg:
12761 self.LogWarning("Could not remove block device %s on node %s,"
12762 " continuing anyway: %s", disk.iv_name, snode, msg)
12764 feedback_fn("Removing unneeded volumes on the primary node...")
12765 for idx, disk in enumerate(old_disks):
12766 meta = disk.children[1]
12767 self.cfg.SetDiskID(meta, pnode)
12768 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
12769 if msg:
12770 self.LogWarning("Could not remove metadata for disk %d on node %s,"
12771 " continuing anyway: %s", idx, pnode, msg)
12773 def _CreateNewDisk(self, idx, params, _):
12774 """Creates a new disk.
12777 instance = self.instance
12780 if instance.disk_template in constants.DTS_FILEBASED:
12781 (file_driver, file_path) = instance.disks[0].logical_id
12782 file_path = os.path.dirname(file_path)
12783 else:
12784 file_driver = file_path = None
12786 disk = \
12787 _GenerateDiskTemplate(self, instance.disk_template, instance.name,
12788 instance.primary_node, instance.secondary_nodes,
12789 [params], file_path, file_driver, idx,
12790 self.Log, self.diskparams)[0]
12792 info = _GetInstanceInfoText(instance)
12794 logging.info("Creating volume %s for instance %s",
12795 disk.iv_name, instance.name)
12796 # Note: this needs to be kept in sync with _CreateDisks
12798 for node in instance.all_nodes:
12799 f_create = (node == instance.primary_node)
12800 try:
12801 _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
12802 except errors.OpExecError, err:
12803 self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
12804 disk.iv_name, disk, node, err)
12806 return [
12807 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
12811 def _ModifyDisk(idx, disk, params, _):
12812 """Modifies a disk.
12815 disk.mode = params[constants.IDISK_MODE]
12817 return [
12818 ("disk.mode/%d" % idx, disk.mode),
12821 def _RemoveDisk(self, idx, root, _):
12822 """Removes a disk.
12824 """
12825 (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
12826 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
12827 self.cfg.SetDiskID(disk, node)
12828 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
12829 if msg:
12830 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
12831 " continuing anyway", idx, node, msg)
12833 # if this is a DRBD disk, return its port to the pool
12834 if root.dev_type in constants.LDS_DRBD:
12835 self.cfg.AddTcpUdpPort(root.logical_id[2])
12837 @staticmethod
12838 def _CreateNewNic(idx, params, private):
12839 """Creates data structure for a new network interface.
12842 mac = params[constants.INIC_MAC]
12843 ip = params.get(constants.INIC_IP, None)
12844 nicparams = private.params
12846 return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
12847 ("nic.%d" % idx,
12848 "add:mac=%s,ip=%s,mode=%s,link=%s" %
12849 (mac, ip, private.filled[constants.NIC_MODE],
12850 private.filled[constants.NIC_LINK])),
12851 ])
12853 @staticmethod
12854 def _ApplyNicMods(idx, nic, params, private):
12855 """Modifies a network interface.
12860 for key in [constants.INIC_MAC, constants.INIC_IP]:
12861 if key in params:
12862 changes.append(("nic.%s/%d" % (key, idx), params[key]))
12863 setattr(nic, key, params[key])
12865 if private.params:
12866 nic.nicparams = private.params
12868 for (key, val) in params.items():
12869 changes.append(("nic.%s/%d" % (key, idx), val))
12873 def Exec(self, feedback_fn):
12874 """Modifies an instance.
12876 All parameters take effect only at the next restart of the instance.
12878 """
12879 # Process here the warnings from CheckPrereq, as we don't have a
12880 # feedback_fn there.
12881 # TODO: Replace with self.LogWarning
12882 for warn in self.warn:
12883 feedback_fn("WARNING: %s" % warn)
12885 assert ((self.op.disk_template is None) ^
12886 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
12887 "Not owning any node resource locks"
12890 instance = self.instance
12893 if self.op.runtime_mem:
12894 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
12895 instance,
12896 self.op.runtime_mem)
12897 rpcres.Raise("Cannot modify instance runtime memory")
12898 result.append(("runtime_memory", self.op.runtime_mem))
12900 # Apply disk changes
12901 ApplyContainerMods("disk", instance.disks, result, self.diskmod,
12902 self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
12903 _UpdateIvNames(0, instance.disks)
12905 if self.op.disk_template:
12906 if __debug__:
12907 check_nodes = set(instance.all_nodes)
12908 if self.op.remote_node:
12909 check_nodes.add(self.op.remote_node)
12910 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
12911 owned = self.owned_locks(level)
12912 assert not (check_nodes - owned), \
12913 ("Not owning the correct locks, owning %r, expected at least %r" %
12914 (owned, check_nodes))
12916 r_shut = _ShutdownInstanceDisks(self, instance)
12917 if not r_shut:
12918 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
12919 " proceed with disk template conversion")
12920 mode = (instance.disk_template, self.op.disk_template)
12921 try:
12922 self._DISK_CONVERSIONS[mode](self, feedback_fn)
12923 except:
12924 self.cfg.ReleaseDRBDMinors(instance.name)
12925 raise
12926 result.append(("disk_template", self.op.disk_template))
12928 assert instance.disk_template == self.op.disk_template, \
12929 ("Expected disk template '%s', found '%s'" %
12930 (self.op.disk_template, instance.disk_template))
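# For reference, a template conversion is typically triggered from the
# command line, e.g. (node name hypothetical):
#   gnt-instance modify -t drbd -n node2.example.com instance1
#   gnt-instance modify -t plain instance1
# which arrives here as self.op.disk_template and is dispatched through the
# _DISK_CONVERSIONS table below.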
12932 # Release node and resource locks if there are any (they might already have
12933 # been released during disk conversion)
12934 _ReleaseLocks(self, locking.LEVEL_NODE)
12935 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
12937 # Apply NIC changes
12938 if self._new_nics is not None:
12939 instance.nics = self._new_nics
12940 result.extend(self._nic_chgdesc)
12943 if self.op.hvparams:
12944 instance.hvparams = self.hv_inst
12945 for key, val in self.op.hvparams.iteritems():
12946 result.append(("hv/%s" % key, val))
12949 if self.op.beparams:
12950 instance.beparams = self.be_inst
12951 for key, val in self.op.beparams.iteritems():
12952 result.append(("be/%s" % key, val))
12955 if self.op.os_name:
12956 instance.os = self.op.os_name
12959 if self.op.osparams:
12960 instance.osparams = self.os_inst
12961 for key, val in self.op.osparams.iteritems():
12962 result.append(("os/%s" % key, val))
12964 if self.op.offline is None:
12965 # Ignore
12966 pass
12967 elif self.op.offline:
12968 # Mark instance as offline
12969 self.cfg.MarkInstanceOffline(instance.name)
12970 result.append(("admin_state", constants.ADMINST_OFFLINE))
12972 # Mark instance as online, but stopped
12973 self.cfg.MarkInstanceDown(instance.name)
12974 result.append(("admin_state", constants.ADMINST_DOWN))
12976 self.cfg.Update(instance, feedback_fn)
12978 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
12979 self.owned_locks(locking.LEVEL_NODE)), \
12980 "All node locks should have been released by now"
12984 _DISK_CONVERSIONS = {
12985 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
12986 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
12987 }
12990 class LUInstanceChangeGroup(LogicalUnit):
12991 HPATH = "instance-change-group"
12992 HTYPE = constants.HTYPE_INSTANCE
12993 REQ_BGL = False
12995 def ExpandNames(self):
12996 self.share_locks = _ShareAll()
12997 self.needed_locks = {
12998 locking.LEVEL_NODEGROUP: [],
12999 locking.LEVEL_NODE: [],
13000 }
13002 self._ExpandAndLockInstance()
13004 if self.op.target_groups:
13005 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
13006 self.op.target_groups)
13007 else:
13008 self.req_target_uuids = None
13010 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
13012 def DeclareLocks(self, level):
13013 if level == locking.LEVEL_NODEGROUP:
13014 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
13016 if self.req_target_uuids:
13017 lock_groups = set(self.req_target_uuids)
13019 # Lock all groups used by instance optimistically; this requires going
13020 # via the node before it's locked, requiring verification later on
13021 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
13022 lock_groups.update(instance_groups)
13023 else:
13024 # No target groups, need to lock all of them
13025 lock_groups = locking.ALL_SET
13027 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
13029 elif level == locking.LEVEL_NODE:
13030 if self.req_target_uuids:
13031 # Lock all nodes used by instances
13032 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
13033 self._LockInstancesNodes()
13035 # Lock all nodes in all potential target groups
13036 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
13037 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
13038 member_nodes = [node_name
13039 for group in lock_groups
13040 for node_name in self.cfg.GetNodeGroup(group).members]
13041 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
13042 else:
13043 # Lock all nodes as all groups are potential targets
13044 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13046 def CheckPrereq(self):
13047 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13048 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
13049 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
13051 assert (self.req_target_uuids is None or
13052 owned_groups.issuperset(self.req_target_uuids))
13053 assert owned_instances == set([self.op.instance_name])
13055 # Get instance information
13056 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
13058 # Check if node groups for locked instance are still correct
13059 assert owned_nodes.issuperset(self.instance.all_nodes), \
13060 ("Instance %s's nodes changed while we kept the lock" %
13061 self.op.instance_name)
13063 inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
13064 owned_groups)
13066 if self.req_target_uuids:
13067 # User requested specific target groups
13068 self.target_uuids = frozenset(self.req_target_uuids)
13069 else:
13070 # All groups except those used by the instance are potential targets
13071 self.target_uuids = owned_groups - inst_groups
13073 conflicting_groups = self.target_uuids & inst_groups
13074 if conflicting_groups:
13075 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
13076 " used by the instance '%s'" %
13077 (utils.CommaJoin(conflicting_groups),
13078 self.op.instance_name),
13079 errors.ECODE_INVAL)
13081 if not self.target_uuids:
13082 raise errors.OpPrereqError("There are no possible target groups",
13083 errors.ECODE_INVAL)
13085 def BuildHooksEnv(self):
13086 """Build hooks env.
13089 assert self.target_uuids
13091 env = {
13092 "TARGET_GROUPS": " ".join(self.target_uuids),
13095 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13097 return env
13099 def BuildHooksNodes(self):
13100 """Build hooks nodes.
13103 mn = self.cfg.GetMasterNode()
13104 return ([mn], [mn])
13106 def Exec(self, feedback_fn):
13107 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
13109 assert instances == [self.op.instance_name], "Instance not locked"
13111 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
13112 instances=instances, target_groups=list(self.target_uuids))
13114 ial.Run(self.op.iallocator)
13116 if not ial.success:
13117 raise errors.OpPrereqError("Can't compute solution for changing group of"
13118 " instance '%s' using iallocator '%s': %s" %
13119 (self.op.instance_name, self.op.iallocator,
13120 ial.info),
13121 errors.ECODE_NORES)
13123 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
13125 self.LogInfo("Iallocator returned %s job(s) for changing group of"
13126 " instance '%s'", len(jobs), self.op.instance_name)
13128 return ResultWithJobs(jobs)
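# The value built by _LoadNodeEvacResult is a list of per-job opcode lists,
# schematically something like (opcode names made up for illustration):
#   jobs = [[op_migrate_inst1], [op_replace_disks_inst1]]
# Returning it wrapped in ResultWithJobs makes mcpu._ProcessResult submit
# each inner list as a separate job and report the new job IDs to the caller.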
13131 class LUBackupQuery(NoHooksLU):
13132 """Query the exports list
13137 def CheckArguments(self):
13138 self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
13139 ["node", "export"], self.op.use_locking)
13141 def ExpandNames(self):
13142 self.expq.ExpandNames(self)
13144 def DeclareLocks(self, level):
13145 self.expq.DeclareLocks(self, level)
13147 def Exec(self, feedback_fn):
13148 result = {}
13150 for (node, expname) in self.expq.OldStyleQuery(self):
13151 if expname is None:
13152 result[node] = False
13153 else:
13154 result.setdefault(node, []).append(expname)
13156 return result
13159 class _ExportQuery(_QueryBase):
13160 FIELDS = query.EXPORT_FIELDS
13162 #: The node name is not a unique key for this query
13163 SORT_FIELD = "node"
13165 def ExpandNames(self, lu):
13166 lu.needed_locks = {}
13168 # The following variables interact with _QueryBase._GetNames
13169 if self.names:
13170 self.wanted = _GetWantedNodes(lu, self.names)
13171 else:
13172 self.wanted = locking.ALL_SET
13174 self.do_locking = self.use_locking
13176 if self.do_locking:
13177 lu.share_locks = _ShareAll()
13178 lu.needed_locks = {
13179 locking.LEVEL_NODE: self.wanted,
13180 }
13182 def DeclareLocks(self, lu, level):
13183 pass
13185 def _GetQueryData(self, lu):
13186 """Computes the list of nodes and their attributes.
13189 # Locking is not used
13191 assert not (compat.any(lu.glm.is_owned(level)
13192 for level in locking.LEVELS
13193 if level != locking.LEVEL_CLUSTER) or
13194 self.do_locking or self.use_locking)
13196 nodes = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)
13198 result = []
13200 for (node, nres) in lu.rpc.call_export_list(nodes).items():
13201 if nres.fail_msg:
13202 result.append((node, None))
13203 else:
13204 result.extend((node, expname) for expname in nres.payload)
13206 return result
13209 class LUBackupPrepare(NoHooksLU):
13210 """Prepares an instance for an export and returns useful information.
13215 def ExpandNames(self):
13216 self._ExpandAndLockInstance()
13218 def CheckPrereq(self):
13219 """Check prerequisites.
13222 instance_name = self.op.instance_name
13224 self.instance = self.cfg.GetInstanceInfo(instance_name)
13225 assert self.instance is not None, \
13226 "Cannot retrieve locked instance %s" % self.op.instance_name
13227 _CheckNodeOnline(self, self.instance.primary_node)
13229 self._cds = _GetClusterDomainSecret()
13231 def Exec(self, feedback_fn):
13232 """Prepares an instance for an export.
13235 instance = self.instance
13237 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13238 salt = utils.GenerateSecret(8)
13240 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
13241 result = self.rpc.call_x509_cert_create(instance.primary_node,
13242 constants.RIE_CERT_VALIDITY)
13243 result.Raise("Can't create X509 key and certificate on %s" % result.node)
13245 (name, cert_pem) = result.payload
13247 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
13248 cert_pem)
13250 return {
13251 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
13252 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
13254 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
13260 class LUBackupExport(LogicalUnit):
13261 """Export an instance to an image in the cluster.
13264 HPATH = "instance-export"
13265 HTYPE = constants.HTYPE_INSTANCE
13266 REQ_BGL = False
13268 def CheckArguments(self):
13269 """Check the arguments.
13272 self.x509_key_name = self.op.x509_key_name
13273 self.dest_x509_ca_pem = self.op.destination_x509_ca
13275 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13276 if not self.x509_key_name:
13277 raise errors.OpPrereqError("Missing X509 key name for encryption",
13278 errors.ECODE_INVAL)
13280 if not self.dest_x509_ca_pem:
13281 raise errors.OpPrereqError("Missing destination X509 CA",
13282 errors.ECODE_INVAL)
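# Minimal sketch of the remote-export arguments validated above (values
# hypothetical): the key name is a (name, hmac, salt) triple protected with
# the cluster domain secret, the CA a signed PEM certificate:
#   x509_key_name = ("0123abcd...", "<hmac-digest>", "<salt>")
#   destination_x509_ca = "-----BEGIN CERTIFICATE-----..."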
13284 def ExpandNames(self):
13285 self._ExpandAndLockInstance()
13287 # Lock all nodes for local exports
13288 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13289 # FIXME: lock only instance primary and destination node
13291 # Sad but true, for now we have to lock all nodes, as we don't know where
13292 # the previous export might be, and in this LU we search for it and
13293 # remove it from its current node. In the future we could fix this by:
13294 # - making a tasklet to search (share-lock all), then create the
13295 # new one, then one to remove, after
13296 # - removing the removal operation altogether
13297 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13299 def DeclareLocks(self, level):
13300 """Last minute lock declaration."""
13301 # All nodes are locked anyway, so nothing to do here.
13303 def BuildHooksEnv(self):
13304 """Build hooks env.
13306 This will run on the master, primary node and target node.
13308 """
13309 env = {
13310 "EXPORT_MODE": self.op.mode,
13311 "EXPORT_NODE": self.op.target_node,
13312 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
13313 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
13314 # TODO: Generic function for boolean env variables
13315 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
13318 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13320 return env
13322 def BuildHooksNodes(self):
13323 """Build hooks nodes.
13326 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
13328 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13329 nl.append(self.op.target_node)
13331 return nl
13333 def CheckPrereq(self):
13334 """Check prerequisites.
13336 This checks that the instance and node names are valid.
13338 """
13339 instance_name = self.op.instance_name
13341 self.instance = self.cfg.GetInstanceInfo(instance_name)
13342 assert self.instance is not None, \
13343 "Cannot retrieve locked instance %s" % self.op.instance_name
13344 _CheckNodeOnline(self, self.instance.primary_node)
13346 if (self.op.remove_instance and
13347 self.instance.admin_state == constants.ADMINST_UP and
13348 not self.op.shutdown):
13349 raise errors.OpPrereqError("Can not remove instance without shutting it"
13352 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13353 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
13354 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
13355 assert self.dst_node is not None
13357 _CheckNodeOnline(self, self.dst_node.name)
13358 _CheckNodeNotDrained(self, self.dst_node.name)
13360 self._cds = None
13361 self.dest_disk_info = None
13362 self.dest_x509_ca = None
13364 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13365 self.dst_node = None
13367 if len(self.op.target_node) != len(self.instance.disks):
13368 raise errors.OpPrereqError(("Received destination information for %s"
13369 " disks, but instance %s has %s disks") %
13370 (len(self.op.target_node), instance_name,
13371 len(self.instance.disks)),
13372 errors.ECODE_INVAL)
13374 cds = _GetClusterDomainSecret()
13376 # Check X509 key name
13377 try:
13378 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
13379 except (TypeError, ValueError), err:
13380 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
13382 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
13383 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
13384 errors.ECODE_INVAL)
13386 # Load and verify CA
13387 try:
13388 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
13389 except OpenSSL.crypto.Error, err:
13390 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
13391 (err, ), errors.ECODE_INVAL)
13393 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
13394 if errcode is not None:
13395 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
13396 (msg, ), errors.ECODE_INVAL)
13398 self.dest_x509_ca = cert
13400 # Verify target information
13401 disk_info = []
13402 for idx, disk_data in enumerate(self.op.target_node):
13403 try:
13404 (host, port, magic) = \
13405 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
13406 except errors.GenericError, err:
13407 raise errors.OpPrereqError("Target info for disk %s: %s" %
13408 (idx, err), errors.ECODE_INVAL)
13410 disk_info.append((host, port, magic))
13412 assert len(disk_info) == len(self.op.target_node)
13413 self.dest_disk_info = disk_info
13415 else:
13416 raise errors.ProgrammerError("Unhandled export mode %r" %
13419 # instance disk type verification
13420 # TODO: Implement export support for file-based disks
13421 for disk in self.instance.disks:
13422 if disk.dev_type == constants.LD_FILE:
13423 raise errors.OpPrereqError("Export not supported for instances with"
13424 " file-based disks", errors.ECODE_INVAL)
13426 def _CleanupExports(self, feedback_fn):
13427 """Removes exports of current instance from all other nodes.
13429 If an instance in a cluster with nodes A..D was exported to node C, its
13430 exports will be removed from the nodes A, B and D.
13432 """
13433 assert self.op.mode != constants.EXPORT_MODE_REMOTE
13435 nodelist = self.cfg.GetNodeList()
13436 nodelist.remove(self.dst_node.name)
13438 # on one-node clusters nodelist will be empty after the removal
13439 # if we proceed the backup would be removed because OpBackupQuery
13440 # substitutes an empty list with the full cluster node list.
13441 iname = self.instance.name
13442 if nodelist:
13443 feedback_fn("Removing old exports for instance %s" % iname)
13444 exportlist = self.rpc.call_export_list(nodelist)
13445 for node in exportlist:
13446 if exportlist[node].fail_msg:
13447 continue
13448 if iname in exportlist[node].payload:
13449 msg = self.rpc.call_export_remove(node, iname).fail_msg
13450 if msg:
13451 self.LogWarning("Could not remove older export for instance %s"
13452 " on node %s: %s", iname, node, msg)
13454 def Exec(self, feedback_fn):
13455 """Export an instance to an image in the cluster.
13458 assert self.op.mode in constants.EXPORT_MODES
13460 instance = self.instance
13461 src_node = instance.primary_node
13463 if self.op.shutdown:
13464 # shutdown the instance, but not the disks
13465 feedback_fn("Shutting down instance %s" % instance.name)
13466 result = self.rpc.call_instance_shutdown(src_node, instance,
13467 self.op.shutdown_timeout)
13468 # TODO: Maybe ignore failures if ignore_remove_failures is set
13469 result.Raise("Could not shutdown instance %s on"
13470 " node %s" % (instance.name, src_node))
13472 # set the disks ID correctly since call_instance_start needs the
13473 # correct drbd minor to create the symlinks
13474 for disk in instance.disks:
13475 self.cfg.SetDiskID(disk, src_node)
13477 activate_disks = (instance.admin_state != constants.ADMINST_UP)
13479 if activate_disks:
13480 # Activate the instance disks if we're exporting a stopped instance
13481 feedback_fn("Activating disks for %s" % instance.name)
13482 _StartInstanceDisks(self, instance, None)
13484 try:
13485 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
13486 instance)
13488 helper.CreateSnapshots()
13489 try:
13490 if (self.op.shutdown and
13491 instance.admin_state == constants.ADMINST_UP and
13492 not self.op.remove_instance):
13493 assert not activate_disks
13494 feedback_fn("Starting instance %s" % instance.name)
13495 result = self.rpc.call_instance_start(src_node,
13496 (instance, None, None), False)
13497 msg = result.fail_msg
13498 if msg:
13499 feedback_fn("Failed to start instance: %s" % msg)
13500 _ShutdownInstanceDisks(self, instance)
13501 raise errors.OpExecError("Could not start instance: %s" % msg)
13503 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13504 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
13505 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13506 connect_timeout = constants.RIE_CONNECT_TIMEOUT
13507 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
13509 (key_name, _, _) = self.x509_key_name
13511 dest_ca_pem = \
13512 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
13513 self.dest_x509_ca)
13515 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
13516 key_name, dest_ca_pem,
13517 timeouts)
13519 finally:
13520 helper.Cleanup()
13521 # Check for backwards compatibility
13522 assert len(dresults) == len(instance.disks)
13523 assert compat.all(isinstance(i, bool) for i in dresults), \
13524 "Not all results are boolean: %r" % dresults
13528 feedback_fn("Deactivating disks for %s" % instance.name)
13529 _ShutdownInstanceDisks(self, instance)
13531 if not (compat.all(dresults) and fin_resu):
13532 failures = []
13533 if not fin_resu:
13534 failures.append("export finalization")
13535 if not compat.all(dresults):
13536 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
13537 if not dsk)
13538 failures.append("disk export: disk(s) %s" % fdsk)
13540 raise errors.OpExecError("Export failed, errors in %s" %
13541 utils.CommaJoin(failures))
13543 # At this point, the export was successful, we can cleanup/finish
13545 # Remove instance if requested
13546 if self.op.remove_instance:
13547 feedback_fn("Removing instance %s" % instance.name)
13548 _RemoveInstance(self, feedback_fn, instance,
13549 self.op.ignore_remove_failures)
13551 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13552 self._CleanupExports(feedback_fn)
13554 return fin_resu, dresults
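# Callers thus receive (fin_resu, dresults): one boolean for the export
# finalization as a whole and one boolean per instance disk; for example a
# fully successful export of a two-disk instance yields (True, [True, True]).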
13557 class LUBackupRemove(NoHooksLU):
13558 """Remove exports related to the named instance.
13563 def ExpandNames(self):
13564 self.needed_locks = {}
13565 # We need all nodes to be locked in order for RemoveExport to work, but we
13566 # don't need to lock the instance itself, as nothing will happen to it (and
13567 # we can remove exports also for a removed instance)
13568 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13570 def Exec(self, feedback_fn):
13571 """Remove any export.
13574 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
13575 # If the instance was not found we'll try with the name that was passed in.
13576 # This will only work if it was an FQDN, though.
13577 fqdn_warn = False
13578 if not instance_name:
13579 fqdn_warn = True
13580 instance_name = self.op.instance_name
13582 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
13583 exportlist = self.rpc.call_export_list(locked_nodes)
13584 found = False
13585 for node in exportlist:
13586 msg = exportlist[node].fail_msg
13587 if msg:
13588 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
13590 if instance_name in exportlist[node].payload:
13591 found = True
13592 result = self.rpc.call_export_remove(node, instance_name)
13593 msg = result.fail_msg
13594 if msg:
13595 logging.error("Could not remove export for instance %s"
13596 " on node %s: %s", instance_name, node, msg)
13598 if fqdn_warn and not found:
13599 feedback_fn("Export not found. If trying to remove an export belonging"
13600 " to a deleted instance please use its Fully Qualified"
13604 class LUGroupAdd(LogicalUnit):
13605 """Logical unit for creating node groups.
13608 HPATH = "group-add"
13609 HTYPE = constants.HTYPE_GROUP
13610 REQ_BGL = False
13612 def ExpandNames(self):
13613 # We need the new group's UUID here so that we can create and acquire the
13614 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
13615 # that it should not check whether the UUID exists in the configuration.
13616 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
13617 self.needed_locks = {}
13618 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
13620 def CheckPrereq(self):
13621 """Check prerequisites.
13623 This checks that the given group name is not an existing node group
13624 already.
13626 """
13627 try:
13628 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13629 except errors.OpPrereqError:
13630 pass
13631 else:
13632 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
13633 " node group (UUID: %s)" %
13634 (self.op.group_name, existing_uuid),
13635 errors.ECODE_EXISTS)
13637 if self.op.ndparams:
13638 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
13640 if self.op.hv_state:
13641 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
13642 else:
13643 self.new_hv_state = None
13645 if self.op.disk_state:
13646 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
13647 else:
13648 self.new_disk_state = None
13650 if self.op.diskparams:
13651 for templ in constants.DISK_TEMPLATES:
13652 if templ in self.op.diskparams:
13653 utils.ForceDictType(self.op.diskparams[templ],
13654 constants.DISK_DT_TYPES)
13655 self.new_diskparams = self.op.diskparams
13656 try:
13657 utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
13658 except errors.OpPrereqError, err:
13659 raise errors.OpPrereqError("While verify diskparams options: %s" % err,
13660 errors.ECODE_INVAL)
13661 else:
13662 self.new_diskparams = {}
13664 if self.op.ipolicy:
13665 cluster = self.cfg.GetClusterInfo()
13666 full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
13668 objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
13669 except errors.ConfigurationError, err:
13670 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
13671 errors.ECODE_INVAL)
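# Example shape of the diskparams checked above (template and value
# hypothetical):
#   self.op.diskparams = {constants.DT_DRBD8: {"metavg": "xenvg"}}
# each per-template subdict is type-checked via constants.DISK_DT_TYPES and
# its option names verified against constants.DISK_DT_DEFAULTS.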
13673 def BuildHooksEnv(self):
13674 """Build hooks env.
13678 "GROUP_NAME": self.op.group_name,
13681 def BuildHooksNodes(self):
13682 """Build hooks nodes.
13685 mn = self.cfg.GetMasterNode()
13686 return ([mn], [mn])
13688 def Exec(self, feedback_fn):
13689 """Add the node group to the cluster.
13692 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
13693 uuid=self.group_uuid,
13694 alloc_policy=self.op.alloc_policy,
13695 ndparams=self.op.ndparams,
13696 diskparams=self.new_diskparams,
13697 ipolicy=self.op.ipolicy,
13698 hv_state_static=self.new_hv_state,
13699 disk_state_static=self.new_disk_state)
13701 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
13702 del self.remove_locks[locking.LEVEL_NODEGROUP]
13705 class LUGroupAssignNodes(NoHooksLU):
13706 """Logical unit for assigning nodes to groups.
13711 def ExpandNames(self):
13712 # These raise errors.OpPrereqError on their own:
13713 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13714 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
13716 # We want to lock all the affected nodes and groups. We have readily
13717 # available the list of nodes, and the *destination* group. To gather the
13718 # list of "source" groups, we need to fetch node information later on.
13719 self.needed_locks = {
13720 locking.LEVEL_NODEGROUP: set([self.group_uuid]),
13721 locking.LEVEL_NODE: self.op.nodes,
13722 }
13724 def DeclareLocks(self, level):
13725 if level == locking.LEVEL_NODEGROUP:
13726 assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
13728 # Try to get all affected nodes' groups without having the group or node
13729 # lock yet. Needs verification later in the code flow.
13730 groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
13732 self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
13734 def CheckPrereq(self):
13735 """Check prerequisites.
13738 assert self.needed_locks[locking.LEVEL_NODEGROUP]
13739 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
13740 frozenset(self.op.nodes))
13742 expected_locks = (set([self.group_uuid]) |
13743 self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
13744 actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
13745 if actual_locks != expected_locks:
13746 raise errors.OpExecError("Nodes changed groups since locks were acquired,"
13747 " current groups are '%s', used to be '%s'" %
13748 (utils.CommaJoin(expected_locks),
13749 utils.CommaJoin(actual_locks)))
13751 self.node_data = self.cfg.GetAllNodesInfo()
13752 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13753 instance_data = self.cfg.GetAllInstancesInfo()
13755 if self.group is None:
13756 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13757 (self.op.group_name, self.group_uuid))
13759 (new_splits, previous_splits) = \
13760 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
13761 for node in self.op.nodes],
13762 self.node_data, instance_data)
13764 if new_splits:
13765 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
13767 if not self.op.force:
13768 raise errors.OpExecError("The following instances get split by this"
13769 " change and --force was not given: %s" %
13772 self.LogWarning("This operation will split the following instances: %s",
13775 if previous_splits:
13776 self.LogWarning("In addition, these already-split instances continue"
13777 " to be split across groups: %s",
13778 utils.CommaJoin(utils.NiceSort(previous_splits)))
13780 def Exec(self, feedback_fn):
13781 """Assign nodes to a new group.
13784 mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
13786 self.cfg.AssignGroupNodes(mods)
13788 @staticmethod
13789 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
13790 """Check for split instances after a node assignment.
13792 This method considers a series of node assignments as an atomic operation,
13793 and returns information about split instances after applying the set of
13794 changes.
13796 In particular, it returns information about newly split instances, and
13797 instances that were already split, and remain so after the change.
13799 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
13800 considered.
13802 @type changes: list of (node_name, new_group_uuid) pairs.
13803 @param changes: list of node assignments to consider.
13804 @param node_data: a dict with data for all nodes
13805 @param instance_data: a dict with all instances to consider
13806 @rtype: a two-tuple
13807 @return: a list of instances that were previously okay and result split as a
13808 consequence of this change, and a list of instances that were previously
13809 split and this change does not fix.
13811 """
13812 changed_nodes = dict((node, group) for node, group in changes
13813 if node_data[node].group != group)
13815 all_split_instances = set()
13816 previously_split_instances = set()
13818 def InstanceNodes(instance):
13819 return [instance.primary_node] + list(instance.secondary_nodes)
13821 for inst in instance_data.values():
13822 if inst.disk_template not in constants.DTS_INT_MIRROR:
13823 continue
13825 instance_nodes = InstanceNodes(inst)
13827 if len(set(node_data[node].group for node in instance_nodes)) > 1:
13828 previously_split_instances.add(inst.name)
13830 if len(set(changed_nodes.get(node, node_data[node].group)
13831 for node in instance_nodes)) > 1:
13832 all_split_instances.add(inst.name)
13834 return (list(all_split_instances - previously_split_instances),
13835 list(previously_split_instances & all_split_instances))
13838 class _GroupQuery(_QueryBase):
13839 FIELDS = query.GROUP_FIELDS
13841 def ExpandNames(self, lu):
13842 lu.needed_locks = {}
13844 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
13845 self._cluster = lu.cfg.GetClusterInfo()
13846 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
13848 if not self.names:
13849 self.wanted = [name_to_uuid[name]
13850 for name in utils.NiceSort(name_to_uuid.keys())]
13851 else:
13852 # Accept names to be either names or UUIDs.
13853 missing = []
13854 self.wanted = []
13855 all_uuid = frozenset(self._all_groups.keys())
13857 for name in self.names:
13858 if name in all_uuid:
13859 self.wanted.append(name)
13860 elif name in name_to_uuid:
13861 self.wanted.append(name_to_uuid[name])
13862 else:
13863 missing.append(name)
13865 if missing:
13866 raise errors.OpPrereqError("Some groups do not exist: %s" %
13867 utils.CommaJoin(missing),
13868 errors.ECODE_NOENT)
13870 def DeclareLocks(self, lu, level):
13871 pass
13873 def _GetQueryData(self, lu):
13874 """Computes the list of node groups and their attributes.
13877 do_nodes = query.GQ_NODE in self.requested_data
13878 do_instances = query.GQ_INST in self.requested_data
13880 group_to_nodes = None
13881 group_to_instances = None
13883 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
13884 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
13885 # latter GetAllInstancesInfo() is not enough, for we have to go through
13886 # instance->node. Hence, we will need to process nodes even if we only need
13887 # instance information.
13888 if do_nodes or do_instances:
13889 all_nodes = lu.cfg.GetAllNodesInfo()
13890 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
13891 node_to_group = {}
13893 for node in all_nodes.values():
13894 if node.group in group_to_nodes:
13895 group_to_nodes[node.group].append(node.name)
13896 node_to_group[node.name] = node.group
13898 if do_instances:
13899 all_instances = lu.cfg.GetAllInstancesInfo()
13900 group_to_instances = dict((uuid, []) for uuid in self.wanted)
13902 for instance in all_instances.values():
13903 node = instance.primary_node
13904 if node in node_to_group:
13905 group_to_instances[node_to_group[node]].append(instance.name)
13907 if not do_nodes:
13908 # Do not pass on node information if it was not requested.
13909 group_to_nodes = None
13911 return query.GroupQueryData(self._cluster,
13912 [self._all_groups[uuid]
13913 for uuid in self.wanted],
13914 group_to_nodes, group_to_instances,
13915 query.GQ_DISKPARAMS in self.requested_data)
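# Schematic of the mappings handed to GroupQueryData above (UUIDs and names
# hypothetical):
#   group_to_nodes     = {"uuid-g1": ["node1", "node2"]}
#   group_to_instances = {"uuid-g1": ["inst1"]}
# either mapping stays None when the corresponding GQ_* data was not asked
# for in the query.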
13918 class LUGroupQuery(NoHooksLU):
13919 """Logical unit for querying node groups.
13924 def CheckArguments(self):
13925 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
13926 self.op.output_fields, False)
13928 def ExpandNames(self):
13929 self.gq.ExpandNames(self)
13931 def DeclareLocks(self, level):
13932 self.gq.DeclareLocks(self, level)
13934 def Exec(self, feedback_fn):
13935 return self.gq.OldStyleQuery(self)
13938 class LUGroupSetParams(LogicalUnit):
13939 """Modifies the parameters of a node group.
13942 HPATH = "group-modify"
13943 HTYPE = constants.HTYPE_GROUP
13944 REQ_BGL = False
13946 def CheckArguments(self):
13947 all_changes = [
13948 self.op.ndparams,
13949 self.op.diskparams,
13950 self.op.alloc_policy,
13951 self.op.hv_state,
13952 self.op.disk_state,
13953 self.op.ipolicy,
13954 ]
13956 if all_changes.count(None) == len(all_changes):
13957 raise errors.OpPrereqError("Please pass at least one modification",
13958 errors.ECODE_INVAL)
13960 def ExpandNames(self):
13961 # This raises errors.OpPrereqError on its own:
13962 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13964 self.needed_locks = {
13965 locking.LEVEL_INSTANCE: [],
13966 locking.LEVEL_NODEGROUP: [self.group_uuid],
13967 }
13969 self.share_locks[locking.LEVEL_INSTANCE] = 1
13971 def DeclareLocks(self, level):
13972 if level == locking.LEVEL_INSTANCE:
13973 assert not self.needed_locks[locking.LEVEL_INSTANCE]
13975 # Lock instances optimistically, needs verification once group lock has
13976 # been acquired
13977 self.needed_locks[locking.LEVEL_INSTANCE] = \
13978 self.cfg.GetNodeGroupInstances(self.group_uuid)
13980 @staticmethod
13981 def _UpdateAndVerifyDiskParams(old, new):
13982 """Updates and verifies disk parameters.
13985 new_params = _GetUpdatedParams(old, new)
13986 utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
13987 return new_params
13989 def CheckPrereq(self):
13990 """Check prerequisites.
13993 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13995 # Check if locked instances are still correct
13996 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
13998 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13999 cluster = self.cfg.GetClusterInfo()
14001 if self.group is None:
14002 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
14003 (self.op.group_name, self.group_uuid))
14005 if self.op.ndparams:
14006 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
14007 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
14008 self.new_ndparams = new_ndparams
14010 if self.op.diskparams:
14011 diskparams = self.group.diskparams
14012 uavdp = self._UpdateAndVerifyDiskParams
14013 # For each disktemplate subdict update and verify the values
14014 new_diskparams = dict((dt,
14015 uavdp(diskparams.get(dt, {}),
14016 self.op.diskparams[dt]))
14017 for dt in constants.DISK_TEMPLATES
14018 if dt in self.op.diskparams)
14019 # Now that we have all subdicts of diskparams ready, let's merge the actual
14020 # dict with all updated subdicts
14021 self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
14022 try:
14023 utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
14024 except errors.OpPrereqError, err:
14025 raise errors.OpPrereqError("While verify diskparams options: %s" % err,
14026 errors.ECODE_INVAL)
14028 if self.op.hv_state:
14029 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
14030 self.group.hv_state_static)
14032 if self.op.disk_state:
14033 self.new_disk_state = \
14034 _MergeAndVerifyDiskState(self.op.disk_state,
14035 self.group.disk_state_static)
14037 if self.op.ipolicy:
14038 self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
14039 self.op.ipolicy,
14040 group_policy=True)
14042 new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
14043 inst_filter = lambda inst: inst.name in owned_instances
14044 instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
14045 violations = \
14046 _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
14047 self.group),
14048 new_ipolicy, instances)
14050 if violations:
14051 self.LogWarning("After the ipolicy change the following instances"
14052 " violate them: %s",
14053 utils.CommaJoin(violations))
14055 def BuildHooksEnv(self):
14056 """Build hooks env.
14060 "GROUP_NAME": self.op.group_name,
14061 "NEW_ALLOC_POLICY": self.op.alloc_policy,
14064 def BuildHooksNodes(self):
14065 """Build hooks nodes.
14068 mn = self.cfg.GetMasterNode()
14069 return ([mn], [mn])
14071 def Exec(self, feedback_fn):
14072 """Modifies the node group.
14077 if self.op.ndparams:
14078 self.group.ndparams = self.new_ndparams
14079 result.append(("ndparams", str(self.group.ndparams)))
14081 if self.op.diskparams:
14082 self.group.diskparams = self.new_diskparams
14083 result.append(("diskparams", str(self.group.diskparams)))
14085 if self.op.alloc_policy:
14086 self.group.alloc_policy = self.op.alloc_policy
14088 if self.op.hv_state:
14089 self.group.hv_state_static = self.new_hv_state
14091 if self.op.disk_state:
14092 self.group.disk_state_static = self.new_disk_state
14094 if self.op.ipolicy:
14095 self.group.ipolicy = self.new_ipolicy
14097 self.cfg.Update(self.group, feedback_fn)
14099 return result
14101 class LUGroupRemove(LogicalUnit):
14102 HPATH = "group-remove"
14103 HTYPE = constants.HTYPE_GROUP
14104 REQ_BGL = False
14106 def ExpandNames(self):
14107 # This raises errors.OpPrereqError on its own:
14108 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14109 self.needed_locks = {
14110 locking.LEVEL_NODEGROUP: [self.group_uuid],
14111 }
14113 def CheckPrereq(self):
14114 """Check prerequisites.
14116 This checks that the given group name exists as a node group, that it is
14117 empty (i.e., contains no nodes), and that it is not the last group of the
14118 cluster.
14120 """
14121 # Verify that the group is empty.
14122 group_nodes = [node.name
14123 for node in self.cfg.GetAllNodesInfo().values()
14124 if node.group == self.group_uuid]
14126 if group_nodes:
14127 raise errors.OpPrereqError("Group '%s' not empty, has the following"
14129 (self.op.group_name,
14130 utils.CommaJoin(utils.NiceSort(group_nodes))),
14131 errors.ECODE_STATE)
14133 # Verify the cluster would not be left group-less.
14134 if len(self.cfg.GetNodeGroupList()) == 1:
14135 raise errors.OpPrereqError("Group '%s' is the only group,"
14136 " cannot be removed" %
14137 self.op.group_name,
14138 errors.ECODE_STATE)
14140 def BuildHooksEnv(self):
14141 """Build hooks env.
14145 "GROUP_NAME": self.op.group_name,
14148 def BuildHooksNodes(self):
14149 """Build hooks nodes.
14152 mn = self.cfg.GetMasterNode()
14153 return ([mn], [mn])
14155 def Exec(self, feedback_fn):
14156 """Remove the node group.
14160 self.cfg.RemoveNodeGroup(self.group_uuid)
14161 except errors.ConfigurationError:
14162 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
14163 (self.op.group_name, self.group_uuid))
14165 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
14168 class LUGroupRename(LogicalUnit):
14169 HPATH = "group-rename"
14170 HTYPE = constants.HTYPE_GROUP
14171 REQ_BGL = False
14173 def ExpandNames(self):
14174 # This raises errors.OpPrereqError on its own:
14175 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14177 self.needed_locks = {
14178 locking.LEVEL_NODEGROUP: [self.group_uuid],
14179 }
14181 def CheckPrereq(self):
14182 """Check prerequisites.
14184 Ensures requested new name is not yet used.
14186 """
14187 try:
14188 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
14189 except errors.OpPrereqError:
14190 pass
14191 else:
14192 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
14193 " node group (UUID: %s)" %
14194 (self.op.new_name, new_name_uuid),
14195 errors.ECODE_EXISTS)
14197 def BuildHooksEnv(self):
14198 """Build hooks env.
14202 "OLD_NAME": self.op.group_name,
14203 "NEW_NAME": self.op.new_name,
14206 def BuildHooksNodes(self):
14207 """Build hooks nodes.
14210 mn = self.cfg.GetMasterNode()
14212 all_nodes = self.cfg.GetAllNodesInfo()
14213 all_nodes.pop(mn, None)
14215 run_nodes = [mn]
14216 run_nodes.extend(node.name for node in all_nodes.values()
14217 if node.group == self.group_uuid)
14219 return (run_nodes, run_nodes)
14221 def Exec(self, feedback_fn):
14222 """Rename the node group.
14225 group = self.cfg.GetNodeGroup(self.group_uuid)
14227 if group is None:
14228 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
14229 (self.op.group_name, self.group_uuid))
14231 group.name = self.op.new_name
14232 self.cfg.Update(group, feedback_fn)
14234 return self.op.new_name
14237 class LUGroupEvacuate(LogicalUnit):
14238 HPATH = "group-evacuate"
14239 HTYPE = constants.HTYPE_GROUP
14240 REQ_BGL = False
14242 def ExpandNames(self):
14243 # This raises errors.OpPrereqError on its own:
14244 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14246 if self.op.target_groups:
14247 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
14248 self.op.target_groups)
14249 else:
14250 self.req_target_uuids = []
14252 if self.group_uuid in self.req_target_uuids:
14253 raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
14254 " as a target group (targets are %s)" %
14256 utils.CommaJoin(self.req_target_uuids)),
14257 errors.ECODE_INVAL)
14259 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
14261 self.share_locks = _ShareAll()
14262 self.needed_locks = {
14263 locking.LEVEL_INSTANCE: [],
14264 locking.LEVEL_NODEGROUP: [],
14265 locking.LEVEL_NODE: [],
14266 }
14268 def DeclareLocks(self, level):
14269 if level == locking.LEVEL_INSTANCE:
14270 assert not self.needed_locks[locking.LEVEL_INSTANCE]
14272 # Lock instances optimistically, needs verification once node and group
14273 # locks have been acquired
14274 self.needed_locks[locking.LEVEL_INSTANCE] = \
14275 self.cfg.GetNodeGroupInstances(self.group_uuid)
14277 elif level == locking.LEVEL_NODEGROUP:
14278 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
14280 if self.req_target_uuids:
14281 lock_groups = set([self.group_uuid] + self.req_target_uuids)
14283 # Lock all groups used by instances optimistically; this requires going
14284 # via the node before it's locked, requiring verification later on
14285 lock_groups.update(group_uuid
14286 for instance_name in
14287 self.owned_locks(locking.LEVEL_INSTANCE)
14288 for group_uuid in
14289 self.cfg.GetInstanceNodeGroups(instance_name))
14290 else:
14291 # No target groups, need to lock all of them
14292 lock_groups = locking.ALL_SET
14294 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
14296 elif level == locking.LEVEL_NODE:
14297 # This will only lock the nodes in the group to be evacuated which
14298 # contain actual instances
14299 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
14300 self._LockInstancesNodes()
14302 # Lock all nodes in group to be evacuated and target groups
14303 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14304 assert self.group_uuid in owned_groups
14305 member_nodes = [node_name
14306 for group in owned_groups
14307 for node_name in self.cfg.GetNodeGroup(group).members]
14308 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
14310 def CheckPrereq(self):
14311 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
14312 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14313 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
14315 assert owned_groups.issuperset(self.req_target_uuids)
14316 assert self.group_uuid in owned_groups
14318 # Check if locked instances are still correct
14319 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
14321 # Get instance information
14322 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
14324 # Check if node groups for locked instances are still correct
14325 _CheckInstancesNodeGroups(self.cfg, self.instances,
14326 owned_groups, owned_nodes, self.group_uuid)
14328 if self.req_target_uuids:
14329 # User requested specific target groups
14330 self.target_uuids = self.req_target_uuids
14331 else:
14332 # All groups except the one to be evacuated are potential targets
14333 self.target_uuids = [group_uuid for group_uuid in owned_groups
14334 if group_uuid != self.group_uuid]
14336 if not self.target_uuids:
14337 raise errors.OpPrereqError("There are no possible target groups",
14338 errors.ECODE_INVAL)
14340 def BuildHooksEnv(self):
14341 """Build hooks env.
14345 "GROUP_NAME": self.op.group_name,
14346 "TARGET_GROUPS": " ".join(self.target_uuids),
14349 def BuildHooksNodes(self):
14350 """Build hooks nodes.
14353 mn = self.cfg.GetMasterNode()
14355 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
14357 run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
14359 return (run_nodes, run_nodes)
14361 def Exec(self, feedback_fn):
14362 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
14364 assert self.group_uuid not in self.target_uuids
14366 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
14367 instances=instances, target_groups=self.target_uuids)
14369 ial.Run(self.op.iallocator)
14371 if not ial.success:
14372 raise errors.OpPrereqError("Can't compute group evacuation using"
14373 " iallocator '%s': %s" %
14374 (self.op.iallocator, ial.info),
14375 errors.ECODE_NORES)
14377 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
14379 self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
14380 len(jobs), self.op.group_name)
14382 return ResultWithJobs(jobs)
14385 class TagsLU(NoHooksLU): # pylint: disable=W0223
14386 """Generic tags LU.
14388 This is an abstract class which is the parent of all the other tags LUs.
14390 """
14391 def ExpandNames(self):
14392 self.group_uuid = None
14393 self.needed_locks = {}
14395 if self.op.kind == constants.TAG_NODE:
14396 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
14397 lock_level = locking.LEVEL_NODE
14398 lock_name = self.op.name
14399 elif self.op.kind == constants.TAG_INSTANCE:
14400 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
14401 lock_level = locking.LEVEL_INSTANCE
14402 lock_name = self.op.name
14403 elif self.op.kind == constants.TAG_NODEGROUP:
14404 self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
14405 lock_level = locking.LEVEL_NODEGROUP
14406 lock_name = self.group_uuid
14407 else:
14408 lock_level = None
14409 lock_name = None
14411 if lock_level and getattr(self.op, "use_locking", True):
14412 self.needed_locks[lock_level] = lock_name
14414 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
14415 # not possible to acquire the BGL based on opcode parameters)
14417 def CheckPrereq(self):
14418 """Check prerequisites.
14421 if self.op.kind == constants.TAG_CLUSTER:
14422 self.target = self.cfg.GetClusterInfo()
14423 elif self.op.kind == constants.TAG_NODE:
14424 self.target = self.cfg.GetNodeInfo(self.op.name)
14425 elif self.op.kind == constants.TAG_INSTANCE:
14426 self.target = self.cfg.GetInstanceInfo(self.op.name)
14427 elif self.op.kind == constants.TAG_NODEGROUP:
14428 self.target = self.cfg.GetNodeGroup(self.group_uuid)
14429 else:
14430 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
14431 str(self.op.kind), errors.ECODE_INVAL)
14434 class LUTagsGet(TagsLU):
14435 """Returns the tags of a given object.
14440 def ExpandNames(self):
14441 TagsLU.ExpandNames(self)
14443 # Share locks as this is only a read operation
14444 self.share_locks = _ShareAll()
14446 def Exec(self, feedback_fn):
14447 """Returns the tag list.
14450 return list(self.target.GetTags())
14453 class LUTagsSearch(NoHooksLU):
14454 """Searches the tags for a given pattern.
14459 def ExpandNames(self):
14460 self.needed_locks = {}
14462 def CheckPrereq(self):
14463 """Check prerequisites.
14465 This checks the pattern passed for validity by compiling it.
14467 """
14468 try:
14469 self.re = re.compile(self.op.pattern)
14470 except re.error, err:
14471 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
14472 (self.op.pattern, err), errors.ECODE_INVAL)
14474 def Exec(self, feedback_fn):
14475 """Returns the tag list.
14479 tgts = [("/cluster", cfg.GetClusterInfo())]
14480 ilist = cfg.GetAllInstancesInfo().values()
14481 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
14482 nlist = cfg.GetAllNodesInfo().values()
14483 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
14484 tgts.extend(("/nodegroup/%s" % n.name, n)
14485 for n in cfg.GetAllNodeGroupsInfo().values())
14486 results = []
14487 for path, target in tgts:
14488 for tag in target.GetTags():
14489 if self.re.search(tag):
14490 results.append((path, tag))
14492 return results
14494 class LUTagsSet(TagsLU):
14495 """Sets a tag on a given object.
14500 def CheckPrereq(self):
14501 """Check prerequisites.
14503 This checks the type and length of the tag name and value.
14505 """
14506 TagsLU.CheckPrereq(self)
14507 for tag in self.op.tags:
14508 objects.TaggableObject.ValidateTag(tag)
14510 def Exec(self, feedback_fn):
14511 """Sets the tag.
14513 """
14514 try:
14515 for tag in self.op.tags:
14516 self.target.AddTag(tag)
14517 except errors.TagError, err:
14518 raise errors.OpExecError("Error while setting tag: %s" % str(err))
14519 self.cfg.Update(self.target, feedback_fn)
14522 class LUTagsDel(TagsLU):
14523 """Delete a list of tags from a given object.
14528 def CheckPrereq(self):
14529 """Check prerequisites.
14531 This checks that we have the given tag.
14533 """
14534 TagsLU.CheckPrereq(self)
14535 for tag in self.op.tags:
14536 objects.TaggableObject.ValidateTag(tag)
14537 del_tags = frozenset(self.op.tags)
14538 cur_tags = self.target.GetTags()
14540 diff_tags = del_tags - cur_tags
14541 if diff_tags:
14542 diff_names = ("'%s'" % i for i in sorted(diff_tags))
14543 raise errors.OpPrereqError("Tag(s) %s not found" %
14544 (utils.CommaJoin(diff_names), ),
14545 errors.ECODE_NOENT)
14547 def Exec(self, feedback_fn):
14548 """Remove the tag from the object.
14551 for tag in self.op.tags:
14552 self.target.RemoveTag(tag)
14553 self.cfg.Update(self.target, feedback_fn)
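# These tag LUs are usually driven from the command-line clients, e.g.:
#   gnt-instance add-tags instance1 env:prod
#   gnt-cluster search-tags '^env:'
# (examples only; tags are free-form strings validated by
# objects.TaggableObject.ValidateTag)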
14556 class LUTestDelay(NoHooksLU):
14557 """Sleep for a specified amount of time.
14559 This LU sleeps on the master and/or nodes for a specified amount of
14560 time.
14562 """
14563 REQ_BGL = False
14565 def ExpandNames(self):
14566 """Expand names and set required locks.
14568 This expands the node list, if any.
14570 """
14571 self.needed_locks = {}
14572 if self.op.on_nodes:
14573 # _GetWantedNodes can be used here, but is not always appropriate to use
14574 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
14575 # more information.
14576 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
14577 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
14579 def _TestDelay(self):
14580 """Do the actual sleep.
14583 if self.op.on_master:
14584 if not utils.TestDelay(self.op.duration):
14585 raise errors.OpExecError("Error during master delay test")
14586 if self.op.on_nodes:
14587 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
14588 for node, node_result in result.items():
14589 node_result.Raise("Failure during rpc call to node %s" % node)
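# A convenient way to exercise this LU is the debug client, e.g.:
#   gnt-debug delay 3                       # sleep on the master
#   gnt-debug delay -n node1.example.com 3  # sleep on a node (name made up)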
14591 def Exec(self, feedback_fn):
14592 """Execute the test delay opcode, with the wanted repetitions.
14595 if self.op.repeat == 0:
14596 self._TestDelay()
14597 else:
14598 top_value = self.op.repeat - 1
14599 for i in range(self.op.repeat):
14600 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
14604 class LUTestJqueue(NoHooksLU):
14605 """Utility LU to test some aspects of the job queue.
14610 # Must be lower than default timeout for WaitForJobChange to see whether it
14611 # notices changed jobs
14612 _CLIENT_CONNECT_TIMEOUT = 20.0
14613 _CLIENT_CONFIRM_TIMEOUT = 60.0
14615 @classmethod
14616 def _NotifyUsingSocket(cls, cb, errcls):
14617 """Opens a Unix socket and waits for another program to connect.
14620 @param cb: Callback to send socket name to client
14621 @type errcls: class
14622 @param errcls: Exception class to use for errors
14624 """
14625 # Using a temporary directory as there's no easy way to create temporary
14626 # sockets without writing a custom loop around tempfile.mktemp and
14628 tmpdir = tempfile.mkdtemp()
14630 tmpsock = utils.PathJoin(tmpdir, "sock")
14632 logging.debug("Creating temporary socket at %s", tmpsock)
14633 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
14638 # Send details to client
14641 # Wait for client to connect before continuing
14642 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
14644 (conn, _) = sock.accept()
14645 except socket.error, err:
14646 raise errcls("Client didn't connect in time (%s)" % err)
14650 # Remove as soon as client is connected
14651 shutil.rmtree(tmpdir)
14653 # Wait for client to close
14656 # pylint: disable=E1101
14657 # Instance of '_socketobject' has no ... member
14658 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
14660 except socket.error, err:
14661 raise errcls("Client failed to confirm notification (%s)" % err)
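
  # Illustrative note (not part of the original module): a client on the
  # other end of the notification socket only needs to connect and then
  # close the connection. A minimal sketch, assuming the socket path was
  # received through the job feedback channel:
  #
  #   s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
  #   s.connect(sockname)
  #   s.close()   # closing is what confirms the notification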

  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
      # Report how many test messages have been sent
      self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc_runner, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc_runner
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.memory = self.disks = self.disk_template = self.spindle_use = None
    self.name = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.instances = None
    self.evac_mode = None
    self.target_groups = []
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None

    try:
      (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
    except KeyError:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)

    keyset = [n for (n, _) in keydata]

    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)

    self._BuildInputData(compat.partial(fn, self), keydata)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      "ipolicy": cluster_info.ipolicy,
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    else:
      hypervisor_name = cluster_info.primary_hypervisor

    node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
                                        [hypervisor_name])
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(cfg, ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data
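
  # Illustrative note (not part of the original module): the in_data
  # structure assembled above is what gets serialized for the external
  # allocator script. A minimal sketch of its top level, with made-up
  # values:
  #
  #   {
  #     "version": 2,
  #     "cluster_name": "cluster.example.com",
  #     "cluster_tags": [],
  #     "enabled_hypervisors": ["xen-pvm"],
  #     "ipolicy": {...},
  #     "nodegroups": {"uuid-1": {"name": "default", ...}},
  #     "nodes": {"node1.example.com": {...}},
  #     "instances": {"inst1.example.com": {...}},
  #     "request": {...},   # added later by _BuildInputData
  #   }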

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    cluster = cfg.GetClusterInfo()
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      "ipolicy": _CalculateGroupIPolicy(cluster, gdata),
      "tags": list(gdata.tags),
      })
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(cfg, node_cfg):
    """Compute global node data.

    @rtype: dict
    @returns: a dict mapping node names to node data dicts

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": ninfo.offline,
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      "ndparams": cfg.GetNdParams(ninfo),
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    #TODO(dynmem): compute the right data on MAX and MIN memory
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = _MakeLegacyNodeInfo(nresult.payload)

        for attr in ["memory_total", "memory_free", "memory_dom0",
                     "vg_size", "vg_free", "cpu_total"]:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MAXMEM]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
            i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
            remote_info["memory_free"] -= max(0, i_mem_diff)

            if iinfo.admin_state == constants.ADMINST_UP:
              i_p_up_mem += beinfo[constants.BE_MAXMEM]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info["memory_total"],
          "reserved_memory": remote_info["memory_dom0"],
          "free_memory": remote_info["memory_free"],
          "total_disk": remote_info["vg_size"],
          "free_disk": remote_info["vg_free"],
          "total_cpus": remote_info["cpu_total"],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results
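
  # Illustrative note (not part of the original module): the memory
  # adjustment above charges each primary instance for the difference
  # between its configured maximum and what it actually uses. With made-up
  # numbers: an instance with BE_MAXMEM = 1024 currently using 768 MiB
  # gives i_mem_diff = 1024 - 768 = 256, so 256 MiB is subtracted from the
  # node's reported memory_free to keep room for the instance growing back
  # to its maximum.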

  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MAXMEM],
        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode}
                  for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_INT_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "spindle_use": self.spindle_use,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      "hypervisor": self.hypervisor,
      }

    return request
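
  # Illustrative note (not part of the original module): a minimal sketch
  # of the request built above for an allocation, with made-up values:
  #
  #   {
  #     "name": "inst1.example.com",
  #     "disk_template": "drbd",
  #     "tags": [],
  #     "os": "debootstrap+default",
  #     "vcpus": 1,
  #     "memory": 512,
  #     "spindle_use": 1,
  #     "disks": [{"size": 1024, "mode": "rw"}],
  #     "disk_space_total": 1152,
  #     "nics": [{"mac": "...", "ip": None, "mode": "bridged", ...}],
  #     "required_nodes": 2,
  #     "hypervisor": "xen-pvm",
  #   }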

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if instance.disk_template in constants.DTS_INT_MIRROR and \
        len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddNodeEvacuate(self):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }

  def _AddChangeGroup(self):
    """Get data for group change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }

  def _BuildInputData(self, fn, keydata):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    for keyname, keytype in keydata:
      if keyname not in request:
        raise errors.ProgrammerError("Request parameter %s is missing" %
                                     keyname)
      val = request[keyname]
      if not keytype(val):
        raise errors.ProgrammerError("Request parameter %s doesn't pass"
                                     " validation, value %s, expected"
                                     " type %s" % (keyname, val, keytype))
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  _STRING_LIST = ht.TListOf(ht.TString)
  _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
     # pylint: disable=E1101
     # Class '...' has no 'OP_ID' member
     "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                          opcodes.OpInstanceMigrate.OP_ID,
                          opcodes.OpInstanceReplaceDisks.OP_ID])
     })))

  _NEVAC_MOVED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(3),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  _NEVAC_FAILED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TMaybeString,
                                  ])))
  _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                          ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
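
  # Illustrative note (not part of the original module): a node-evacuation
  # result matching _NEVAC_RESULT is a three-element list of (moved
  # instances, failed instances, opcode job sets). A sketch with made-up
  # names:
  #
  #   [
  #     [["inst1", "group1", ["node3"]]],             # moved
  #     [["inst2", "disk template not supported"]],   # failed
  #     [[{"OP_ID": "OP_INSTANCE_MIGRATE", ...}]],    # jobs
  #   ]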

  _MODE_DATA = {
    constants.IALLOCATOR_MODE_ALLOC:
      (_AddNewInstance, [
        ("name", ht.TString),
        ("memory", ht.TInt),
        ("spindle_use", ht.TInt),
        ("disks", ht.TListOf(ht.TDict)),
        ("disk_template", ht.TString),
        ("os", ht.TString),
        ("tags", _STRING_LIST),
        ("nics", ht.TListOf(ht.TDict)),
        ("vcpus", ht.TInt),
        ("hypervisor", ht.TString),
        ], ht.TList),
    constants.IALLOCATOR_MODE_RELOC:
      (_AddRelocateInstance,
       [("name", ht.TString), ("relocate_from", _STRING_LIST)],
       ht.TList),
    constants.IALLOCATOR_MODE_NODE_EVAC:
      (_AddNodeEvacuate, [
        ("instances", _STRING_LIST),
        ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
        ], _NEVAC_RESULT),
    constants.IALLOCATOR_MODE_CHG_GROUP:
      (_AddChangeGroup, [
        ("instances", _STRING_LIST),
        ("target_groups", _STRING_LIST),
        ], _NEVAC_RESULT),
    }
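
  # Illustrative note (not part of the original module): _MODE_DATA maps
  # each mode to (request builder, expected keyword arguments with their
  # type checks, result validator). Constructing an instance therefore
  # looks like this sketch, with made-up values; any other keyword, or a
  # missing one, raises ProgrammerError in __init__:
  #
  #   ial = IAllocator(cfg, rpc_runner,
  #                    constants.IALLOCATOR_MODE_RELOC,
  #                    name="inst1.example.com",
  #                    relocate_from=["node2.example.com"])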

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not self._result_check(self.result):
      raise errors.OpExecError("Iallocator returned invalid result,"
                               " expected %s, got %s" %
                               (self._result_check, self.result),
                               errors.ECODE_INVAL)

    if self.mode == constants.IALLOCATOR_MODE_RELOC:
      assert self.relocate_from is not None
      assert self.required_nodes == 1

      node2group = dict((name, ndata["group"])
                        for (name, ndata) in self.in_data["nodes"].items())

      fn = compat.partial(self._NodesToGroups, node2group,
                          self.in_data["nodegroups"])

      instance = self.cfg.GetInstanceInfo(self.name)
      request_groups = fn(self.relocate_from + [instance.primary_node])
      result_groups = fn(rdict["result"] + [instance.primary_node])

      if self.success and not set(result_groups).issubset(request_groups):
        raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
                                 " differ from original groups (%s)" %
                                 (utils.CommaJoin(result_groups),
                                  utils.CommaJoin(request_groups)))

    elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES

    self.out_data = rdict
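
  # Illustrative note (not part of the original module): a well-formed
  # allocator reply parsed above is a dict with at least these keys. A
  # sketch for a successful allocation, with made-up node names:
  #
  #   {
  #     "success": true,
  #     "info": "allocation successful",
  #     "result": ["node1.example.com", "node2.example.com"],
  #   }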

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)
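
  # Illustrative note (not part of the original module): a worked example
  # of _NodesToGroups with made-up data:
  #
  #   node2group = {"node1": "uuid-a", "node2": "uuid-b"}
  #   groups = {"uuid-a": {"name": "default"}}
  #   _NodesToGroups(node2group, groups, ["node1", "node2", "node3"])
  #   # -> ["default", "uuid-b"]  ("node3" is unknown and ignored;
  #   #    "uuid-b" has no group entry, so its UUID is used)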


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["memory", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            constants.IDISK_SIZE not in row or
            not isinstance(row[constants.IDISK_SIZE], int) or
            constants.IDISK_MODE not in row or
            row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = \
        list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
    elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                          constants.IALLOCATOR_MODE_NODE_EVAC):
      if not self.op.instances:
        raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
      self.op.instances = _GetWantedInstances(self, self.op.instances)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       memory=self.op.memory,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       spindle_use=self.op.spindle_use)
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from))
    elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       target_groups=self.op.target_groups)
    elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       evac_mode=self.op.evac_mode)
    else:
      raise errors.ProgrammerError("Unhandled mode '%s' in"
                                   " LUTestAllocator.Exec" % self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_CLUSTER: _ClusterQuery,
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  constants.QR_EXPORT: _ExportQuery,
  }

assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_VIA_OP}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
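
# Illustrative note (not part of the original module): looking up a query
# implementation is a plain dict access with a friendlier error, e.g.:
#
#   _GetQueryImplementation(constants.QR_NODE)     # -> _NodeQuery
#   _GetQueryImplementation("no-such-resource")    # raises OpPrereqError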