4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
44 from ganeti import ssh
45 from ganeti import utils
46 from ganeti import errors
47 from ganeti import hypervisor
48 from ganeti import locking
49 from ganeti import constants
50 from ganeti import objects
51 from ganeti import ssconf
52 from ganeti import uidpool
53 from ganeti import compat
54 from ganeti import masterd
55 from ganeti import netutils
56 from ganeti import query
57 from ganeti import qlang
58 from ganeti import opcodes
60 from ganeti import rpc
61 from ganeti import runtime
62 from ganeti import pathutils
63 from ganeti import vcluster
64 from ganeti import network
65 from ganeti.masterd import iallocator
67 import ganeti.masterd.instance # pylint: disable=W0611
71 INSTANCE_DOWN = [constants.ADMINST_DOWN]
72 INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
73 INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
75 #: Instance status in which an instance can be marked as offline/online
76 CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
77 constants.ADMINST_OFFLINE,
82 """Data container for LU results with jobs.
84 Instances of this class returned from L{LogicalUnit.Exec} will be recognized
85 by L{mcpu._ProcessResult}. The latter will then submit the jobs
86 contained in the C{jobs} attribute and include the job IDs in the opcode
90 def __init__(self, jobs, **kwargs):
91 """Initializes this class.
93 Additional return values can be specified as keyword arguments.
95 @type jobs: list of lists of L{opcodes.OpCode}
96 @param jobs: A list of lists of opcode objects
103 class LogicalUnit(object):
104 """Logical Unit base class.
106 Subclasses must follow these rules:
107 - implement ExpandNames
108 - implement CheckPrereq (except when tasklets are used)
109 - implement Exec (except when tasklets are used)
110 - implement BuildHooksEnv
111 - implement BuildHooksNodes
112 - redefine HPATH and HTYPE
113 - optionally redefine their run requirements:
114 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
116 Note that all commands require root permissions.
118 @ivar dry_run_result: the value (if any) that will be returned to the caller
119 in dry-run mode (signalled by opcode dry_run parameter)
126 def __init__(self, processor, op, context, rpc_runner):
127 """Constructor for LogicalUnit.
129 This needs to be overridden in derived classes in order to check op
133 self.proc = processor
135 self.cfg = context.cfg
136 self.glm = context.glm
138 self.owned_locks = context.glm.list_owned
139 self.context = context
140 self.rpc = rpc_runner
142 # Dictionaries used to declare locking needs to mcpu
143 self.needed_locks = None
144 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
145 self.opportunistic_locks = dict.fromkeys(locking.LEVELS, False)
148 self.remove_locks = {}
150 # Used to force good behavior when calling helper functions
151 self.recalculate_locks = {}
154 self.Log = processor.Log # pylint: disable=C0103
155 self.LogWarning = processor.LogWarning # pylint: disable=C0103
156 self.LogInfo = processor.LogInfo # pylint: disable=C0103
157 self.LogStep = processor.LogStep # pylint: disable=C0103
158 # support for dry-run
159 self.dry_run_result = None
160 # support for generic debug attribute
161 if (not hasattr(self.op, "debug_level") or
162 not isinstance(self.op.debug_level, int)):
163 self.op.debug_level = 0
168 # Validate opcode parameters and set defaults
169 self.op.Validate(True)
171 self.CheckArguments()
173 def CheckArguments(self):
174 """Check syntactic validity for the opcode arguments.
176 This method is for doing a simple syntactic check and ensuring
177 validity of opcode parameters, without any cluster-related
178 checks. While the same can be accomplished in ExpandNames and/or
179 CheckPrereq, doing these separately is better because:
181 - ExpandNames is left purely as a lock-related function
182 - CheckPrereq is run after we have acquired locks (and possible
185 The function is allowed to change the self.op attribute so that
186 later methods can no longer worry about missing parameters.
191 def ExpandNames(self):
192 """Expand names for this LU.
194 This method is called before starting to execute the opcode, and it should
195 update all the parameters of the opcode to their canonical form (e.g. a
196 short node name must be fully expanded after this method has successfully
197 completed). This way locking, hooks, logging, etc. can work correctly.
199 LUs which implement this method must also populate the self.needed_locks
200 member, as a dict with lock levels as keys, and a list of needed lock names
203 - use an empty dict if you don't need any lock
204 - if you don't need any lock at a particular level omit that
205 level (note that in this case C{DeclareLocks} won't be called
206 at all for that level)
207 - if you need locks at a level, but you can't calculate it in
208 this function, initialise that level with an empty list and do
209 further processing in L{LogicalUnit.DeclareLocks} (see that
210 function's docstring)
211 - don't put anything for the BGL level
212 - if you want all locks at a level use L{locking.ALL_SET} as a value
214 If you need to share locks (rather than acquire them exclusively) at one
215 level you can modify self.share_locks, setting a true value (usually 1) for
216 that level. By default locks are not shared.
218 This function can also define a list of tasklets, which then will be
219 executed in order instead of the usual LU-level CheckPrereq and Exec
220 functions, if those are not defined by the LU.
224 # Acquire all nodes and one instance
225 self.needed_locks = {
226 locking.LEVEL_NODE: locking.ALL_SET,
227 locking.LEVEL_INSTANCE: ['instance1.example.com'],
229 # Acquire just two nodes
230 self.needed_locks = {
231 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
234 self.needed_locks = {} # No, you can't leave it to the default value None
237 # The implementation of this method is mandatory only if the new LU is
238 # concurrent, so that old LUs don't need to be changed all at the same
241 self.needed_locks = {} # Exclusive LUs don't need locks.
243 raise NotImplementedError
245 def DeclareLocks(self, level):
246 """Declare LU locking needs for a level
248 While most LUs can just declare their locking needs at ExpandNames time,
249 sometimes there's the need to calculate some locks after having acquired
250 the ones before. This function is called just before acquiring locks at a
251 particular level, but after acquiring the ones at lower levels, and permits
252 such calculations. It can be used to modify self.needed_locks, and by
253 default it does nothing.
255 This function is only called if you have something already set in
256 self.needed_locks for the level.
258 @param level: Locking level which is going to be locked
259 @type level: member of L{ganeti.locking.LEVELS}
263 def CheckPrereq(self):
264 """Check prerequisites for this LU.
266 This method should check that the prerequisites for the execution
267 of this LU are fulfilled. It can do internode communication, but
268 it should be idempotent - no cluster or system changes are
271 The method should raise errors.OpPrereqError in case something is
272 not fulfilled. Its return value is ignored.
274 This method should also update all the parameters of the opcode to
275 their canonical form if it hasn't been done by ExpandNames before.
278 if self.tasklets is not None:
279 for (idx, tl) in enumerate(self.tasklets):
280 logging.debug("Checking prerequisites for tasklet %s/%s",
281 idx + 1, len(self.tasklets))
286 def Exec(self, feedback_fn):
289 This method should implement the actual work. It should raise
290 errors.OpExecError for failures that are somewhat dealt with in
294 if self.tasklets is not None:
295 for (idx, tl) in enumerate(self.tasklets):
296 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
299 raise NotImplementedError
301 def BuildHooksEnv(self):
302 """Build hooks environment for this LU.
305 @return: Dictionary containing the environment that will be used for
306 running the hooks for this LU. The keys of the dict must not be prefixed
307 with "GANETI_"--that'll be added by the hooks runner. The hooks runner
308 will extend the environment with additional variables. If no environment
309 should be defined, an empty dictionary should be returned (not C{None}).
310 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
314 raise NotImplementedError
316 def BuildHooksNodes(self):
317 """Build list of nodes to run LU's hooks.
319 @rtype: tuple; (list, list)
320 @return: Tuple containing a list of node names on which the hook
321 should run before the execution and a list of node names on which the
322 hook should run after the execution. If there are no nodes, an empty
323 list should be returned (and not None).
324 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
328 raise NotImplementedError
330 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
331 """Notify the LU about the results of its hooks.
333 This method is called every time a hooks phase is executed, and notifies
334 the Logical Unit about the hooks' result. The LU can then use it to alter
335 its result based on the hooks. By default the method does nothing and the
336 previous result is passed back unchanged, but any LU can define it if it
337 wants to use the local cluster hook-scripts somehow.
339 @param phase: one of L{constants.HOOKS_PHASE_POST} or
340 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
341 @param hook_results: the results of the multi-node hooks rpc call
342 @param feedback_fn: function used to send feedback back to the caller
343 @param lu_result: the previous Exec result this LU had, or None
345 @return: the new Exec result, based on the previous result
349 # API must be kept, thus we ignore the unused argument and the 'could
350 # be a function' warnings
351 # pylint: disable=W0613,R0201
354 def _ExpandAndLockInstance(self):
355 """Helper function to expand and lock an instance.
357 Many LUs that work on an instance take its name in self.op.instance_name
358 and need to expand it and then declare the expanded name for locking. This
359 function does it, and then updates self.op.instance_name to the expanded
360 name. It also initializes needed_locks as a dict, if this hasn't been done
364 if self.needed_locks is None:
365 self.needed_locks = {}
367 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
368 "_ExpandAndLockInstance called with instance-level locks set"
369 self.op.instance_name = _ExpandInstanceName(self.cfg,
370 self.op.instance_name)
371 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
373 def _LockInstancesNodes(self, primary_only=False,
374 level=locking.LEVEL_NODE):
375 """Helper function to declare instances' nodes for locking.
377 This function should be called after locking one or more instances to lock
378 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
379 with all primary or secondary nodes for instances already locked and
380 present in self.needed_locks[locking.LEVEL_INSTANCE].
382 It should be called from DeclareLocks, and for safety only works if
383 self.recalculate_locks[locking.LEVEL_NODE] is set.
385 In the future it may grow parameters to just lock some instance's nodes, or
386 to just lock primaries or secondary nodes, if needed.
388 It should be called in DeclareLocks in a way similar to::
390 if level == locking.LEVEL_NODE:
391 self._LockInstancesNodes()
393 @type primary_only: boolean
394 @param primary_only: only lock primary nodes of locked instances
395 @param level: Which lock level to use for locking nodes
398 assert level in self.recalculate_locks, \
399 "_LockInstancesNodes helper function called with no nodes to recalculate"
401 # TODO: check whether we've really been called with the instance locks held
403 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
404 # future we might want to have different behaviors depending on the value
405 # of self.recalculate_locks[locking.LEVEL_NODE]
407 locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
408 for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
409 wanted_nodes.append(instance.primary_node)
411 wanted_nodes.extend(instance.secondary_nodes)
413 if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
414 self.needed_locks[level] = wanted_nodes
415 elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
416 self.needed_locks[level].extend(wanted_nodes)
418 raise errors.ProgrammerError("Unknown recalculation mode")
420 del self.recalculate_locks[level]
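# Illustrative sketch, not part of the original module: a typical instance LU
# combines the two helpers above, expanding and locking the instance in
# ExpandNames and resolving its node locks lazily in DeclareLocks. The class
# name below is hypothetical.
#
#   class LUInstanceExample(LogicalUnit):
#     def ExpandNames(self):
#       self._ExpandAndLockInstance()
#       self.needed_locks[locking.LEVEL_NODE] = []
#       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#     def DeclareLocks(self, level):
#       if level == locking.LEVEL_NODE:
#         self._LockInstancesNodes()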
423 class NoHooksLU(LogicalUnit): # pylint: disable=W0223
424 """Simple LU which runs no hooks.
426 This LU is intended as a parent for other LogicalUnits which will
427 run no hooks, in order to reduce duplicate code.
433 def BuildHooksEnv(self):
434 """Empty BuildHooksEnv for NoHooksLu.
436 This just raises an error.
439 raise AssertionError("BuildHooksEnv called for NoHooksLUs")
441 def BuildHooksNodes(self):
442 """Empty BuildHooksNodes for NoHooksLU.
445 raise AssertionError("BuildHooksNodes called for NoHooksLU")
449 """Tasklet base class.
451 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
452 they can mix legacy code with tasklets. Locking needs to be done in the LU,
453 tasklets know nothing about locks.
455 Subclasses must follow these rules:
456 - Implement CheckPrereq
460 def __init__(self, lu):
467 def CheckPrereq(self):
468 """Check prerequisites for this tasklets.
470 This method should check whether the prerequisites for the execution of
471 this tasklet are fulfilled. It can do internode communication, but it
472 should be idempotent - no cluster or system changes are allowed.
474 The method should raise errors.OpPrereqError in case something is not
475 fulfilled. Its return value is ignored.
477 This method should also update all parameters to their canonical form if it
478 hasn't been done before.
483 def Exec(self, feedback_fn):
484 """Execute the tasklet.
486 This method should implement the actual work. It should raise
487 errors.OpExecError for failures that are somewhat dealt with in code, or
491 raise NotImplementedError
495 """Base for query utility classes.
498 #: Attribute holding field definitions
504 def __init__(self, qfilter, fields, use_locking):
505 """Initializes this class.
508 self.use_locking = use_locking
510 self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
511 namefield=self.SORT_FIELD)
512 self.requested_data = self.query.RequestedData()
513 self.names = self.query.RequestedNames()
515 # Sort only if no names were requested
516 self.sort_by_name = not self.names
518 self.do_locking = None
521 def _GetNames(self, lu, all_names, lock_level):
522 """Helper function to determine names asked for in the query.
526 names = lu.owned_locks(lock_level)
530 if self.wanted == locking.ALL_SET:
531 assert not self.names
532 # caller didn't specify names, so ordering is not important
533 return utils.NiceSort(names)
535 # caller specified names and we must keep the same order
537 assert not self.do_locking or lu.glm.is_owned(lock_level)
539 missing = set(self.wanted).difference(names)
541 raise errors.OpExecError("Some items were removed before retrieving"
542 " their data: %s" % missing)
544 # Return expanded names
547 def ExpandNames(self, lu):
548 """Expand names for this query.
550 See L{LogicalUnit.ExpandNames}.
553 raise NotImplementedError()
555 def DeclareLocks(self, lu, level):
556 """Declare locks for this query.
558 See L{LogicalUnit.DeclareLocks}.
561 raise NotImplementedError()
563 def _GetQueryData(self, lu):
564 """Collects all data for this query.
566 @return: Query data object
569 raise NotImplementedError()
571 def NewStyleQuery(self, lu):
572 """Collect data and execute query.
575 return query.GetQueryResponse(self.query, self._GetQueryData(lu),
576 sort_by_name=self.sort_by_name)
578 def OldStyleQuery(self, lu):
579 """Collect data and execute query.
582 return self.query.OldStyleQuery(self._GetQueryData(lu),
583 sort_by_name=self.sort_by_name)
587 """Returns a dict declaring all lock levels shared.
590 return dict.fromkeys(locking.LEVELS, 1)
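# Usage sketch: read-only LUs typically pair this helper with an all-inclusive
# needed_locks, as LUClusterVerifyConfig does further down in this module:
#
#   self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
#   self.share_locks = _ShareAll()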
593 def _AnnotateDiskParams(instance, devs, cfg):
594 """Little helper wrapper to the rpc annotation method.
596 @param instance: The instance object
597 @type devs: List of L{objects.Disk}
598 @param devs: The root devices (not any of its children!)
599 @param cfg: The config object
600 @returns: The annotated disk copies
601 @see: L{rpc.AnnotateDiskParams}
604 return rpc.AnnotateDiskParams(instance.disk_template, devs,
605 cfg.GetInstanceDiskParams(instance))
608 def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
610 """Checks if node groups for locked instances are still correct.
612 @type cfg: L{config.ConfigWriter}
613 @param cfg: Cluster configuration
614 @type instances: dict; string as key, L{objects.Instance} as value
615 @param instances: Dictionary, instance name as key, instance object as value
616 @type owned_groups: iterable of string
617 @param owned_groups: List of owned groups
618 @type owned_nodes: iterable of string
619 @param owned_nodes: List of owned nodes
620 @type cur_group_uuid: string or None
621 @param cur_group_uuid: Optional group UUID to check against instance's groups
624 for (name, inst) in instances.items():
625 assert owned_nodes.issuperset(inst.all_nodes), \
626 "Instance %s's nodes changed while we kept the lock" % name
628 inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
630 assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
631 "Instance %s has no node in group %s" % (name, cur_group_uuid)
634 def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
636 """Checks if the owned node groups are still correct for an instance.
638 @type cfg: L{config.ConfigWriter}
639 @param cfg: The cluster configuration
640 @type instance_name: string
641 @param instance_name: Instance name
642 @type owned_groups: set or frozenset
643 @param owned_groups: List of currently owned node groups
644 @type primary_only: boolean
645 @param primary_only: Whether to check node groups for only the primary node
648 inst_groups = cfg.GetInstanceNodeGroups(instance_name, primary_only)
650 if not owned_groups.issuperset(inst_groups):
651 raise errors.OpPrereqError("Instance %s's node groups changed since"
652 " locks were acquired, current groups are"
653 " are '%s', owning groups '%s'; retry the"
656 utils.CommaJoin(inst_groups),
657 utils.CommaJoin(owned_groups)),
663 def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
664 """Checks if the instances in a node group are still correct.
666 @type cfg: L{config.ConfigWriter}
667 @param cfg: The cluster configuration
668 @type group_uuid: string
669 @param group_uuid: Node group UUID
670 @type owned_instances: set or frozenset
671 @param owned_instances: List of currently owned instances
674 wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
675 if owned_instances != wanted_instances:
676 raise errors.OpPrereqError("Instances in node group '%s' changed since"
677 " locks were acquired, wanted '%s', have '%s';"
678 " retry the operation" %
680 utils.CommaJoin(wanted_instances),
681 utils.CommaJoin(owned_instances)),
684 return wanted_instances
687 def _SupportsOob(cfg, node):
688 """Tells if node supports OOB.
690 @type cfg: L{config.ConfigWriter}
691 @param cfg: The cluster configuration
692 @type node: L{objects.Node}
693 @param node: The node
694 @return: The OOB script if supported or an empty string otherwise
697 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
700 def _CopyLockList(names):
701 """Makes a copy of a list of lock names.
703 Handles L{locking.ALL_SET} correctly.
706 if names == locking.ALL_SET:
707 return locking.ALL_SET
712 def _GetWantedNodes(lu, nodes):
713 """Returns list of checked and expanded node names.
715 @type lu: L{LogicalUnit}
716 @param lu: the logical unit on whose behalf we execute
718 @param nodes: list of node names or None for all nodes
720 @return: the list of nodes, sorted
721 @raise errors.ProgrammerError: if the nodes parameter is wrong type
725 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
727 return utils.NiceSort(lu.cfg.GetNodeList())
730 def _GetWantedInstances(lu, instances):
731 """Returns list of checked and expanded instance names.
733 @type lu: L{LogicalUnit}
734 @param lu: the logical unit on whose behalf we execute
735 @type instances: list
736 @param instances: list of instance names or None for all instances
738 @return: the list of instances, sorted
739 @raise errors.OpPrereqError: if the instances parameter is wrong type
740 @raise errors.OpPrereqError: if any of the passed instances is not found
744 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
746 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
750 def _GetUpdatedParams(old_params, update_dict,
751 use_default=True, use_none=False):
752 """Return the new version of a parameter dictionary.
754 @type old_params: dict
755 @param old_params: old parameters
756 @type update_dict: dict
757 @param update_dict: dict containing new parameter values, or
758 constants.VALUE_DEFAULT to reset the parameter to its default
760 @type use_default: boolean
761 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
762 values as 'to be deleted' values
763 @type use_none: boolean
764 @param use_none: whether to recognise C{None} values as 'to be
767 @return: the new parameter dictionary
770 params_copy = copy.deepcopy(old_params)
771 for key, val in update_dict.iteritems():
772 if ((use_default and val == constants.VALUE_DEFAULT) or
773 (use_none and val is None)):
779 params_copy[key] = val
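# Sketch of the semantics described in the docstring above (parameter names
# and values are made up for illustration): constants.VALUE_DEFAULT drops a
# key so the default applies again, None does the same when use_none=True, and
# any other value simply overrides the old one.
#
#   _GetUpdatedParams({"kernel_path": "/boot/vmlinuz", "root_path": "/dev/sda"},
#                     {"kernel_path": constants.VALUE_DEFAULT,
#                      "root_path": "/dev/vda"})
#   # -> {"root_path": "/dev/vda"}
#
#   _GetUpdatedParams({"root_path": "/dev/sda"}, {"root_path": None},
#                     use_none=True)
#   # -> {}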
783 def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
784 """Return the new version of a instance policy.
786 @param group_policy: whether this policy applies to a group and thus
787 we should support removal of policy entries
790 use_none = use_default = group_policy
791 ipolicy = copy.deepcopy(old_ipolicy)
792 for key, value in new_ipolicy.items():
793 if key not in constants.IPOLICY_ALL_KEYS:
794 raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
796 if key in constants.IPOLICY_ISPECS:
797 utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
798 ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
800 use_default=use_default)
802 if (not value or value == [constants.VALUE_DEFAULT] or
803 value == constants.VALUE_DEFAULT):
807 raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
808 " on the cluster'" % key,
811 if key in constants.IPOLICY_PARAMETERS:
812 # FIXME: we assume all such values are float
814 ipolicy[key] = float(value)
815 except (TypeError, ValueError), err:
816 raise errors.OpPrereqError("Invalid value for attribute"
817 " '%s': '%s', error: %s" %
818 (key, value, err), errors.ECODE_INVAL)
820 # FIXME: we assume all others are lists; this should be redone
822 ipolicy[key] = list(value)
824 objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
825 except errors.ConfigurationError, err:
826 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
831 def _UpdateAndVerifySubDict(base, updates, type_check):
832 """Updates and verifies a dict with sub dicts of the same type.
834 @param base: The dict with the old data
835 @param updates: The dict with the new data
836 @param type_check: Dict suitable to ForceDictType to verify correct types
837 @returns: A new dict with updated and verified values
841 new = _GetUpdatedParams(old, value)
842 utils.ForceDictType(new, type_check)
845 ret = copy.deepcopy(base)
846 ret.update(dict((key, fn(base.get(key, {}), value))
847 for key, value in updates.items()))
851 def _MergeAndVerifyHvState(op_input, obj_input):
852 """Combines the hv state from an opcode with the one of the object
854 @param op_input: The input dict from the opcode
855 @param obj_input: The input dict from the objects
856 @return: The verified and updated dict
860 invalid_hvs = set(op_input) - constants.HYPER_TYPES
862 raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
863 " %s" % utils.CommaJoin(invalid_hvs),
865 if obj_input is None:
867 type_check = constants.HVSTS_PARAMETER_TYPES
868 return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
873 def _MergeAndVerifyDiskState(op_input, obj_input):
874 """Combines the disk state from an opcode with the one of the object
876 @param op_input: The input dict from the opcode
877 @param obj_input: The input dict from the objects
878 @return: The verified and updated dict
881 invalid_dst = set(op_input) - constants.DS_VALID_TYPES
883 raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
884 utils.CommaJoin(invalid_dst),
886 type_check = constants.DSS_PARAMETER_TYPES
887 if obj_input is None:
889 return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
891 for key, value in op_input.items())
896 def _ReleaseLocks(lu, level, names=None, keep=None):
897 """Releases locks owned by an LU.
899 @type lu: L{LogicalUnit}
900 @param level: Lock level
901 @type names: list or None
902 @param names: Names of locks to release
903 @type keep: list or None
904 @param keep: Names of locks to retain
907 assert not (keep is not None and names is not None), \
908 "Only one of the 'names' and the 'keep' parameters can be given"
910 if names is not None:
911 should_release = names.__contains__
913 should_release = lambda name: name not in keep
915 should_release = None
917 owned = lu.owned_locks(level)
919 # Not owning any lock at this level, do nothing
926 # Determine which locks to release
928 if should_release(name):
933 assert len(lu.owned_locks(level)) == (len(retain) + len(release))
935 # Release just some locks
936 lu.glm.release(level, names=release)
938 assert frozenset(lu.owned_locks(level)) == frozenset(retain)
941 lu.glm.release(level)
943 assert not lu.glm.is_owned(level), "No locks should be owned"
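# Usage sketch (hypothetical call sites): the 'names' and 'keep' keyword
# arguments are mutually exclusive, so a caller either lists the locks to drop
# or the ones to retain.
#
#   # drop every node lock except the instance's primary node
#   _ReleaseLocks(lu, locking.LEVEL_NODE, keep=[instance.primary_node])
#
#   # drop a specific set of node locks
#   _ReleaseLocks(lu, locking.LEVEL_NODE, names=unused_nodes)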
946 def _MapInstanceDisksToNodes(instances):
947 """Creates a map from (node, volume) to instance name.
949 @type instances: list of L{objects.Instance}
950 @rtype: dict; tuple of (node name, volume name) as key, instance name as value
953 return dict(((node, vol), inst.name)
954 for inst in instances
955 for (node, vols) in inst.MapLVsByNode().items()
959 def _RunPostHook(lu, node_name):
960 """Runs the post-hook for an opcode on a single node.
963 hm = lu.proc.BuildHooksManager(lu)
965 hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
966 except Exception, err: # pylint: disable=W0703
967 lu.LogWarning("Errors occurred running hooks on %s: %s",
971 def _CheckOutputFields(static, dynamic, selected):
972 """Checks whether all selected fields are valid.
974 @type static: L{utils.FieldSet}
975 @param static: static fields set
976 @type dynamic: L{utils.FieldSet}
977 @param dynamic: dynamic fields set
984 delta = f.NonMatching(selected)
986 raise errors.OpPrereqError("Unknown output fields selected: %s"
987 % ",".join(delta), errors.ECODE_INVAL)
990 def _CheckGlobalHvParams(params):
991 """Validates that given hypervisor params are not global ones.
993 This will ensure that instances don't get customised versions of
997 used_globals = constants.HVC_GLOBALS.intersection(params)
999 msg = ("The following hypervisor parameters are global and cannot"
1000 " be customized at instance level, please modify them at"
1001 " cluster level: %s" % utils.CommaJoin(used_globals))
1002 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1005 def _CheckNodeOnline(lu, node, msg=None):
1006 """Ensure that a given node is online.
1008 @param lu: the LU on behalf of which we make the check
1009 @param node: the node to check
1010 @param msg: if passed, should be a message to replace the default one
1011 @raise errors.OpPrereqError: if the node is offline
1015 msg = "Can't use offline node"
1016 if lu.cfg.GetNodeInfo(node).offline:
1017 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
1020 def _CheckNodeNotDrained(lu, node):
1021 """Ensure that a given node is not drained.
1023 @param lu: the LU on behalf of which we make the check
1024 @param node: the node to check
1025 @raise errors.OpPrereqError: if the node is drained
1028 if lu.cfg.GetNodeInfo(node).drained:
1029 raise errors.OpPrereqError("Can't use drained node %s" % node,
1033 def _CheckNodeVmCapable(lu, node):
1034 """Ensure that a given node is vm capable.
1036 @param lu: the LU on behalf of which we make the check
1037 @param node: the node to check
1038 @raise errors.OpPrereqError: if the node is not vm capable
1041 if not lu.cfg.GetNodeInfo(node).vm_capable:
1042 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
1046 def _CheckNodeHasOS(lu, node, os_name, force_variant):
1047 """Ensure that a node supports a given OS.
1049 @param lu: the LU on behalf of which we make the check
1050 @param node: the node to check
1051 @param os_name: the OS to query about
1052 @param force_variant: whether to ignore variant errors
1053 @raise errors.OpPrereqError: if the node is not supporting the OS
1056 result = lu.rpc.call_os_get(node, os_name)
1057 result.Raise("OS '%s' not in supported OS list for node %s" %
1059 prereq=True, ecode=errors.ECODE_INVAL)
1060 if not force_variant:
1061 _CheckOSVariant(result.payload, os_name)
1064 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
1065 """Ensure that a node has the given secondary ip.
1067 @type lu: L{LogicalUnit}
1068 @param lu: the LU on behalf of which we make the check
1070 @param node: the node to check
1071 @type secondary_ip: string
1072 @param secondary_ip: the ip to check
1073 @type prereq: boolean
1074 @param prereq: whether to throw a prerequisite or an execute error
1075 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
1076 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
1079 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
1080 result.Raise("Failure checking secondary ip on node %s" % node,
1081 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1082 if not result.payload:
1083 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
1084 " please fix and re-run this command" % secondary_ip)
1086 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
1088 raise errors.OpExecError(msg)
1091 def _GetClusterDomainSecret():
1092 """Reads the cluster domain secret.
1095 return utils.ReadOneLineFile(pathutils.CLUSTER_DOMAIN_SECRET_FILE,
1099 def _CheckInstanceState(lu, instance, req_states, msg=None):
1100 """Ensure that an instance is in one of the required states.
1102 @param lu: the LU on behalf of which we make the check
1103 @param instance: the instance to check
1104 @param msg: if passed, should be a message to replace the default one
1105 @raise errors.OpPrereqError: if the instance is not in the required state
1109 msg = ("can't use instance from outside %s states" %
1110 utils.CommaJoin(req_states))
1111 if instance.admin_state not in req_states:
1112 raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
1113 (instance.name, instance.admin_state, msg),
1116 if constants.ADMINST_UP not in req_states:
1117 pnode = instance.primary_node
1118 if not lu.cfg.GetNodeInfo(pnode).offline:
1119 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
1120 ins_l.Raise("Can't contact node %s for instance information" % pnode,
1121 prereq=True, ecode=errors.ECODE_ENVIRON)
1122 if instance.name in ins_l.payload:
1123 raise errors.OpPrereqError("Instance %s is running, %s" %
1124 (instance.name, msg), errors.ECODE_STATE)
1126 lu.LogWarning("Primary node offline, ignoring check that instance"
1130 def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
1131 """Computes if value is in the desired range.
1133 @param name: name of the parameter for which we perform the check
1134 @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
1136 @param ipolicy: dictionary containing min, max and std values
1137 @param value: actual value that we want to use
1138 @return: None or element not meeting the criteria
1142 if value in [None, constants.VALUE_AUTO]:
1144 max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
1145 min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
1146 if value > max_v or min_v > value:
1148 fqn = "%s/%s" % (name, qualifier)
1151 return ("%s value %s is not in range [%s, %s]" %
1152 (fqn, value, min_v, max_v))
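# Behaviour sketch with made-up numbers: None and constants.VALUE_AUTO are
# never flagged, a value inside the policy's [min, max] interval yields None,
# and anything outside yields the error string built above.
#
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", ipolicy, 512)
#   # -> None if 512 is within the policy's memory-size bounds,
#   #    otherwise a "... value 512 is not in range [min, max]" message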
1156 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
1157 nic_count, disk_sizes, spindle_use,
1158 _compute_fn=_ComputeMinMaxSpec):
1159 """Verifies ipolicy against provided specs.
1162 @param ipolicy: The ipolicy
1164 @param mem_size: The memory size
1165 @type cpu_count: int
1166 @param cpu_count: Used cpu cores
1167 @type disk_count: int
1168 @param disk_count: Number of disks used
1169 @type nic_count: int
1170 @param nic_count: Number of nics used
1171 @type disk_sizes: list of ints
1172 @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
1173 @type spindle_use: int
1174 @param spindle_use: The number of spindles this instance uses
1175 @param _compute_fn: The compute function (unittest only)
1176 @return: A list of violations, or an empty list if no violations are found
1179 assert disk_count == len(disk_sizes)
1182 (constants.ISPEC_MEM_SIZE, "", mem_size),
1183 (constants.ISPEC_CPU_COUNT, "", cpu_count),
1184 (constants.ISPEC_DISK_COUNT, "", disk_count),
1185 (constants.ISPEC_NIC_COUNT, "", nic_count),
1186 (constants.ISPEC_SPINDLE_USE, "", spindle_use),
1187 ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
1188 for idx, d in enumerate(disk_sizes)]
1191 (_compute_fn(name, qualifier, ipolicy, value)
1192 for (name, qualifier, value) in test_settings))
1195 def _ComputeIPolicyInstanceViolation(ipolicy, instance,
1196 _compute_fn=_ComputeIPolicySpecViolation):
1197 """Compute if instance meets the specs of ipolicy.
1200 @param ipolicy: The ipolicy to verify against
1201 @type instance: L{objects.Instance}
1202 @param instance: The instance to verify
1203 @param _compute_fn: The function to verify ipolicy (unittest only)
1204 @see: L{_ComputeIPolicySpecViolation}
1207 mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
1208 cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
1209 spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
1210 disk_count = len(instance.disks)
1211 disk_sizes = [disk.size for disk in instance.disks]
1212 nic_count = len(instance.nics)
1214 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
1215 disk_sizes, spindle_use)
1218 def _ComputeIPolicyInstanceSpecViolation(
1219 ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation):
1220 """Compute if instance specs meets the specs of ipolicy.
1223 @param ipolicy: The ipolicy to verify against
1224 @type instance_spec: dict
1225 @param instance_spec: The instance spec to verify
1226 @param _compute_fn: The function to verify ipolicy (unittest only)
1227 @see: L{_ComputeIPolicySpecViolation}
1230 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
1231 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
1232 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
1233 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
1234 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
1235 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
1237 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
1238 disk_sizes, spindle_use)
1241 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
1243 _compute_fn=_ComputeIPolicyInstanceViolation):
1244 """Compute if instance meets the specs of the new target group.
1246 @param ipolicy: The ipolicy to verify
1247 @param instance: The instance object to verify
1248 @param current_group: The current group of the instance
1249 @param target_group: The new group of the instance
1250 @param _compute_fn: The function to verify ipolicy (unittest only)
1251 @see: L{_ComputeIPolicySpecViolation}
1254 if current_group == target_group:
1257 return _compute_fn(ipolicy, instance)
1260 def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
1261 _compute_fn=_ComputeIPolicyNodeViolation):
1262 """Checks that the target node is correct in terms of instance policy.
1264 @param ipolicy: The ipolicy to verify
1265 @param instance: The instance object to verify
1266 @param node: The new node to relocate
1267 @param ignore: Ignore violations of the ipolicy
1268 @param _compute_fn: The function to verify ipolicy (unittest only)
1269 @see: L{_ComputeIPolicySpecViolation}
1272 primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
1273 res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
1276 msg = ("Instance does not meet target node group's (%s) instance"
1277 " policy: %s") % (node.group, utils.CommaJoin(res))
1281 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1284 def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
1285 """Computes a set of any instances that would violate the new ipolicy.
1287 @param old_ipolicy: The current (still in-place) ipolicy
1288 @param new_ipolicy: The new (to become) ipolicy
1289 @param instances: List of instances to verify
1290 @return: A list of instances which violate the new ipolicy but
1294 return (_ComputeViolatingInstances(new_ipolicy, instances) -
1295 _ComputeViolatingInstances(old_ipolicy, instances))
1298 def _ExpandItemName(fn, name, kind):
1299 """Expand an item name.
1301 @param fn: the function to use for expansion
1302 @param name: requested item name
1303 @param kind: text description ('Node' or 'Instance')
1304 @return: the resolved (full) name
1305 @raise errors.OpPrereqError: if the item is not found
1308 full_name = fn(name)
1309 if full_name is None:
1310 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
1315 def _ExpandNodeName(cfg, name):
1316 """Wrapper over L{_ExpandItemName} for nodes."""
1317 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
1320 def _ExpandInstanceName(cfg, name):
1321 """Wrapper over L{_ExpandItemName} for instance."""
1322 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
1325 def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
1326 network_type, mac_prefix, tags):
1327 """Builds network related env variables for hooks
1329 This builds the hook environment from individual variables.
1332 @param name: the name of the network
1333 @type subnet: string
1334 @param subnet: the ipv4 subnet
1335 @type gateway: string
1336 @param gateway: the ipv4 gateway
1337 @type network6: string
1338 @param network6: the ipv6 subnet
1339 @type gateway6: string
1340 @param gateway6: the ipv6 gateway
1341 @type network_type: string
1342 @param network_type: the type of the network
1343 @type mac_prefix: string
1344 @param mac_prefix: the mac_prefix
1346 @param tags: the tags of the network
1351 env["NETWORK_NAME"] = name
1353 env["NETWORK_SUBNET"] = subnet
1355 env["NETWORK_GATEWAY"] = gateway
1357 env["NETWORK_SUBNET6"] = network6
1359 env["NETWORK_GATEWAY6"] = gateway6
1361 env["NETWORK_MAC_PREFIX"] = mac_prefix
1363 env["NETWORK_TYPE"] = network_type
1365 env["NETWORK_TAGS"] = " ".join(tags)
1370 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
1371 minmem, maxmem, vcpus, nics, disk_template, disks,
1372 bep, hvp, hypervisor_name, tags):
1373 """Builds instance related env variables for hooks
1375 This builds the hook environment from individual variables.
1378 @param name: the name of the instance
1379 @type primary_node: string
1380 @param primary_node: the name of the instance's primary node
1381 @type secondary_nodes: list
1382 @param secondary_nodes: list of secondary nodes as strings
1383 @type os_type: string
1384 @param os_type: the name of the instance's OS
1385 @type status: string
1386 @param status: the desired status of the instance
1387 @type minmem: string
1388 @param minmem: the minimum memory size of the instance
1389 @type maxmem: string
1390 @param maxmem: the maximum memory size of the instance
1392 @param vcpus: the count of VCPUs the instance has
1394 @param nics: list of tuples (ip, mac, mode, link, network) representing
1395 the NICs the instance has
1396 @type disk_template: string
1397 @param disk_template: the disk template of the instance
1399 @param disks: the list of (size, mode) pairs
1401 @param bep: the backend parameters for the instance
1403 @param hvp: the hypervisor parameters for the instance
1404 @type hypervisor_name: string
1405 @param hypervisor_name: the hypervisor for the instance
1407 @param tags: list of instance tags as strings
1409 @return: the hook environment for this instance
1414 "INSTANCE_NAME": name,
1415 "INSTANCE_PRIMARY": primary_node,
1416 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
1417 "INSTANCE_OS_TYPE": os_type,
1418 "INSTANCE_STATUS": status,
1419 "INSTANCE_MINMEM": minmem,
1420 "INSTANCE_MAXMEM": maxmem,
1421 # TODO(2.7) remove deprecated "memory" value
1422 "INSTANCE_MEMORY": maxmem,
1423 "INSTANCE_VCPUS": vcpus,
1424 "INSTANCE_DISK_TEMPLATE": disk_template,
1425 "INSTANCE_HYPERVISOR": hypervisor_name,
1428 nic_count = len(nics)
1429 for idx, (ip, mac, mode, link, net, netinfo) in enumerate(nics):
1432 env["INSTANCE_NIC%d_IP" % idx] = ip
1433 env["INSTANCE_NIC%d_MAC" % idx] = mac
1434 env["INSTANCE_NIC%d_MODE" % idx] = mode
1435 env["INSTANCE_NIC%d_LINK" % idx] = link
1437 env["INSTANCE_NIC%d_NETWORK" % idx] = net
1439 nobj = objects.Network.FromDict(netinfo)
1441 env["INSTANCE_NIC%d_NETWORK_SUBNET" % idx] = nobj.network
1443 env["INSTANCE_NIC%d_NETWORK_GATEWAY" % idx] = nobj.gateway
1445 env["INSTANCE_NIC%d_NETWORK_SUBNET6" % idx] = nobj.network6
1447 env["INSTANCE_NIC%d_NETWORK_GATEWAY6" % idx] = nobj.gateway6
1449 env["INSTANCE_NIC%d_NETWORK_MAC_PREFIX" % idx] = nobj.mac_prefix
1450 if nobj.network_type:
1451 env["INSTANCE_NIC%d_NETWORK_TYPE" % idx] = nobj.network_type
1453 env["INSTANCE_NIC%d_NETWORK_TAGS" % idx] = " ".join(nobj.tags)
1454 if mode == constants.NIC_MODE_BRIDGED:
1455 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
1459 env["INSTANCE_NIC_COUNT"] = nic_count
1462 disk_count = len(disks)
1463 for idx, (size, mode) in enumerate(disks):
1464 env["INSTANCE_DISK%d_SIZE" % idx] = size
1465 env["INSTANCE_DISK%d_MODE" % idx] = mode
1469 env["INSTANCE_DISK_COUNT"] = disk_count
1474 env["INSTANCE_TAGS"] = " ".join(tags)
1476 for source, kind in [(bep, "BE"), (hvp, "HV")]:
1477 for key, value in source.items():
1478 env["INSTANCE_%s_%s" % (kind, key)] = value
1483 def _NICToTuple(lu, nic):
1484 """Build a tupple of nic information.
1486 @type lu: L{LogicalUnit}
1487 @param lu: the logical unit on whose behalf we execute
1488 @type nic: L{objects.NIC}
1489 @param nic: nic to convert to hooks tuple
1494 cluster = lu.cfg.GetClusterInfo()
1495 filled_params = cluster.SimpleFillNIC(nic.nicparams)
1496 mode = filled_params[constants.NIC_MODE]
1497 link = filled_params[constants.NIC_LINK]
1501 net_uuid = lu.cfg.LookupNetwork(net)
1503 nobj = lu.cfg.GetNetwork(net_uuid)
1504 netinfo = objects.Network.ToDict(nobj)
1505 return (ip, mac, mode, link, net, netinfo)
1508 def _NICListToTuple(lu, nics):
1509 """Build a list of nic information tuples.
1511 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
1512 value in LUInstanceQueryData.
1514 @type lu: L{LogicalUnit}
1515 @param lu: the logical unit on whose behalf we execute
1516 @type nics: list of L{objects.NIC}
1517 @param nics: list of nics to convert to hooks tuples
1522 hooks_nics.append(_NICToTuple(lu, nic))
1526 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
1527 """Builds instance related env variables for hooks from an object.
1529 @type lu: L{LogicalUnit}
1530 @param lu: the logical unit on whose behalf we execute
1531 @type instance: L{objects.Instance}
1532 @param instance: the instance for which we should build the
1534 @type override: dict
1535 @param override: dictionary with key/values that will override
1538 @return: the hook environment dictionary
1541 cluster = lu.cfg.GetClusterInfo()
1542 bep = cluster.FillBE(instance)
1543 hvp = cluster.FillHV(instance)
1545 "name": instance.name,
1546 "primary_node": instance.primary_node,
1547 "secondary_nodes": instance.secondary_nodes,
1548 "os_type": instance.os,
1549 "status": instance.admin_state,
1550 "maxmem": bep[constants.BE_MAXMEM],
1551 "minmem": bep[constants.BE_MINMEM],
1552 "vcpus": bep[constants.BE_VCPUS],
1553 "nics": _NICListToTuple(lu, instance.nics),
1554 "disk_template": instance.disk_template,
1555 "disks": [(disk.size, disk.mode) for disk in instance.disks],
1558 "hypervisor_name": instance.hypervisor,
1559 "tags": instance.tags,
1562 args.update(override)
1563 return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
1566 def _AdjustCandidatePool(lu, exceptions):
1567 """Adjust the candidate pool after node operations.
1570 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
1572 lu.LogInfo("Promoted nodes to master candidate role: %s",
1573 utils.CommaJoin(node.name for node in mod_list))
1574 for name in mod_list:
1575 lu.context.ReaddNode(name)
1576 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1578 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
1582 def _DecideSelfPromotion(lu, exceptions=None):
1583 """Decide whether I should promote myself as a master candidate.
1586 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
1587 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1588 # the new node will increase mc_max with one, so:
1589 mc_should = min(mc_should + 1, cp_size)
1590 return mc_now < mc_should
1593 def _ComputeViolatingInstances(ipolicy, instances):
1594 """Computes a set of instances who violates given ipolicy.
1596 @param ipolicy: The ipolicy to verify
1597 @type instances: iterable of L{objects.Instance}
1598 @param instances: List of instances to verify
1599 @return: A frozenset of instance names violating the ipolicy
1602 return frozenset([inst.name for inst in instances
1603 if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
1606 def _CheckNicsBridgesExist(lu, target_nics, target_node):
1607 """Check that the brigdes needed by a list of nics exist.
1610 cluster = lu.cfg.GetClusterInfo()
1611 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
1612 brlist = [params[constants.NIC_LINK] for params in paramslist
1613 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
1615 result = lu.rpc.call_bridges_exist(target_node, brlist)
1616 result.Raise("Error checking bridges on destination node '%s'" %
1617 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
1620 def _CheckInstanceBridgesExist(lu, instance, node=None):
1621 """Check that the brigdes needed by an instance exist.
1625 node = instance.primary_node
1626 _CheckNicsBridgesExist(lu, instance.nics, node)
1629 def _CheckOSVariant(os_obj, name):
1630 """Check whether an OS name conforms to the os variants specification.
1632 @type os_obj: L{objects.OS}
1633 @param os_obj: OS object to check
1635 @param name: OS name passed by the user, to check for validity
1638 variant = objects.OS.GetVariant(name)
1639 if not os_obj.supported_variants:
1641 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
1642 " passed)" % (os_obj.name, variant),
1646 raise errors.OpPrereqError("OS name must include a variant",
1649 if variant not in os_obj.supported_variants:
1650 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1653 def _GetNodeInstancesInner(cfg, fn):
1654 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1657 def _GetNodeInstances(cfg, node_name):
1658 """Returns a list of all primary and secondary instances on a node.
1662 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1665 def _GetNodePrimaryInstances(cfg, node_name):
1666 """Returns primary instances on a node.
1669 return _GetNodeInstancesInner(cfg,
1670 lambda inst: node_name == inst.primary_node)
1673 def _GetNodeSecondaryInstances(cfg, node_name):
1674 """Returns secondary instances on a node.
1677 return _GetNodeInstancesInner(cfg,
1678 lambda inst: node_name in inst.secondary_nodes)
1681 def _GetStorageTypeArgs(cfg, storage_type):
1682 """Returns the arguments for a storage type.
1685 # Special case for file storage
1686 if storage_type == constants.ST_FILE:
1687 # storage.FileStorage wants a list of storage directories
1688 return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1693 def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
1696 for dev in instance.disks:
1697 cfg.SetDiskID(dev, node_name)
1699 result = rpc_runner.call_blockdev_getmirrorstatus(node_name, (instance.disks,
1701 result.Raise("Failed to get disk status from node %s" % node_name,
1702 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1704 for idx, bdev_status in enumerate(result.payload):
1705 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1711 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1712 """Check the sanity of iallocator and node arguments and use the
1713 cluster-wide iallocator if appropriate.
1715 Check that at most one of (iallocator, node) is specified. If none is
1716 specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
1717 then the LU's opcode's iallocator slot is filled with the cluster-wide
1720 @type iallocator_slot: string
1721 @param iallocator_slot: the name of the opcode iallocator slot
1722 @type node_slot: string
1723 @param node_slot: the name of the opcode target node slot
1726 node = getattr(lu.op, node_slot, None)
1727 ialloc = getattr(lu.op, iallocator_slot, None)
1731 if node is not None and ialloc is not None:
1732 raise errors.OpPrereqError("Do not specify both, iallocator and node",
1734 elif ((node is None and ialloc is None) or
1735 ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
1736 default_iallocator = lu.cfg.GetDefaultIAllocator()
1737 if default_iallocator:
1738 setattr(lu.op, iallocator_slot, default_iallocator)
1740 raise errors.OpPrereqError("No iallocator or node given and no"
1741 " cluster-wide default iallocator found;"
1742 " please specify either an iallocator or a"
1743 " node, or set a cluster-wide default"
1744 " iallocator", errors.ECODE_INVAL)
1747 def _GetDefaultIAllocator(cfg, ialloc):
1748 """Decides on which iallocator to use.
1750 @type cfg: L{config.ConfigWriter}
1751 @param cfg: Cluster configuration object
1752 @type ialloc: string or None
1753 @param ialloc: Iallocator specified in opcode
1755 @return: Iallocator name
1759 # Use default iallocator
1760 ialloc = cfg.GetDefaultIAllocator()
1763 raise errors.OpPrereqError("No iallocator was specified, neither in the"
1764 " opcode nor as a cluster-wide default",
1770 def _CheckHostnameSane(lu, name):
1771 """Ensures that a given hostname resolves to a 'sane' name.
1773 The given name is required to be a prefix of the resolved hostname,
1774 to prevent accidental mismatches.
1776 @param lu: the logical unit on behalf of which we're checking
1777 @param name: the name we should resolve and check
1778 @return: the resolved hostname object
1781 hostname = netutils.GetHostname(name=name)
1782 if hostname.name != name:
1783 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
1784 if not utils.MatchNameComponent(name, [hostname.name]):
1785 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
1786 " same as given hostname '%s'") %
1787 (hostname.name, name), errors.ECODE_INVAL)
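# Example of the check above (hypothetical names): resolving "instance7" to
# "instance7.example.com" is accepted because the given name is a component
# prefix of the resolved one, while a resolver answer such as
# "other-host.example.com" raises OpPrereqError.
#
#   hostname = _CheckHostnameSane(lu, "instance7")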
1791 class LUClusterPostInit(LogicalUnit):
1792 """Logical unit for running hooks after cluster initialization.
1795 HPATH = "cluster-init"
1796 HTYPE = constants.HTYPE_CLUSTER
1798 def BuildHooksEnv(self):
1803 "OP_TARGET": self.cfg.GetClusterName(),
1806 def BuildHooksNodes(self):
1807 """Build hooks nodes.
1810 return ([], [self.cfg.GetMasterNode()])
1812 def Exec(self, feedback_fn):
1819 class LUClusterDestroy(LogicalUnit):
1820 """Logical unit for destroying the cluster.
1823 HPATH = "cluster-destroy"
1824 HTYPE = constants.HTYPE_CLUSTER
1826 def BuildHooksEnv(self):
1831 "OP_TARGET": self.cfg.GetClusterName(),
1834 def BuildHooksNodes(self):
1835 """Build hooks nodes.
1840 def CheckPrereq(self):
1841 """Check prerequisites.
1843 This checks whether the cluster is empty.
1845 Any errors are signaled by raising errors.OpPrereqError.
1848 master = self.cfg.GetMasterNode()
1850 nodelist = self.cfg.GetNodeList()
1851 if len(nodelist) != 1 or nodelist[0] != master:
1852 raise errors.OpPrereqError("There are still %d node(s) in"
1853 " this cluster." % (len(nodelist) - 1),
1855 instancelist = self.cfg.GetInstanceList()
1857 raise errors.OpPrereqError("There are still %d instance(s) in"
1858 " this cluster." % len(instancelist),
1861 def Exec(self, feedback_fn):
1862 """Destroys the cluster.
1865 master_params = self.cfg.GetMasterNetworkParameters()
1867 # Run post hooks on master node before it's removed
1868 _RunPostHook(self, master_params.name)
1870 ems = self.cfg.GetUseExternalMipScript()
1871 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
1874 self.LogWarning("Error disabling the master IP address: %s",
1877 return master_params.name
1880 def _VerifyCertificate(filename):
1881 """Verifies a certificate for L{LUClusterVerifyConfig}.
1883 @type filename: string
1884 @param filename: Path to PEM file
1888 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1889 utils.ReadFile(filename))
1890 except Exception, err: # pylint: disable=W0703
1891 return (LUClusterVerifyConfig.ETYPE_ERROR,
1892 "Failed to load X509 certificate %s: %s" % (filename, err))
1895 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1896 constants.SSL_CERT_EXPIRATION_ERROR)
1899 fnamemsg = "While verifying %s: %s" % (filename, msg)
1904 return (None, fnamemsg)
1905 elif errcode == utils.CERT_WARNING:
1906 return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
1907 elif errcode == utils.CERT_ERROR:
1908 return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)
1910 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1913 def _GetAllHypervisorParameters(cluster, instances):
1914 """Compute the set of all hypervisor parameters.
1916 @type cluster: L{objects.Cluster}
1917 @param cluster: the cluster object
1918 @type instances: list of L{objects.Instance}
1919 @param instances: additional instances from which to obtain parameters
1920 @rtype: list of (origin, hypervisor, parameters)
1921 @return: a list with all parameters found, indicating the hypervisor they
1922 apply to, and the origin (can be "cluster", "os X", or "instance Y")
1927 for hv_name in cluster.enabled_hypervisors:
1928 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
1930 for os_name, os_hvp in cluster.os_hvp.items():
1931 for hv_name, hv_params in os_hvp.items():
1933 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
1934 hvp_data.append(("os %s" % os_name, hv_name, full_params))
1936 # TODO: collapse identical parameter values into a single one
1937 for instance in instances:
1938 if instance.hvparams:
1939 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
1940 cluster.FillHV(instance)))
1945 class _VerifyErrors(object):
1946 """Mix-in for cluster/group verify LUs.
1948 It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
1949 self.op and self._feedback_fn to be available.)
1953 ETYPE_FIELD = "code"
1954 ETYPE_ERROR = "ERROR"
1955 ETYPE_WARNING = "WARNING"
1957 def _Error(self, ecode, item, msg, *args, **kwargs):
1958 """Format an error message.
1960 Based on the opcode's error_codes parameter, either format a
1961 parseable error code, or a simpler error string.
1963 This must be called only from Exec and functions called from Exec.
1966 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1967 itype, etxt, _ = ecode
1968 # first complete the msg
1971 # then format the whole message
1972 if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
1973 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1979 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1980 # and finally report it via the feedback_fn
1981 self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101
1983 def _ErrorIf(self, cond, ecode, *args, **kwargs):
1984 """Log an error message if the passed condition is True.
1988 or self.op.debug_simulate_errors) # pylint: disable=E1101
1990 # If the error code is in the list of ignored errors, demote the error to a warning
1992 (_, etxt, _) = ecode
1993 if etxt in self.op.ignore_errors: # pylint: disable=E1101
1994 kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING
1997 self._Error(ecode, *args, **kwargs)
1999 # do not mark the operation as failed for WARN cases only
2000 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
2001 self.bad = self.bad or cond
2004 class LUClusterVerify(NoHooksLU):
2005 """Submits all jobs necessary to verify the cluster.
2010 def ExpandNames(self):
2011 self.needed_locks = {}
2013 def Exec(self, feedback_fn):
2016 if self.op.group_name:
2017 groups = [self.op.group_name]
2018 depends_fn = lambda: None
2020 groups = self.cfg.GetNodeGroupList()
2022 # Verify global configuration
2024 opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors),
2027 # Always depend on global verification
2028 depends_fn = lambda: [(-len(jobs), [])]
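# (the negative entry is a relative job dependency, pointing back at the
# config-verification job submitted just above)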
2031 [opcodes.OpClusterVerifyGroup(group_name=group,
2032 ignore_errors=self.op.ignore_errors,
2033 depends=depends_fn())]
2034 for group in groups)
2036 # Fix up all parameters
2037 for op in itertools.chain(*jobs): # pylint: disable=W0142
2038 op.debug_simulate_errors = self.op.debug_simulate_errors
2039 op.verbose = self.op.verbose
2040 op.error_codes = self.op.error_codes
2042 op.skip_checks = self.op.skip_checks
2043 except AttributeError:
2044 assert not isinstance(op, opcodes.OpClusterVerifyGroup)
2046 return ResultWithJobs(jobs)
2049 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
2050 """Verifies the cluster config.
2055 def _VerifyHVP(self, hvp_data):
2056 """Verifies locally the syntax of the hypervisor parameters.
2059 for item, hv_name, hv_params in hvp_data:
2060 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2063 hv_class = hypervisor.GetHypervisor(hv_name)
2064 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2065 hv_class.CheckParameterSyntax(hv_params)
2066 except errors.GenericError, err:
2067 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
2069 def ExpandNames(self):
2070 self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
2071 self.share_locks = _ShareAll()
2073 def CheckPrereq(self):
2074 """Check prerequisites.
2077 # Retrieve all information
2078 self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
2079 self.all_node_info = self.cfg.GetAllNodesInfo()
2080 self.all_inst_info = self.cfg.GetAllInstancesInfo()
2082 def Exec(self, feedback_fn):
2083 """Verify integrity of cluster, performing various test on nodes.
2087 self._feedback_fn = feedback_fn
2089 feedback_fn("* Verifying cluster config")
2091 for msg in self.cfg.VerifyConfig():
2092 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
2094 feedback_fn("* Verifying cluster certificate files")
2096 for cert_filename in pathutils.ALL_CERT_FILES:
2097 (errcode, msg) = _VerifyCertificate(cert_filename)
2098 self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
2100 feedback_fn("* Verifying hypervisor parameters")
2102 self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
2103 self.all_inst_info.values()))
2105 feedback_fn("* Verifying all nodes belong to an existing group")
2107 # We do this verification here because, should this bogus circumstance
2108 # occur, it would never be caught by VerifyGroup, which only acts on
2109 # nodes/instances reachable from existing node groups.
2111 dangling_nodes = set(node.name for node in self.all_node_info.values()
2112 if node.group not in self.all_group_info)
2114 dangling_instances = {}
2115 no_node_instances = []
2117 for inst in self.all_inst_info.values():
2118 if inst.primary_node in dangling_nodes:
2119 dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
2120 elif inst.primary_node not in self.all_node_info:
2121 no_node_instances.append(inst.name)
2126 utils.CommaJoin(dangling_instances.get(node.name,
2128 for node in dangling_nodes]
2130 self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
2132 "the following nodes (and their instances) belong to a non"
2133 " existing group: %s", utils.CommaJoin(pretty_dangling))
2135 self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
2137 "the following instances have a non-existing primary-node:"
2138 " %s", utils.CommaJoin(no_node_instances))
2143 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
2144 """Verifies the status of a node group.
2147 HPATH = "cluster-verify"
2148 HTYPE = constants.HTYPE_CLUSTER
2151 _HOOKS_INDENT_RE = re.compile("^", re.M)
2153 class NodeImage(object):
2154 """A class representing the logical and physical status of a node.
2157 @ivar name: the node name to which this object refers
2158 @ivar volumes: a structure as returned from
2159 L{ganeti.backend.GetVolumeList} (runtime)
2160 @ivar instances: a list of running instances (runtime)
2161 @ivar pinst: list of configured primary instances (config)
2162 @ivar sinst: list of configured secondary instances (config)
2163 @ivar sbp: dictionary of {primary-node: list of instances} for all
2164 instances for which this node is secondary (config)
2165 @ivar mfree: free memory, as reported by hypervisor (runtime)
2166 @ivar dfree: free disk, as reported by the node (runtime)
2167 @ivar offline: the offline status (config)
2168 @type rpc_fail: boolean
2169 @ivar rpc_fail: whether the RPC verify call was successful (overall,
2170 not whether the individual keys were correct) (runtime)
2171 @type lvm_fail: boolean
2172 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
2173 @type hyp_fail: boolean
2174 @ivar hyp_fail: whether the RPC call didn't return the instance list
2175 @type ghost: boolean
2176 @ivar ghost: whether this is a known node or not (config)
2177 @type os_fail: boolean
2178 @ivar os_fail: whether the RPC call didn't return valid OS data
2180 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
2181 @type vm_capable: boolean
2182 @ivar vm_capable: whether the node can host instances
2185 def __init__(self, offline=False, name=None, vm_capable=True):
2194 self.offline = offline
2195 self.vm_capable = vm_capable
2196 self.rpc_fail = False
2197 self.lvm_fail = False
2198 self.hyp_fail = False
2200 self.os_fail = False
2203 def ExpandNames(self):
2204 # This raises errors.OpPrereqError on its own:
2205 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
2207 # Get instances in node group; this is unsafe and needs verification later
2209 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2211 self.needed_locks = {
2212 locking.LEVEL_INSTANCE: inst_names,
2213 locking.LEVEL_NODEGROUP: [self.group_uuid],
2214 locking.LEVEL_NODE: [],
2216 # This opcode is run by the watcher every five minutes and acquires all nodes
2217 # for a group. It doesn't run for a long time, so it's better to acquire
2218 # the node allocation lock as well.
2219 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2222 self.share_locks = _ShareAll()
2224 def DeclareLocks(self, level):
2225 if level == locking.LEVEL_NODE:
2226 # Get members of node group; this is unsafe and needs verification later
2227 nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
2229 all_inst_info = self.cfg.GetAllInstancesInfo()
2231 # In Exec(), we warn about mirrored instances that have primary and
2232 # secondary living in separate node groups. To fully verify that
2233 # volumes for these instances are healthy, we will need to do an
2234 # extra call to their secondaries. We ensure here those nodes will
2236 for inst in self.owned_locks(locking.LEVEL_INSTANCE):
2237 # Important: access only the instances whose lock is owned
2238 if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
2239 nodes.update(all_inst_info[inst].secondary_nodes)
2241 self.needed_locks[locking.LEVEL_NODE] = nodes
2243 def CheckPrereq(self):
2244 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
2245 self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
2247 group_nodes = set(self.group_info.members)
2249 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2252 group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2254 unlocked_instances = \
2255 group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
2258 raise errors.OpPrereqError("Missing lock for nodes: %s" %
2259 utils.CommaJoin(unlocked_nodes),
2262 if unlocked_instances:
2263 raise errors.OpPrereqError("Missing lock for instances: %s" %
2264 utils.CommaJoin(unlocked_instances),
2267 self.all_node_info = self.cfg.GetAllNodesInfo()
2268 self.all_inst_info = self.cfg.GetAllInstancesInfo()
2270 self.my_node_names = utils.NiceSort(group_nodes)
2271 self.my_inst_names = utils.NiceSort(group_instances)
2273 self.my_node_info = dict((name, self.all_node_info[name])
2274 for name in self.my_node_names)
2276 self.my_inst_info = dict((name, self.all_inst_info[name])
2277 for name in self.my_inst_names)
2279 # We detect here the nodes that will need the extra RPC calls for verifying
2280 # split LV volumes; they should be locked.
2281 extra_lv_nodes = set()
2283 for inst in self.my_inst_info.values():
2284 if inst.disk_template in constants.DTS_INT_MIRROR:
2285 for nname in inst.all_nodes:
2286 if self.all_node_info[nname].group != self.group_uuid:
2287 extra_lv_nodes.add(nname)
2289 unlocked_lv_nodes = \
2290 extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2292 if unlocked_lv_nodes:
2293 raise errors.OpPrereqError("Missing node locks for LV check: %s" %
2294 utils.CommaJoin(unlocked_lv_nodes),
2296 self.extra_lv_nodes = list(extra_lv_nodes)
2298 def _VerifyNode(self, ninfo, nresult):
2299 """Perform some basic validation on data returned from a node.
2301 - check the result data structure is well formed and has all the expected keys
2303 - check ganeti version
2305 @type ninfo: L{objects.Node}
2306 @param ninfo: the node to check
2307 @param nresult: the results from the node
2309 @return: whether overall this call was successful (and we can expect
2310 reasonable values in the response)
2314 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2316 # main result, nresult should be a non-empty dict
2317 test = not nresult or not isinstance(nresult, dict)
2318 _ErrorIf(test, constants.CV_ENODERPC, node,
2319 "unable to verify node: no data returned")
2323 # compares ganeti version
2324 local_version = constants.PROTOCOL_VERSION
2325 remote_version = nresult.get("version", None)
2326 test = not (remote_version and
2327 isinstance(remote_version, (list, tuple)) and
2328 len(remote_version) == 2)
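# remote_version is expected to be a (protocol version, release version) pair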
2329 _ErrorIf(test, constants.CV_ENODERPC, node,
2330 "connection to node returned invalid data")
2334 test = local_version != remote_version[0]
2335 _ErrorIf(test, constants.CV_ENODEVERSION, node,
2336 "incompatible protocol versions: master %s,"
2337 " node %s", local_version, remote_version[0])
2341 # node seems compatible, we can actually try to look into its results
2343 # full package version
2344 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
2345 constants.CV_ENODEVERSION, node,
2346 "software version mismatch: master %s, node %s",
2347 constants.RELEASE_VERSION, remote_version[1],
2348 code=self.ETYPE_WARNING)
2350 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
2351 if ninfo.vm_capable and isinstance(hyp_result, dict):
2352 for hv_name, hv_result in hyp_result.iteritems():
2353 test = hv_result is not None
2354 _ErrorIf(test, constants.CV_ENODEHV, node,
2355 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
2357 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
2358 if ninfo.vm_capable and isinstance(hvp_result, list):
2359 for item, hv_name, hv_result in hvp_result:
2360 _ErrorIf(True, constants.CV_ENODEHV, node,
2361 "hypervisor %s parameter verify failure (source %s): %s",
2362 hv_name, item, hv_result)
2364 test = nresult.get(constants.NV_NODESETUP,
2365 ["Missing NODESETUP results"])
2366 _ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
2371 def _VerifyNodeTime(self, ninfo, nresult,
2372 nvinfo_starttime, nvinfo_endtime):
2373 """Check the node time.
2375 @type ninfo: L{objects.Node}
2376 @param ninfo: the node to check
2377 @param nresult: the remote results for the node
2378 @param nvinfo_starttime: the start time of the RPC call
2379 @param nvinfo_endtime: the end time of the RPC call
2383 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2385 ntime = nresult.get(constants.NV_TIME, None)
2387 ntime_merged = utils.MergeTime(ntime)
2388 except (ValueError, TypeError):
2389 _ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time")
2392 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
2393 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
2394 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
2395 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
2399 _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node,
2400 "Node time diverges by at least %s from master node time",
2403 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
2404 """Check the node LVM results.
2406 @type ninfo: L{objects.Node}
2407 @param ninfo: the node to check
2408 @param nresult: the remote results for the node
2409 @param vg_name: the configured VG name
2416 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2418 # checks vg existence and size > 20G
2419 vglist = nresult.get(constants.NV_VGLIST, None)
2421 _ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
2423 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
2424 constants.MIN_VG_SIZE)
2425 _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
2428 pvlist = nresult.get(constants.NV_PVLIST, None)
2429 test = pvlist is None
2430 _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
2432 # check that ':' is not present in PV names, since it's a
2433 # special character for lvcreate (denotes the range of PEs to use on the PV)
2435 for _, pvname, owner_vg in pvlist:
2436 test = ":" in pvname
2437 _ErrorIf(test, constants.CV_ENODELVM, node,
2438 "Invalid character ':' in PV '%s' of VG '%s'",
2441 def _VerifyNodeBridges(self, ninfo, nresult, bridges):
2442 """Check the node bridges.
2444 @type ninfo: L{objects.Node}
2445 @param ninfo: the node to check
2446 @param nresult: the remote results for the node
2447 @param bridges: the expected list of bridges
2454 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2456 missing = nresult.get(constants.NV_BRIDGES, None)
2457 test = not isinstance(missing, list)
2458 _ErrorIf(test, constants.CV_ENODENET, node,
2459 "did not return valid bridge information")
2461 _ErrorIf(bool(missing), constants.CV_ENODENET, node,
2462 "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
2464 def _VerifyNodeUserScripts(self, ninfo, nresult):
2465 """Check the results of user scripts presence and executability on the node
2467 @type ninfo: L{objects.Node}
2468 @param ninfo: the node to check
2469 @param nresult: the remote results for the node
2474 test = not constants.NV_USERSCRIPTS in nresult
2475 self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node,
2476 "did not return user scripts information")
2478 broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
2480 self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node,
2481 "user scripts not present or not executable: %s" %
2482 utils.CommaJoin(sorted(broken_scripts)))
2484 def _VerifyNodeNetwork(self, ninfo, nresult):
2485 """Check the node network connectivity results.
2487 @type ninfo: L{objects.Node}
2488 @param ninfo: the node to check
2489 @param nresult: the remote results for the node
2493 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2495 test = constants.NV_NODELIST not in nresult
2496 _ErrorIf(test, constants.CV_ENODESSH, node,
2497 "node hasn't returned node ssh connectivity data")
2499 if nresult[constants.NV_NODELIST]:
2500 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
2501 _ErrorIf(True, constants.CV_ENODESSH, node,
2502 "ssh communication with node '%s': %s", a_node, a_msg)
2504 test = constants.NV_NODENETTEST not in nresult
2505 _ErrorIf(test, constants.CV_ENODENET, node,
2506 "node hasn't returned node tcp connectivity data")
2508 if nresult[constants.NV_NODENETTEST]:
2509 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
2511 _ErrorIf(True, constants.CV_ENODENET, node,
2512 "tcp communication with node '%s': %s",
2513 anode, nresult[constants.NV_NODENETTEST][anode])
2515 test = constants.NV_MASTERIP not in nresult
2516 _ErrorIf(test, constants.CV_ENODENET, node,
2517 "node hasn't returned node master IP reachability data")
2519 if not nresult[constants.NV_MASTERIP]:
2520 if node == self.master_node:
2521 msg = "the master node cannot reach the master IP (not configured?)"
2523 msg = "cannot reach the master IP"
2524 _ErrorIf(True, constants.CV_ENODENET, node, msg)
2526 def _VerifyInstance(self, instance, instanceconfig, node_image,
2528 """Verify an instance.
2530 This function checks to see if the required block devices are
2531 available on the instance's nodes.
2534 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2535 node_current = instanceconfig.primary_node
2537 node_vol_should = {}
2538 instanceconfig.MapLVsByNode(node_vol_should)
2540 cluster = self.cfg.GetClusterInfo()
2541 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2543 err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
2544 _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
2545 code=self.ETYPE_WARNING)
2547 for node in node_vol_should:
2548 n_img = node_image[node]
2549 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2550 # ignore missing volumes on offline or broken nodes
2552 for volume in node_vol_should[node]:
2553 test = volume not in n_img.volumes
2554 _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
2555 "volume %s missing on node %s", volume, node)
2557 if instanceconfig.admin_state == constants.ADMINST_UP:
2558 pri_img = node_image[node_current]
2559 test = instance not in pri_img.instances and not pri_img.offline
2560 _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
2561 "instance not running on its primary node %s",
2564 diskdata = [(nname, success, status, idx)
2565 for (nname, disks) in diskstatus.items()
2566 for idx, (success, status) in enumerate(disks)]
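# diskdata flattens the per-node disk status map into
# (node name, success flag, status payload, disk index) tuples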
2568 for nname, success, bdev_status, idx in diskdata:
2569 # the 'ghost node' construction in Exec() ensures that we have a
2571 snode = node_image[nname]
2572 bad_snode = snode.ghost or snode.offline
2573 _ErrorIf(instanceconfig.admin_state == constants.ADMINST_UP and
2574 not success and not bad_snode,
2575 constants.CV_EINSTANCEFAULTYDISK, instance,
2576 "couldn't retrieve status for disk/%s on %s: %s",
2577 idx, nname, bdev_status)
2578 _ErrorIf((instanceconfig.admin_state == constants.ADMINST_UP and
2579 success and bdev_status.ldisk_status == constants.LDS_FAULTY),
2580 constants.CV_EINSTANCEFAULTYDISK, instance,
2581 "disk/%s on %s is faulty", idx, nname)
2583 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
2584 """Verify if there are any unknown volumes in the cluster.
2586 The .os, .swap and backup volumes are ignored. All other volumes are
2587 reported as unknown.
2589 @type reserved: L{ganeti.utils.FieldSet}
2590 @param reserved: a FieldSet of reserved volume names
2593 for node, n_img in node_image.items():
2594 if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
2595 self.all_node_info[node].group != self.group_uuid):
2596 # skip non-healthy nodes
2598 for volume in n_img.volumes:
2599 test = ((node not in node_vol_should or
2600 volume not in node_vol_should[node]) and
2601 not reserved.Matches(volume))
2602 self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
2603 "volume %s is unknown", volume)
2605 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
2606 """Verify N+1 Memory Resilience.
2608 Check that if one single node dies we can still start all the
2609 instances it was primary for.
2612 cluster_info = self.cfg.GetClusterInfo()
2613 for node, n_img in node_image.items():
2614 # This code checks that every node which is now listed as
2615 # secondary has enough memory to host all instances it is
2616 # supposed to host, should a single other node in the cluster fail.
2617 # FIXME: not ready for failover to an arbitrary node
2618 # FIXME: does not support file-backed instances
2619 # WARNING: we currently take into account down instances as well
2620 # as up ones, considering that even if they're down someone
2621 # might want to start them even in the event of a node failure.
2622 if n_img.offline or self.all_node_info[node].group != self.group_uuid:
2623 # we're skipping nodes marked offline and nodes in other groups from
2624 # the N+1 warning, since most likely we don't have good memory
2625 # information from them; we already list instances living on such
2626 # nodes, and that's enough warning
2628 #TODO(dynmem): also consider ballooning out other instances
2629 for prinode, instances in n_img.sbp.items():
2631 for instance in instances:
2632 bep = cluster_info.FillBE(instance_cfg[instance])
2633 if bep[constants.BE_AUTO_BALANCE]:
2634 needed_mem += bep[constants.BE_MINMEM]
2635 test = n_img.mfree < needed_mem
2636 self._ErrorIf(test, constants.CV_ENODEN1, node,
2637 "not enough memory to accomodate instance failovers"
2638 " should node %s fail (%dMiB needed, %dMiB available)",
2639 prinode, needed_mem, n_img.mfree)
2642 def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
2643 (files_all, files_opt, files_mc, files_vm)):
2644 """Verifies file checksums collected from all nodes.
2646 @param errorif: Callback for reporting errors
2647 @param nodeinfo: List of L{objects.Node} objects
2648 @param master_node: Name of master node
2649 @param all_nvinfo: RPC results
2652 # Define functions determining which nodes to consider for a file
2655 (files_mc, lambda node: (node.master_candidate or
2656 node.name == master_node)),
2657 (files_vm, lambda node: node.vm_capable),
2660 # Build mapping from filename to list of nodes which should have the file
2662 for (files, fn) in files2nodefn:
2664 filenodes = nodeinfo
2666 filenodes = filter(fn, nodeinfo)
2667 nodefiles.update((filename,
2668 frozenset(map(operator.attrgetter("name"), filenodes)))
2669 for filename in files)
2671 assert set(nodefiles) == (files_all | files_mc | files_vm)
2673 fileinfo = dict((filename, {}) for filename in nodefiles)
2674 ignore_nodes = set()
2676 for node in nodeinfo:
2678 ignore_nodes.add(node.name)
2681 nresult = all_nvinfo[node.name]
2683 if nresult.fail_msg or not nresult.payload:
2686 fingerprints = nresult.payload.get(constants.NV_FILELIST, None)
2687 node_files = dict((vcluster.LocalizeVirtualPath(key), value)
2688 for (key, value) in fingerprints.items())
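# node_files maps each (de-virtualized) file path to the checksum reported by
# the node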
2691 test = not (node_files and isinstance(node_files, dict))
2692 errorif(test, constants.CV_ENODEFILECHECK, node.name,
2693 "Node did not return file checksum data")
2695 ignore_nodes.add(node.name)
2698 # Build per-checksum mapping from filename to nodes having it
2699 for (filename, checksum) in node_files.items():
2700 assert filename in nodefiles
2701 fileinfo[filename].setdefault(checksum, set()).add(node.name)
2703 for (filename, checksums) in fileinfo.items():
2704 assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2706 # Nodes having the file
2707 with_file = frozenset(node_name
2708 for nodes in fileinfo[filename].values()
2709 for node_name in nodes) - ignore_nodes
2711 expected_nodes = nodefiles[filename] - ignore_nodes
2713 # Nodes missing file
2714 missing_file = expected_nodes - with_file
2716 if filename in files_opt:
2718 errorif(missing_file and missing_file != expected_nodes,
2719 constants.CV_ECLUSTERFILECHECK, None,
2720 "File %s is optional, but it must exist on all or no"
2721 " nodes (not found on %s)",
2722 filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2724 errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
2725 "File %s is missing from node(s) %s", filename,
2726 utils.CommaJoin(utils.NiceSort(missing_file)))
2728 # Warn if a node has a file it shouldn't
2729 unexpected = with_file - expected_nodes
2731 constants.CV_ECLUSTERFILECHECK, None,
2732 "File %s should not exist on node(s) %s",
2733 filename, utils.CommaJoin(utils.NiceSort(unexpected)))
2735 # See if there are multiple versions of the file
2736 test = len(checksums) > 1
2738 variants = ["variant %s on %s" %
2739 (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2740 for (idx, (checksum, nodes)) in
2741 enumerate(sorted(checksums.items()))]
2745 errorif(test, constants.CV_ECLUSTERFILECHECK, None,
2746 "File %s found with %s different checksums (%s)",
2747 filename, len(checksums), "; ".join(variants))
2749 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2751 """Verifies and the node DRBD status.
2753 @type ninfo: L{objects.Node}
2754 @param ninfo: the node to check
2755 @param nresult: the remote results for the node
2756 @param instanceinfo: the dict of instances
2757 @param drbd_helper: the configured DRBD usermode helper
2758 @param drbd_map: the DRBD map as returned by
2759 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2763 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2766 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2767 test = (helper_result is None)
2768 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2769 "no drbd usermode helper returned")
2771 status, payload = helper_result
2773 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2774 "drbd usermode helper check unsuccessful: %s", payload)
2775 test = status and (payload != drbd_helper)
2776 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2777 "wrong drbd usermode helper: %s", payload)
2779 # compute the DRBD minors
2781 for minor, instance in drbd_map[node].items():
2782 test = instance not in instanceinfo
2783 _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
2784 "ghost instance '%s' in temporary DRBD map", instance)
2785 # ghost instance should not be running, but otherwise we
2786 # don't give double warnings (both ghost instance and
2787 # unallocated minor in use)
2789 node_drbd[minor] = (instance, False)
2791 instance = instanceinfo[instance]
2792 node_drbd[minor] = (instance.name,
2793 instance.admin_state == constants.ADMINST_UP)
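# at this point node_drbd maps every DRBD minor expected on this node to
# (instance name, whether that instance should be running)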
2795 # and now check them
2796 used_minors = nresult.get(constants.NV_DRBDLIST, [])
2797 test = not isinstance(used_minors, (tuple, list))
2798 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2799 "cannot parse drbd status file: %s", str(used_minors))
2801 # we cannot check drbd status
2804 for minor, (iname, must_exist) in node_drbd.items():
2805 test = minor not in used_minors and must_exist
2806 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2807 "drbd minor %d of instance %s is not active", minor, iname)
2808 for minor in used_minors:
2809 test = minor not in node_drbd
2810 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2811 "unallocated drbd minor %d is in use", minor)
2813 def _UpdateNodeOS(self, ninfo, nresult, nimg):
2814 """Builds the node OS structures.
2816 @type ninfo: L{objects.Node}
2817 @param ninfo: the node to check
2818 @param nresult: the remote results for the node
2819 @param nimg: the node image object
2823 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2825 remote_os = nresult.get(constants.NV_OSLIST, None)
2826 test = (not isinstance(remote_os, list) or
2827 not compat.all(isinstance(v, list) and len(v) == 7
2828 for v in remote_os))
2830 _ErrorIf(test, constants.CV_ENODEOS, node,
2831 "node hasn't returned valid OS data")
2840 for (name, os_path, status, diagnose,
2841 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2843 if name not in os_dict:
2846 # parameters is a list of lists instead of list of tuples due to
2847 # JSON lacking a real tuple type, fix it:
2848 parameters = [tuple(v) for v in parameters]
2849 os_dict[name].append((os_path, status, diagnose,
2850 set(variants), set(parameters), set(api_ver)))
2852 nimg.oslist = os_dict
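# oslist maps each OS name to a list of
# (path, status, diagnose, variants, parameters, api_versions) tuples,
# typically one entry per directory in the OS search path where it was found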
2854 def _VerifyNodeOS(self, ninfo, nimg, base):
2855 """Verifies the node OS list.
2857 @type ninfo: L{objects.Node}
2858 @param ninfo: the node to check
2859 @param nimg: the node image object
2860 @param base: the 'template' node we match against (e.g. from the master)
2864 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2866 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2868 beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2869 for os_name, os_data in nimg.oslist.items():
2870 assert os_data, "Empty OS status for OS %s?!" % os_name
2871 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2872 _ErrorIf(not f_status, constants.CV_ENODEOS, node,
2873 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2874 _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node,
2875 "OS '%s' has multiple entries (first one shadows the rest): %s",
2876 os_name, utils.CommaJoin([v[0] for v in os_data]))
2877 # comparisons with the 'base' image
2878 test = os_name not in base.oslist
2879 _ErrorIf(test, constants.CV_ENODEOS, node,
2880 "Extra OS %s not present on reference node (%s)",
2884 assert base.oslist[os_name], "Base node has empty OS status?"
2885 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2887 # base OS is invalid, skipping
2889 for kind, a, b in [("API version", f_api, b_api),
2890 ("variants list", f_var, b_var),
2891 ("parameters", beautify_params(f_param),
2892 beautify_params(b_param))]:
2893 _ErrorIf(a != b, constants.CV_ENODEOS, node,
2894 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2895 kind, os_name, base.name,
2896 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2898 # check any missing OSes
2899 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2900 _ErrorIf(missing, constants.CV_ENODEOS, node,
2901 "OSes present on reference node %s but missing on this node: %s",
2902 base.name, utils.CommaJoin(missing))
2904 def _VerifyFileStoragePaths(self, ninfo, nresult, is_master):
2905 """Verifies paths in L{pathutils.FILE_STORAGE_PATHS_FILE}.
2907 @type ninfo: L{objects.Node}
2908 @param ninfo: the node to check
2909 @param nresult: the remote results for the node
2910 @type is_master: bool
2911 @param is_master: Whether node is the master node
2917 (constants.ENABLE_FILE_STORAGE or
2918 constants.ENABLE_SHARED_FILE_STORAGE)):
2920 fspaths = nresult[constants.NV_FILE_STORAGE_PATHS]
2922 # This should never happen
2923 self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, node,
2924 "Node did not return forbidden file storage paths")
2926 self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, node,
2927 "Found forbidden file storage paths: %s",
2928 utils.CommaJoin(fspaths))
2930 self._ErrorIf(constants.NV_FILE_STORAGE_PATHS in nresult,
2931 constants.CV_ENODEFILESTORAGEPATHS, node,
2932 "Node should not have returned forbidden file storage"
2935 def _VerifyOob(self, ninfo, nresult):
2936 """Verifies out of band functionality of a node.
2938 @type ninfo: L{objects.Node}
2939 @param ninfo: the node to check
2940 @param nresult: the remote results for the node
2944 # We just have to verify the paths on master and/or master candidates
2945 # as the oob helper is invoked on the master
2946 if ((ninfo.master_candidate or ninfo.master_capable) and
2947 constants.NV_OOB_PATHS in nresult):
2948 for path_result in nresult[constants.NV_OOB_PATHS]:
2949 self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
2951 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2952 """Verifies and updates the node volume data.
2954 This function will update a L{NodeImage}'s internal structures
2955 with data from the remote call.
2957 @type ninfo: L{objects.Node}
2958 @param ninfo: the node to check
2959 @param nresult: the remote results for the node
2960 @param nimg: the node image object
2961 @param vg_name: the configured VG name
2965 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2967 nimg.lvm_fail = True
2968 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2971 elif isinstance(lvdata, basestring):
2972 _ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s",
2973 utils.SafeEncode(lvdata))
2974 elif not isinstance(lvdata, dict):
2975 _ErrorIf(True, constants.CV_ENODELVM, node,
2976 "rpc call to node failed (lvlist)")
2978 nimg.volumes = lvdata
2979 nimg.lvm_fail = False
2981 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2982 """Verifies and updates the node instance list.
2984 If the listing was successful, then updates this node's instance
2985 list. Otherwise, it marks the RPC call as failed for the instance
2988 @type ninfo: L{objects.Node}
2989 @param ninfo: the node to check
2990 @param nresult: the remote results for the node
2991 @param nimg: the node image object
2994 idata = nresult.get(constants.NV_INSTANCELIST, None)
2995 test = not isinstance(idata, list)
2996 self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
2997 "rpc call to node failed (instancelist): %s",
2998 utils.SafeEncode(str(idata)))
3000 nimg.hyp_fail = True
3002 nimg.instances = idata
3004 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
3005 """Verifies and computes a node information map
3007 @type ninfo: L{objects.Node}
3008 @param ninfo: the node to check
3009 @param nresult: the remote results for the node
3010 @param nimg: the node image object
3011 @param vg_name: the configured VG name
3015 _ErrorIf = self._ErrorIf # pylint: disable=C0103
3017 # try to read free memory (from the hypervisor)
3018 hv_info = nresult.get(constants.NV_HVINFO, None)
3019 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
3020 _ErrorIf(test, constants.CV_ENODEHV, node,
3021 "rpc call to node failed (hvinfo)")
3024 nimg.mfree = int(hv_info["memory_free"])
3025 except (ValueError, TypeError):
3026 _ErrorIf(True, constants.CV_ENODERPC, node,
3027 "node returned invalid nodeinfo, check hypervisor")
3029 # FIXME: devise a free space model for file based instances as well
3030 if vg_name is not None:
3031 test = (constants.NV_VGLIST not in nresult or
3032 vg_name not in nresult[constants.NV_VGLIST])
3033 _ErrorIf(test, constants.CV_ENODELVM, node,
3034 "node didn't return data for the volume group '%s'"
3035 " - it is either missing or broken", vg_name)
3038 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
3039 except (ValueError, TypeError):
3040 _ErrorIf(True, constants.CV_ENODERPC, node,
3041 "node returned invalid LVM info, check LVM status")
3043 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
3044 """Gets per-disk status information for all instances.
3046 @type nodelist: list of strings
3047 @param nodelist: Node names
3048 @type node_image: dict of (name, L{objects.Node})
3049 @param node_image: Node objects
3050 @type instanceinfo: dict of (name, L{objects.Instance})
3051 @param instanceinfo: Instance objects
3052 @rtype: {instance: {node: [(success, payload)]}}
3053 @return: a dictionary of per-instance dictionaries with nodes as
3054 keys and disk information as values; the disk information is a
3055 list of tuples (success, payload)
3058 _ErrorIf = self._ErrorIf # pylint: disable=C0103
3061 node_disks_devonly = {}
3062 diskless_instances = set()
3063 diskless = constants.DT_DISKLESS
3065 for nname in nodelist:
3066 node_instances = list(itertools.chain(node_image[nname].pinst,
3067 node_image[nname].sinst))
3068 diskless_instances.update(inst for inst in node_instances
3069 if instanceinfo[inst].disk_template == diskless)
3070 disks = [(inst, disk)
3071 for inst in node_instances
3072 for disk in instanceinfo[inst].disks]
3075 # No need to collect data
3078 node_disks[nname] = disks
3080 # _AnnotateDiskParams already makes copies of the disks
3082 for (inst, dev) in disks:
3083 (anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
3084 self.cfg.SetDiskID(anno_disk, nname)
3085 devonly.append(anno_disk)
3087 node_disks_devonly[nname] = devonly
3089 assert len(node_disks) == len(node_disks_devonly)
3091 # Collect data from all nodes with disks
3092 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
3095 assert len(result) == len(node_disks)
3099 for (nname, nres) in result.items():
3100 disks = node_disks[nname]
3103 # No data from this node
3104 data = len(disks) * [(False, "node offline")]
3107 _ErrorIf(msg, constants.CV_ENODERPC, nname,
3108 "while getting disk information: %s", msg)
3110 # No data from this node
3111 data = len(disks) * [(False, msg)]
3114 for idx, i in enumerate(nres.payload):
3115 if isinstance(i, (tuple, list)) and len(i) == 2:
3118 logging.warning("Invalid result from node %s, entry %d: %s",
3120 data.append((False, "Invalid result from the remote node"))
3122 for ((inst, _), status) in zip(disks, data):
3123 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
3125 # Add empty entries for diskless instances.
3126 for inst in diskless_instances:
3127 assert inst not in instdisk
3130 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
3131 len(nnames) <= len(instanceinfo[inst].all_nodes) and
3132 compat.all(isinstance(s, (tuple, list)) and
3133 len(s) == 2 for s in statuses)
3134 for inst, nnames in instdisk.items()
3135 for nname, statuses in nnames.items())
3136 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
3141 def _SshNodeSelector(group_uuid, all_nodes):
3142 """Create endless iterators for all potential SSH check hosts.
3145 nodes = [node for node in all_nodes
3146 if (node.group != group_uuid and
3148 keyfunc = operator.attrgetter("group")
3150 return map(itertools.cycle,
3151 [sorted(map(operator.attrgetter("name"), names))
3152 for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
3156 def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
3157 """Choose which nodes should talk to which other nodes.
3159 We will make nodes contact all nodes in their group, and one node from every other group.
3162 @warning: This algorithm has a known issue if one node group is much
3163 smaller than others (e.g. just one node). In such a case all other
3164 nodes will talk to the single node.
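For example, with groups {g1: [n1, n2], g2: [n3]}, n1 and n2 would each be
asked to check the other members of g1 plus one node (here n3) from g2.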
3167 online_nodes = sorted(node.name for node in group_nodes if not node.offline)
3168 sel = cls._SshNodeSelector(group_uuid, all_nodes)
3170 return (online_nodes,
3171 dict((name, sorted([i.next() for i in sel]))
3172 for name in online_nodes))
3174 def BuildHooksEnv(self):
3177 Cluster-Verify hooks are run only in the post phase; if they fail, their
3178 output is logged in the verify output and the verification fails.
3182 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()),
3185 env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
3186 for node in self.my_node_info.values())
3190 def BuildHooksNodes(self):
3191 """Build hooks nodes.
3194 return ([], self.my_node_names)
3196 def Exec(self, feedback_fn):
3197 """Verify integrity of the node group, performing various test on nodes.
3200 # This method has too many local variables. pylint: disable=R0914
3201 feedback_fn("* Verifying group '%s'" % self.group_info.name)
3203 if not self.my_node_names:
3205 feedback_fn("* Empty node group, skipping verification")
3209 _ErrorIf = self._ErrorIf # pylint: disable=C0103
3210 verbose = self.op.verbose
3211 self._feedback_fn = feedback_fn
3213 vg_name = self.cfg.GetVGName()
3214 drbd_helper = self.cfg.GetDRBDHelper()
3215 cluster = self.cfg.GetClusterInfo()
3216 groupinfo = self.cfg.GetAllNodeGroupsInfo()
3217 hypervisors = cluster.enabled_hypervisors
3218 node_data_list = [self.my_node_info[name] for name in self.my_node_names]
3220 i_non_redundant = [] # Non redundant instances
3221 i_non_a_balanced = [] # Non auto-balanced instances
3222 i_offline = 0 # Count of offline instances
3223 n_offline = 0 # Count of offline nodes
3224 n_drained = 0 # Count of nodes being drained
3225 node_vol_should = {}
3227 # FIXME: verify OS list
3230 filemap = _ComputeAncillaryFiles(cluster, False)
3232 # do local checksums
3233 master_node = self.master_node = self.cfg.GetMasterNode()
3234 master_ip = self.cfg.GetMasterIP()
3236 feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
3239 if self.cfg.GetUseExternalMipScript():
3240 user_scripts.append(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
3242 node_verify_param = {
3243 constants.NV_FILELIST:
3244 map(vcluster.MakeVirtualPath,
3245 utils.UniqueSequence(filename
3246 for files in filemap
3247 for filename in files)),
3248 constants.NV_NODELIST:
3249 self._SelectSshCheckNodes(node_data_list, self.group_uuid,
3250 self.all_node_info.values()),
3251 constants.NV_HYPERVISOR: hypervisors,
3252 constants.NV_HVPARAMS:
3253 _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
3254 constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
3255 for node in node_data_list
3256 if not node.offline],
3257 constants.NV_INSTANCELIST: hypervisors,
3258 constants.NV_VERSION: None,
3259 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
3260 constants.NV_NODESETUP: None,
3261 constants.NV_TIME: None,
3262 constants.NV_MASTERIP: (master_node, master_ip),
3263 constants.NV_OSLIST: None,
3264 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
3265 constants.NV_USERSCRIPTS: user_scripts,
3268 if vg_name is not None:
3269 node_verify_param[constants.NV_VGLIST] = None
3270 node_verify_param[constants.NV_LVLIST] = vg_name
3271 node_verify_param[constants.NV_PVLIST] = [vg_name]
3274 node_verify_param[constants.NV_DRBDLIST] = None
3275 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
3277 if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
3278 # Load file storage paths only from master node
3279 node_verify_param[constants.NV_FILE_STORAGE_PATHS] = master_node
3282 # FIXME: this needs to be changed per node-group, not cluster-wide
3284 default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
3285 if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3286 bridges.add(default_nicpp[constants.NIC_LINK])
3287 for instance in self.my_inst_info.values():
3288 for nic in instance.nics:
3289 full_nic = cluster.SimpleFillNIC(nic.nicparams)
3290 if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3291 bridges.add(full_nic[constants.NIC_LINK])
3294 node_verify_param[constants.NV_BRIDGES] = list(bridges)
3296 # Build our expected cluster state
3297 node_image = dict((node.name, self.NodeImage(offline=node.offline,
3299 vm_capable=node.vm_capable))
3300 for node in node_data_list)
3304 for node in self.all_node_info.values():
3305 path = _SupportsOob(self.cfg, node)
3306 if path and path not in oob_paths:
3307 oob_paths.append(path)
3310 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
3312 for instance in self.my_inst_names:
3313 inst_config = self.my_inst_info[instance]
3314 if inst_config.admin_state == constants.ADMINST_OFFLINE:
3317 for nname in inst_config.all_nodes:
3318 if nname not in node_image:
3319 gnode = self.NodeImage(name=nname)
3320 gnode.ghost = (nname not in self.all_node_info)
3321 node_image[nname] = gnode
3323 inst_config.MapLVsByNode(node_vol_should)
3325 pnode = inst_config.primary_node
3326 node_image[pnode].pinst.append(instance)
3328 for snode in inst_config.secondary_nodes:
3329 nimg = node_image[snode]
3330 nimg.sinst.append(instance)
3331 if pnode not in nimg.sbp:
3332 nimg.sbp[pnode] = []
3333 nimg.sbp[pnode].append(instance)
3335 # At this point, we have the in-memory data structures complete,
3336 # except for the runtime information, which we'll gather next
3338 # Due to the way our RPC system works, exact response times cannot be
3339 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
3340 # time before and after executing the request, we can at least have a time
3342 nvinfo_starttime = time.time()
3343 all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
3345 self.cfg.GetClusterName())
3346 nvinfo_endtime = time.time()
3348 if self.extra_lv_nodes and vg_name is not None:
3350 self.rpc.call_node_verify(self.extra_lv_nodes,
3351 {constants.NV_LVLIST: vg_name},
3352 self.cfg.GetClusterName())
3354 extra_lv_nvinfo = {}
3356 all_drbd_map = self.cfg.ComputeDRBDMap()
3358 feedback_fn("* Gathering disk information (%s nodes)" %
3359 len(self.my_node_names))
3360 instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
3363 feedback_fn("* Verifying configuration file consistency")
3365 # If not all nodes are being checked, we need to make sure the master node
3366 # and a non-checked vm_capable node are in the list.
3367 absent_nodes = set(self.all_node_info).difference(self.my_node_info)
3369 vf_nvinfo = all_nvinfo.copy()
3370 vf_node_info = list(self.my_node_info.values())
3371 additional_nodes = []
3372 if master_node not in self.my_node_info:
3373 additional_nodes.append(master_node)
3374 vf_node_info.append(self.all_node_info[master_node])
3375 # Add the first vm_capable node we find which is not included,
3376 # excluding the master node (which we already have)
3377 for node in absent_nodes:
3378 nodeinfo = self.all_node_info[node]
3379 if (nodeinfo.vm_capable and not nodeinfo.offline and
3380 node != master_node):
3381 additional_nodes.append(node)
3382 vf_node_info.append(self.all_node_info[node])
3384 key = constants.NV_FILELIST
3385 vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
3386 {key: node_verify_param[key]},
3387 self.cfg.GetClusterName()))
3389 vf_nvinfo = all_nvinfo
3390 vf_node_info = self.my_node_info.values()
3392 self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
3394 feedback_fn("* Verifying node status")
3398 for node_i in node_data_list:
3400 nimg = node_image[node]
3404 feedback_fn("* Skipping offline node %s" % (node,))
3408 if node == master_node:
3410 elif node_i.master_candidate:
3411 ntype = "master candidate"
3412 elif node_i.drained:
3418 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
3420 msg = all_nvinfo[node].fail_msg
3421 _ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
3424 nimg.rpc_fail = True
3427 nresult = all_nvinfo[node].payload
3429 nimg.call_ok = self._VerifyNode(node_i, nresult)
3430 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
3431 self._VerifyNodeNetwork(node_i, nresult)
3432 self._VerifyNodeUserScripts(node_i, nresult)
3433 self._VerifyOob(node_i, nresult)
3434 self._VerifyFileStoragePaths(node_i, nresult,
3435 node == master_node)
3438 self._VerifyNodeLVM(node_i, nresult, vg_name)
3439 self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
3442 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
3443 self._UpdateNodeInstances(node_i, nresult, nimg)
3444 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
3445 self._UpdateNodeOS(node_i, nresult, nimg)
3447 if not nimg.os_fail:
3448 if refos_img is None:
3450 self._VerifyNodeOS(node_i, nimg, refos_img)
3451 self._VerifyNodeBridges(node_i, nresult, bridges)
3453 # Check whether all running instances are primary for the node. (This
3454 # can no longer be done from _VerifyInstance below, since some of the
3455 # wrong instances could be from other node groups.)
3456 non_primary_inst = set(nimg.instances).difference(nimg.pinst)
3458 for inst in non_primary_inst:
3459 test = inst in self.all_inst_info
3460 _ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
3461 "instance should not run on node %s", node_i.name)
3462 _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
3463 "node is running unknown instance %s", inst)
3465 for node, result in extra_lv_nvinfo.items():
3466 self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
3467 node_image[node], vg_name)
3469 feedback_fn("* Verifying instance status")
3470 for instance in self.my_inst_names:
3472 feedback_fn("* Verifying instance %s" % instance)
3473 inst_config = self.my_inst_info[instance]
3474 self._VerifyInstance(instance, inst_config, node_image,
3476 inst_nodes_offline = []
3478 pnode = inst_config.primary_node
3479 pnode_img = node_image[pnode]
3480 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
3481 constants.CV_ENODERPC, pnode, "instance %s, connection to"
3482 " primary node failed", instance)
3484 _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
3486 constants.CV_EINSTANCEBADNODE, instance,
3487 "instance is marked as running and lives on offline node %s",
3488 inst_config.primary_node)
3490 # If the instance is non-redundant we cannot survive losing its primary
3491 # node, so we are not N+1 compliant.
3492 if inst_config.disk_template not in constants.DTS_MIRRORED:
3493 i_non_redundant.append(instance)
3495 _ErrorIf(len(inst_config.secondary_nodes) > 1,
3496 constants.CV_EINSTANCELAYOUT,
3497 instance, "instance has multiple secondary nodes: %s",
3498 utils.CommaJoin(inst_config.secondary_nodes),
3499 code=self.ETYPE_WARNING)
3501 if inst_config.disk_template in constants.DTS_INT_MIRROR:
3502 pnode = inst_config.primary_node
3503 instance_nodes = utils.NiceSort(inst_config.all_nodes)
3504 instance_groups = {}
3506 for node in instance_nodes:
3507 instance_groups.setdefault(self.all_node_info[node].group,
3511 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
3512 # Sort so that we always list the primary node first.
3513 for group, nodes in sorted(instance_groups.items(),
3514 key=lambda (_, nodes): pnode in nodes,
3517 self._ErrorIf(len(instance_groups) > 1,
3518 constants.CV_EINSTANCESPLITGROUPS,
3519 instance, "instance has primary and secondary nodes in"
3520 " different groups: %s", utils.CommaJoin(pretty_list),
3521 code=self.ETYPE_WARNING)
3523 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
3524 i_non_a_balanced.append(instance)
3526 for snode in inst_config.secondary_nodes:
3527 s_img = node_image[snode]
3528 _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
3529 snode, "instance %s, connection to secondary node failed",
3533 inst_nodes_offline.append(snode)
3535 # warn that the instance lives on offline nodes
3536 _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
3537 "instance has offline secondary node(s) %s",
3538 utils.CommaJoin(inst_nodes_offline))
3539 # ... or ghost/non-vm_capable nodes
3540 for node in inst_config.all_nodes:
3541 _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
3542 instance, "instance lives on ghost node %s", node)
3543 _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
3544 instance, "instance lives on non-vm_capable node %s", node)
3546 feedback_fn("* Verifying orphan volumes")
3547 reserved = utils.FieldSet(*cluster.reserved_lvs)
3549 # We will get spurious "unknown volume" warnings if any node of this group
3550 # is secondary for an instance whose primary is in another group. To avoid
3551 # them, we find these instances and add their volumes to node_vol_should.
3552 for inst in self.all_inst_info.values():
3553 for secondary in inst.secondary_nodes:
3554 if (secondary in self.my_node_info
3555 and inst.name not in self.my_inst_info):
3556 inst.MapLVsByNode(node_vol_should)
3559 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
3561 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
3562 feedback_fn("* Verifying N+1 Memory redundancy")
3563 self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
3565 feedback_fn("* Other Notes")
3567 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
3568 % len(i_non_redundant))
3570 if i_non_a_balanced:
3571 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
3572 % len(i_non_a_balanced))
3575 feedback_fn(" - NOTICE: %d offline instance(s) found." % i_offline)
3578 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
3581 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
3585 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
3586 """Analyze the post-hooks' result
3588 This method analyses the hook result, handles it, and sends some
3589 nicely-formatted feedback back to the user.
3591 @param phase: one of L{constants.HOOKS_PHASE_POST} or
3592 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
3593 @param hooks_results: the results of the multi-node hooks rpc call
3594 @param feedback_fn: function used to send feedback back to the caller
3595 @param lu_result: previous Exec result
3596 @return: the new Exec result, based on the previous result
3600 # We only really run POST phase hooks, only for non-empty groups,
3601 # and are only interested in their results
3602 if not self.my_node_names:
3605 elif phase == constants.HOOKS_PHASE_POST:
3606 # Used to change hooks' output to proper indentation
3607 feedback_fn("* Hooks Results")
3608 assert hooks_results, "invalid result from hooks"
3610 for node_name in hooks_results:
3611 res = hooks_results[node_name]
3613 test = msg and not res.offline
3614 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3615 "Communication failure in hooks execution: %s", msg)
3616 if res.offline or msg:
3617 # No need to investigate payload if node is offline or gave an error
3620 for script, hkr, output in res.payload:
3621 test = hkr == constants.HKR_FAIL
3622 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3623 "Script %s failed, output:", script)
3625 output = self._HOOKS_INDENT_RE.sub(" ", output)
3626 feedback_fn("%s" % output)
3632 class LUClusterVerifyDisks(NoHooksLU):
3633 """Verifies the cluster disks status.
3638 def ExpandNames(self):
3639 self.share_locks = _ShareAll()
3640 self.needed_locks = {
3641 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3644 def Exec(self, feedback_fn):
3645 group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
3647 # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
3648 return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
3649 for group in group_names])
3652 class LUGroupVerifyDisks(NoHooksLU):
3653 """Verifies the status of all disks in a node group.
3658 def ExpandNames(self):
3659 # Raises errors.OpPrereqError on its own if group can't be found
3660 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
3662 self.share_locks = _ShareAll()
3663 self.needed_locks = {
3664 locking.LEVEL_INSTANCE: [],
3665 locking.LEVEL_NODEGROUP: [],
3666 locking.LEVEL_NODE: [],
3668 # This opcode acquires all node locks in a group. LUClusterVerifyDisks
3669 # starts one instance of this opcode for every group, which means all
3670 # nodes will be locked for a short amount of time, so it's better to
3671 # acquire the node allocation lock as well.
3672 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3675 def DeclareLocks(self, level):
3676 if level == locking.LEVEL_INSTANCE:
3677 assert not self.needed_locks[locking.LEVEL_INSTANCE]
3679 # Lock instances optimistically, needs verification once node and group
3680 # locks have been acquired
3681 self.needed_locks[locking.LEVEL_INSTANCE] = \
3682 self.cfg.GetNodeGroupInstances(self.group_uuid)
3684 elif level == locking.LEVEL_NODEGROUP:
3685 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3687 self.needed_locks[locking.LEVEL_NODEGROUP] = \
3688 set([self.group_uuid] +
3689 # Lock all groups used by instances optimistically; this requires
3690 # going via the node before it's locked, requiring verification
3693 for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3694 for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
3696 elif level == locking.LEVEL_NODE:
3697 # This will only lock the nodes in the group to be verified which contain actual instances
3699 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3700 self._LockInstancesNodes()
3702 # Lock all nodes in group to be verified
3703 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
3704 member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
3705 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
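# At this point the node lock level covers both the nodes of the optimistically
# locked instances (added via _LockInstancesNodes above) and every member node
# of the group being verified.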
3707 def CheckPrereq(self):
3708 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3709 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3710 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3712 assert self.group_uuid in owned_groups
3714 # Check if locked instances are still correct
3715 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
3717 # Get instance information
3718 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
3720 # Check if node groups for locked instances are still correct
3721 _CheckInstancesNodeGroups(self.cfg, self.instances,
3722 owned_groups, owned_nodes, self.group_uuid)
3724 def Exec(self, feedback_fn):
3725 """Verify integrity of cluster disks.
3727 @rtype: tuple of three items
3728 @return: a tuple of (dict of node-to-node_error, list of instances
3729 which need activate-disks, dict of instance: (node, volume) for missing volumes
3734 res_instances = set()
3737 nv_dict = _MapInstanceDisksToNodes(
3738 [inst for inst in self.instances.values()
3739 if inst.admin_state == constants.ADMINST_UP])
3742 nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
3743 set(self.cfg.GetVmCapableNodeList()))
3745 node_lvs = self.rpc.call_lv_list(nodes, [])
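# Each successful per-node result is assumed to map an LV name (in
# "vg/lv_name" form) to a tuple whose third element is the LV's online flag;
# see the unpacking in the loop below.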
3747 for (node, node_res) in node_lvs.items():
3748 if node_res.offline:
3751 msg = node_res.fail_msg
3753 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
3754 res_nodes[node] = msg
3757 for lv_name, (_, _, lv_online) in node_res.payload.items():
3758 inst = nv_dict.pop((node, lv_name), None)
3759 if not (lv_online or inst is None):
3760 res_instances.add(inst)
3762 # any leftover items in nv_dict are missing LVs, let's arrange the data
3764 for key, inst in nv_dict.iteritems():
3765 res_missing.setdefault(inst, []).append(list(key))
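# Hypothetical example of the returned tuple (names are illustrative only):
#   ({"node2.example.com": "error contacting node"},       # per-node errors
#    ["instance1"],                                          # need activate-disks
#    {"instance2": [["node1.example.com", "xenvg/disk0"]]})  # missing volumes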
3767 return (res_nodes, list(res_instances), res_missing)
3770 class LUClusterRepairDiskSizes(NoHooksLU):
3771 """Verifies the sizes of the cluster disks.
3776 def ExpandNames(self):
3777 if self.op.instances:
3778 self.wanted_names = _GetWantedInstances(self, self.op.instances)
3779 # Not getting the node allocation lock as only a specific set of
3780 # instances (and their nodes) is going to be acquired
3781 self.needed_locks = {
3782 locking.LEVEL_NODE_RES: [],
3783 locking.LEVEL_INSTANCE: self.wanted_names,
3785 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
3787 self.wanted_names = None
3788 self.needed_locks = {
3789 locking.LEVEL_NODE_RES: locking.ALL_SET,
3790 locking.LEVEL_INSTANCE: locking.ALL_SET,
3792 # This opcode acquires the node locks for all instances
3793 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3796 self.share_locks = {
3797 locking.LEVEL_NODE_RES: 1,
3798 locking.LEVEL_INSTANCE: 0,
3799 locking.LEVEL_NODE_ALLOC: 1,
3802 def DeclareLocks(self, level):
3803 if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
3804 self._LockInstancesNodes(primary_only=True, level=level)
3806 def CheckPrereq(self):
3807 """Check prerequisites.
3809 This only checks the optional instance list against the existing names.
3812 if self.wanted_names is None:
3813 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
3815 self.wanted_instances = \
3816 map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
3818 def _EnsureChildSizes(self, disk):
3819 """Ensure children of the disk have the needed disk size.
3821 This is valid mainly for DRBD8 and fixes an issue where the
3822 children have a smaller disk size than the parent.
3824 @param disk: an L{ganeti.objects.Disk} object
3827 if disk.dev_type == constants.LD_DRBD8:
3828 assert disk.children, "Empty children for DRBD8?"
3829 fchild = disk.children[0]
3830 mismatch = fchild.size < disk.size
3832 self.LogInfo("Child disk has size %d, parent %d, fixing",
3833 fchild.size, disk.size)
3834 fchild.size = disk.size
3836 # and we recurse on this child only, not on the metadev
3837 return self._EnsureChildSizes(fchild) or mismatch
3841 def Exec(self, feedback_fn):
3842 """Verify the size of cluster disks.
3845 # TODO: check child disks too
3846 # TODO: check differences in size between primary/secondary nodes
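# Each correction is recorded as an (instance_name, disk_index, size) tuple in
# the "changed" list built below, and the instance configuration is updated in
# place whenever the recorded size disagrees with what the node reports.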
3848 for instance in self.wanted_instances:
3849 pnode = instance.primary_node
3850 if pnode not in per_node_disks:
3851 per_node_disks[pnode] = []
3852 for idx, disk in enumerate(instance.disks):
3853 per_node_disks[pnode].append((instance, idx, disk))
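# per_node_disks now maps each primary node to the list of
# (instance, disk_index, disk) tuples for every disk hosted on it.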
3855 assert not (frozenset(per_node_disks.keys()) -
3856 self.owned_locks(locking.LEVEL_NODE_RES)), \
3857 "Not owning correct locks"
3858 assert not self.owned_locks(locking.LEVEL_NODE)
3861 for node, dskl in per_node_disks.items():
3862 newl = [v[2].Copy() for v in dskl]
3864 self.cfg.SetDiskID(dsk, node)
3865 result = self.rpc.call_blockdev_getsize(node, newl)
3867 self.LogWarning("Failure in blockdev_getsize call to node"
3868 " %s, ignoring", node)
3870 if len(result.payload) != len(dskl):
3871 logging.warning("Invalid result from node %s: len(dskl)=%d,"
3872 " result.payload=%s", node, len(dskl), result.payload)
3873 self.LogWarning("Invalid result from node %s, ignoring node results",
3876 for ((instance, idx, disk), size) in zip(dskl, result.payload):
3878 self.LogWarning("Disk %d of instance %s did not return size"
3879 " information, ignoring", idx, instance.name)
3881 if not isinstance(size, (int, long)):
3882 self.LogWarning("Disk %d of instance %s did not return valid"
3883 " size information, ignoring", idx, instance.name)
3886 if size != disk.size:
3887 self.LogInfo("Disk %d of instance %s has mismatched size,"
3888 " correcting: recorded %d, actual %d", idx,
3889 instance.name, disk.size, size)
3891 self.cfg.Update(instance, feedback_fn)
3892 changed.append((instance.name, idx, size))
3893 if self._EnsureChildSizes(disk):
3894 self.cfg.Update(instance, feedback_fn)
3895 changed.append((instance.name, idx, disk.size))
3899 class LUClusterRename(LogicalUnit):
3900 """Rename the cluster.
3903 HPATH = "cluster-rename"
3904 HTYPE = constants.HTYPE_CLUSTER
3906 def BuildHooksEnv(self):
3911 "OP_TARGET": self.cfg.GetClusterName(),
3912 "NEW_NAME": self.op.name,
3915 def BuildHooksNodes(self):
3916 """Build hooks nodes.
3919 return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
3921 def CheckPrereq(self):
3922 """Verify that the passed name is a valid one.
3925 hostname = netutils.GetHostname(name=self.op.name,
3926 family=self.cfg.GetPrimaryIPFamily())
3928 new_name = hostname.name
3929 self.ip = new_ip = hostname.ip
3930 old_name = self.cfg.GetClusterName()
3931 old_ip = self.cfg.GetMasterIP()
3932 if new_name == old_name and new_ip == old_ip:
3933 raise errors.OpPrereqError("Neither the name nor the IP address of the"
3934 " cluster has changed",
3936 if new_ip != old_ip:
3937 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
3938 raise errors.OpPrereqError("The given cluster IP address (%s) is"
3939 " reachable on the network" %
3940 new_ip, errors.ECODE_NOTUNIQUE)
3942 self.op.name = new_name
3944 def Exec(self, feedback_fn):
3945 """Rename the cluster.
3948 clustername = self.op.name
3951 # shutdown the master IP
3952 master_params = self.cfg.GetMasterNetworkParameters()
3953 ems = self.cfg.GetUseExternalMipScript()
3954 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
3956 result.Raise("Could not disable the master role")
3959 cluster = self.cfg.GetClusterInfo()
3960 cluster.cluster_name = clustername
3961 cluster.master_ip = new_ip
3962 self.cfg.Update(cluster, feedback_fn)
3964 # update the known hosts file
3965 ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
3966 node_list = self.cfg.GetOnlineNodeList()
3968 node_list.remove(master_params.name)
3971 _UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
3973 master_params.ip = new_ip
3974 result = self.rpc.call_node_activate_master_ip(master_params.name,
3976 msg = result.fail_msg
3978 self.LogWarning("Could not re-enable the master role on"
3979 " the master, please restart manually: %s", msg)
3984 def _ValidateNetmask(cfg, netmask):
3985 """Checks if a netmask is valid.
3987 @type cfg: L{config.ConfigWriter}
3988 @param cfg: The cluster configuration
3990 @param netmask: the netmask to be verified
3991 @raise errors.OpPrereqError: if the validation fails
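Illustrative example (assuming the netmask is given as a CIDR prefix length,
as the error message below suggests): C{_ValidateNetmask(cfg, 24)} passes on
an IPv4 cluster, while C{_ValidateNetmask(cfg, 33)} raises
L{errors.OpPrereqError}.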
3994 ip_family = cfg.GetPrimaryIPFamily()
3996 ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
3997 except errors.ProgrammerError:
3998 raise errors.OpPrereqError("Invalid primary ip family: %s." %
3999 ip_family, errors.ECODE_INVAL)
4000 if not ipcls.ValidateNetmask(netmask):
4001 raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
4002 (netmask), errors.ECODE_INVAL)
4005 class LUClusterSetParams(LogicalUnit):
4006 """Change the parameters of the cluster.
4009 HPATH = "cluster-modify"
4010 HTYPE = constants.HTYPE_CLUSTER
4013 def CheckArguments(self):
4017 if self.op.uid_pool:
4018 uidpool.CheckUidPool(self.op.uid_pool)
4020 if self.op.add_uids:
4021 uidpool.CheckUidPool(self.op.add_uids)
4023 if self.op.remove_uids:
4024 uidpool.CheckUidPool(self.op.remove_uids)
4026 if self.op.master_netmask is not None:
4027 _ValidateNetmask(self.cfg, self.op.master_netmask)
4029 if self.op.diskparams:
4030 for dt_params in self.op.diskparams.values():
4031 utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
4033 utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
4034 except errors.OpPrereqError, err:
4035 raise errors.OpPrereqError("While verifying diskparams options: %s" % err,
4038 def ExpandNames(self):
4039 # FIXME: in the future maybe other cluster params won't require checking on
4040 # all nodes to be modified.
4041 # FIXME: This opcode changes cluster-wide settings. Is acquiring all
4042 # resource locks the right thing, shouldn't it be the BGL instead?
4043 self.needed_locks = {
4044 locking.LEVEL_NODE: locking.ALL_SET,
4045 locking.LEVEL_INSTANCE: locking.ALL_SET,
4046 locking.LEVEL_NODEGROUP: locking.ALL_SET,
4047 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
4049 self.share_locks = _ShareAll()
4051 def BuildHooksEnv(self):
4056 "OP_TARGET": self.cfg.GetClusterName(),
4057 "NEW_VG_NAME": self.op.vg_name,
4060 def BuildHooksNodes(self):
4061 """Build hooks nodes.
4064 mn = self.cfg.GetMasterNode()
4067 def CheckPrereq(self):
4068 """Check prerequisites.
4070 This checks whether the given params don't conflict and
4071 whether the given volume group is valid.
4074 if self.op.vg_name is not None and not self.op.vg_name:
4075 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
4076 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
4077 " instances exist", errors.ECODE_INVAL)
4079 if self.op.drbd_helper is not None and not self.op.drbd_helper:
4080 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
4081 raise errors.OpPrereqError("Cannot disable drbd helper while"
4082 " drbd-based instances exist",
4085 node_list = self.owned_locks(locking.LEVEL_NODE)
4087 # if vg_name not None, checks given volume group on all nodes
4089 vglist = self.rpc.call_vg_list(node_list)
4090 for node in node_list:
4091 msg = vglist[node].fail_msg
4093 # ignoring down node
4094 self.LogWarning("Error while gathering data on node %s"
4095 " (ignoring node): %s", node, msg)
4097 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
4099 constants.MIN_VG_SIZE)
4101 raise errors.OpPrereqError("Error on node '%s': %s" %
4102 (node, vgstatus), errors.ECODE_ENVIRON)
4104 if self.op.drbd_helper:
4105 # checks given drbd helper on all nodes
4106 helpers = self.rpc.call_drbd_helper(node_list)
4107 for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
4109 self.LogInfo("Not checking drbd helper on offline node %s", node)
4111 msg = helpers[node].fail_msg
4113 raise errors.OpPrereqError("Error checking drbd helper on node"
4114 " '%s': %s" % (node, msg),
4115 errors.ECODE_ENVIRON)
4116 node_helper = helpers[node].payload
4117 if node_helper != self.op.drbd_helper:
4118 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
4119 (node, node_helper), errors.ECODE_ENVIRON)
4121 self.cluster = cluster = self.cfg.GetClusterInfo()
4122 # validate params changes
4123 if self.op.beparams:
4124 objects.UpgradeBeParams(self.op.beparams)
4125 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4126 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
4128 if self.op.ndparams:
4129 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
4130 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
4132 # TODO: we need a more general way to handle resetting
4133 # cluster-level parameters to default values
4134 if self.new_ndparams["oob_program"] == "":
4135 self.new_ndparams["oob_program"] = \
4136 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
4138 if self.op.hv_state:
4139 new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
4140 self.cluster.hv_state_static)
4141 self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
4142 for hv, values in new_hv_state.items())
4144 if self.op.disk_state:
4145 new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
4146 self.cluster.disk_state_static)
4147 self.new_disk_state = \
4148 dict((storage, dict((name, cluster.SimpleFillDiskState(values))
4149 for name, values in svalues.items()))
4150 for storage, svalues in new_disk_state.items())
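# The result is a nested dict of the form {storage_type: {name: filled disk
# state params}}, mirroring the structure of the disk_state input.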
4153 self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
4156 all_instances = self.cfg.GetAllInstancesInfo().values()
4158 for group in self.cfg.GetAllNodeGroupsInfo().values():
4159 instances = frozenset([inst for inst in all_instances
4160 if compat.any(node in group.members
4161 for node in inst.all_nodes)])
4162 new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
4163 ipol = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group)
4164 new = _ComputeNewInstanceViolations(ipol,
4165 new_ipolicy, instances)
4167 violations.update(new)
4170 self.LogWarning("After the ipolicy change the following instances"
4171 " violate it: %s",
4172 utils.CommaJoin(utils.NiceSort(violations)))
4174 if self.op.nicparams:
4175 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
4176 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
4177 objects.NIC.CheckParameterSyntax(self.new_nicparams)
4180 # check all instances for consistency
4181 for instance in self.cfg.GetAllInstancesInfo().values():
4182 for nic_idx, nic in enumerate(instance.nics):
4183 params_copy = copy.deepcopy(nic.nicparams)
4184 params_filled = objects.FillDict(self.new_nicparams, params_copy)
4186 # check parameter syntax
4188 objects.NIC.CheckParameterSyntax(params_filled)
4189 except errors.ConfigurationError, err:
4190 nic_errors.append("Instance %s, nic/%d: %s" %
4191 (instance.name, nic_idx, err))
4193 # if we're moving instances to routed, check that they have an ip
4194 target_mode = params_filled[constants.NIC_MODE]
4195 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
4196 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
4197 " address" % (instance.name, nic_idx))
4199 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
4200 "\n".join(nic_errors), errors.ECODE_INVAL)
4202 # hypervisor list/parameters
4203 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
4204 if self.op.hvparams:
4205 for hv_name, hv_dict in self.op.hvparams.items():
4206 if hv_name not in self.new_hvparams:
4207 self.new_hvparams[hv_name] = hv_dict
4209 self.new_hvparams[hv_name].update(hv_dict)
4211 # disk template parameters
4212 self.new_diskparams = objects.FillDict(cluster.diskparams, {})
4213 if self.op.diskparams:
4214 for dt_name, dt_params in self.op.diskparams.items():
4215 if dt_name not in self.new_diskparams:
4216 self.new_diskparams[dt_name] = dt_params
4218 self.new_diskparams[dt_name].update(dt_params)
4220 # os hypervisor parameters
4221 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
4223 for os_name, hvs in self.op.os_hvp.items():
4224 if os_name not in self.new_os_hvp:
4225 self.new_os_hvp[os_name] = hvs
4227 for hv_name, hv_dict in hvs.items():
4228 if hv_name not in self.new_os_hvp[os_name]:
4229 self.new_os_hvp[os_name][hv_name] = hv_dict
4231 self.new_os_hvp[os_name][hv_name].update(hv_dict)
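# new_os_hvp now holds the existing per-OS hypervisor overrides with the
# requested changes merged on top; only the keys given explicitly in the
# opcode are replaced.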
4234 self.new_osp = objects.FillDict(cluster.osparams, {})
4235 if self.op.osparams:
4236 for os_name, osp in self.op.osparams.items():
4237 if os_name not in self.new_osp:
4238 self.new_osp[os_name] = {}
4240 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
4243 if not self.new_osp[os_name]:
4244 # we removed all parameters
4245 del self.new_osp[os_name]
4247 # check the parameter validity (remote check)
4248 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
4249 os_name, self.new_osp[os_name])
4251 # changes to the hypervisor list
4252 if self.op.enabled_hypervisors is not None:
4253 self.hv_list = self.op.enabled_hypervisors
4254 for hv in self.hv_list:
4255 # if the hypervisor doesn't already exist in the cluster
4256 # hvparams, we initialize it to empty, and then (in both
4257 # cases) we make sure to fill the defaults, as we might not
4258 # have a complete defaults list if the hypervisor wasn't enabled before
4260 if hv not in new_hvp:
4262 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
4263 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
4265 self.hv_list = cluster.enabled_hypervisors
4267 if self.op.hvparams or self.op.enabled_hypervisors is not None:
4268 # either the enabled list has changed, or the parameters have, validate
4269 for hv_name, hv_params in self.new_hvparams.items():
4270 if ((self.op.hvparams and hv_name in self.op.hvparams) or
4271 (self.op.enabled_hypervisors and
4272 hv_name in self.op.enabled_hypervisors)):
4273 # either this is a new hypervisor, or its parameters have changed
4274 hv_class = hypervisor.GetHypervisor(hv_name)
4275 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4276 hv_class.CheckParameterSyntax(hv_params)
4277 _CheckHVParams(self, node_list, hv_name, hv_params)
4280 # no need to check any newly-enabled hypervisors, since the
4281 # defaults have already been checked in the above code-block
4282 for os_name, os_hvp in self.new_os_hvp.items():
4283 for hv_name, hv_params in os_hvp.items():
4284 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4285 # we need to fill in the new os_hvp on top of the actual hv_p
4286 cluster_defaults = self.new_hvparams.get(hv_name, {})
4287 new_osp = objects.FillDict(cluster_defaults, hv_params)
4288 hv_class = hypervisor.GetHypervisor(hv_name)
4289 hv_class.CheckParameterSyntax(new_osp)
4290 _CheckHVParams(self, node_list, hv_name, new_osp)
4292 if self.op.default_iallocator:
4293 alloc_script = utils.FindFile(self.op.default_iallocator,
4294 constants.IALLOCATOR_SEARCH_PATH,
4296 if alloc_script is None:
4297 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
4298 " specified" % self.op.default_iallocator,
4301 def Exec(self, feedback_fn):
4302 """Change the parameters of the cluster.
4305 if self.op.vg_name is not None:
4306 new_volume = self.op.vg_name
4309 if new_volume != self.cfg.GetVGName():
4310 self.cfg.SetVGName(new_volume)
4312 feedback_fn("Cluster LVM configuration already in desired"
4313 " state, not changing")
4314 if self.op.drbd_helper is not None:
4315 new_helper = self.op.drbd_helper
4318 if new_helper != self.cfg.GetDRBDHelper():
4319 self.cfg.SetDRBDHelper(new_helper)
4321 feedback_fn("Cluster DRBD helper already in desired state,"
4323 if self.op.hvparams:
4324 self.cluster.hvparams = self.new_hvparams
4326 self.cluster.os_hvp = self.new_os_hvp
4327 if self.op.enabled_hypervisors is not None:
4328 self.cluster.hvparams = self.new_hvparams
4329 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
4330 if self.op.beparams:
4331 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
4332 if self.op.nicparams:
4333 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
4335 self.cluster.ipolicy = self.new_ipolicy
4336 if self.op.osparams:
4337 self.cluster.osparams = self.new_osp
4338 if self.op.ndparams:
4339 self.cluster.ndparams = self.new_ndparams
4340 if self.op.diskparams:
4341 self.cluster.diskparams = self.new_diskparams
4342 if self.op.hv_state:
4343 self.cluster.hv_state_static = self.new_hv_state
4344 if self.op.disk_state:
4345 self.cluster.disk_state_static = self.new_disk_state
4347 if self.op.candidate_pool_size is not None:
4348 self.cluster.candidate_pool_size = self.op.candidate_pool_size
4349 # we need to update the pool size here, otherwise the save will fail
4350 _AdjustCandidatePool(self, [])
4352 if self.op.maintain_node_health is not None:
4353 if self.op.maintain_node_health and not constants.ENABLE_CONFD:
4354 feedback_fn("Note: CONFD was disabled at build time, node health"
4355 " maintenance is not useful (still enabling it)")
4356 self.cluster.maintain_node_health = self.op.maintain_node_health
4358 if self.op.prealloc_wipe_disks is not None:
4359 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
4361 if self.op.add_uids is not None:
4362 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
4364 if self.op.remove_uids is not None:
4365 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
4367 if self.op.uid_pool is not None:
4368 self.cluster.uid_pool = self.op.uid_pool
4370 if self.op.default_iallocator is not None:
4371 self.cluster.default_iallocator = self.op.default_iallocator
4373 if self.op.reserved_lvs is not None:
4374 self.cluster.reserved_lvs = self.op.reserved_lvs
4376 if self.op.use_external_mip_script is not None:
4377 self.cluster.use_external_mip_script = self.op.use_external_mip_script
4379 def helper_os(aname, mods, desc):
4381 lst = getattr(self.cluster, aname)
4382 for key, val in mods:
4383 if key == constants.DDM_ADD:
4385 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
4388 elif key == constants.DDM_REMOVE:
4392 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
4394 raise errors.ProgrammerError("Invalid modification '%s'" % key)
4396 if self.op.hidden_os:
4397 helper_os("hidden_os", self.op.hidden_os, "hidden")
4399 if self.op.blacklisted_os:
4400 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
4402 if self.op.master_netdev:
4403 master_params = self.cfg.GetMasterNetworkParameters()
4404 ems = self.cfg.GetUseExternalMipScript()
4405 feedback_fn("Shutting down master ip on the current netdev (%s)" %
4406 self.cluster.master_netdev)
4407 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4409 result.Raise("Could not disable the master ip")
4410 feedback_fn("Changing master_netdev from %s to %s" %
4411 (master_params.netdev, self.op.master_netdev))
4412 self.cluster.master_netdev = self.op.master_netdev
4414 if self.op.master_netmask:
4415 master_params = self.cfg.GetMasterNetworkParameters()
4416 feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
4417 result = self.rpc.call_node_change_master_netmask(master_params.name,
4418 master_params.netmask,
4419 self.op.master_netmask,
4421 master_params.netdev)
4423 msg = "Could not change the master IP netmask: %s" % result.fail_msg
4426 self.cluster.master_netmask = self.op.master_netmask
4428 self.cfg.Update(self.cluster, feedback_fn)
4430 if self.op.master_netdev:
4431 master_params = self.cfg.GetMasterNetworkParameters()
4432 feedback_fn("Starting the master ip on the new master netdev (%s)" %
4433 self.op.master_netdev)
4434 ems = self.cfg.GetUseExternalMipScript()
4435 result = self.rpc.call_node_activate_master_ip(master_params.name,
4438 self.LogWarning("Could not re-enable the master ip on"
4439 " the master, please restart manually: %s",
4443 def _UploadHelper(lu, nodes, fname):
4444 """Helper for uploading a file and showing warnings.
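@param lu: the calling LogicalUnit (used for its RPC runner and for warnings)
@param nodes: list of node names to upload the file to
@param fname: the name of the file to upload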
4447 if os.path.exists(fname):
4448 result = lu.rpc.call_upload_file(nodes, fname)
4449 for to_node, to_result in result.items():
4450 msg = to_result.fail_msg
4452 msg = ("Copy of file %s to node %s failed: %s" %
4453 (fname, to_node, msg))
4457 def _ComputeAncillaryFiles(cluster, redist):
4458 """Compute files external to Ganeti which need to be consistent.
4460 @type redist: boolean
4461 @param redist: Whether to include files which need to be redistributed
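The return value is a tuple of four sets of file names:
(files_all, files_opt, files_mc, files_vm).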
4464 # Compute files for all nodes
4466 pathutils.SSH_KNOWN_HOSTS_FILE,
4467 pathutils.CONFD_HMAC_KEY,
4468 pathutils.CLUSTER_DOMAIN_SECRET_FILE,
4469 pathutils.SPICE_CERT_FILE,
4470 pathutils.SPICE_CACERT_FILE,
4471 pathutils.RAPI_USERS_FILE,
4475 # we need to ship at least the RAPI certificate
4476 files_all.add(pathutils.RAPI_CERT_FILE)
4478 files_all.update(pathutils.ALL_CERT_FILES)
4479 files_all.update(ssconf.SimpleStore().GetFileList())
4481 if cluster.modify_etc_hosts:
4482 files_all.add(pathutils.ETC_HOSTS)
4484 if cluster.use_external_mip_script:
4485 files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
4487 # Files which are optional; these must:
4488 # - be present in one other category as well
4489 # - either exist or not exist on all nodes of that category (mc, vm all)
4491 pathutils.RAPI_USERS_FILE,
4494 # Files which should only be on master candidates
4498 files_mc.add(pathutils.CLUSTER_CONF_FILE)
4502 (constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE)):
4503 files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
4504 files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)
4506 # Files which should only be on VM-capable nodes
4509 for hv_name in cluster.enabled_hypervisors
4510 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
4514 for hv_name in cluster.enabled_hypervisors
4515 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
4517 # Filenames in each category must be unique
4518 all_files_set = files_all | files_mc | files_vm
4519 assert (len(all_files_set) ==
4520 sum(map(len, [files_all, files_mc, files_vm]))), \
4521 "Found file listed in more than one file list"
4523 # Optional files must be present in one other category
4524 assert all_files_set.issuperset(files_opt), \
4525 "Optional file not in a different required list"
4527 # This one file should never ever be re-distributed via RPC
4528 assert not (redist and
4529 pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)
4531 return (files_all, files_opt, files_mc, files_vm)
4534 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
4535 """Distribute additional files which are part of the cluster configuration.
4537 ConfigWriter takes care of distributing the config and ssconf files, but
4538 there are more files which should be distributed to all nodes. This function
4539 makes sure those are copied.
4541 @param lu: calling logical unit
4542 @param additional_nodes: list of nodes not in the config to distribute to
4543 @type additional_vm: boolean
4544 @param additional_vm: whether the additional nodes are vm-capable or not
4547 # Gather target nodes
4548 cluster = lu.cfg.GetClusterInfo()
4549 master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
4551 online_nodes = lu.cfg.GetOnlineNodeList()
4552 online_set = frozenset(online_nodes)
4553 vm_nodes = list(online_set.intersection(lu.cfg.GetVmCapableNodeList()))
4555 if additional_nodes is not None:
4556 online_nodes.extend(additional_nodes)
4558 vm_nodes.extend(additional_nodes)
4560 # Never distribute to master node
4561 for nodelist in [online_nodes, vm_nodes]:
4562 if master_info.name in nodelist:
4563 nodelist.remove(master_info.name)
4566 (files_all, _, files_mc, files_vm) = \
4567 _ComputeAncillaryFiles(cluster, True)
4569 # Never re-distribute configuration file from here
4570 assert not (pathutils.CLUSTER_CONF_FILE in files_all or
4571 pathutils.CLUSTER_CONF_FILE in files_vm)
4572 assert not files_mc, "Master candidates not handled in this function"
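# Each remaining file category is uploaded to its matching node list below;
# master candidate files are not handled here, hence the assertion above.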
4575 (online_nodes, files_all),
4576 (vm_nodes, files_vm),
4580 for (node_list, files) in filemap:
4582 _UploadHelper(lu, node_list, fname)
4585 class LUClusterRedistConf(NoHooksLU):
4586 """Force the redistribution of cluster configuration.
4588 This is a very simple LU.
4593 def ExpandNames(self):
4594 self.needed_locks = {
4595 locking.LEVEL_NODE: locking.ALL_SET,
4596 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
4598 self.share_locks = _ShareAll()
4600 def Exec(self, feedback_fn):
4601 """Redistribute the configuration.
4604 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
4605 _RedistributeAncillaryFiles(self)
4608 class LUClusterActivateMasterIp(NoHooksLU):
4609 """Activate the master IP on the master node.
4612 def Exec(self, feedback_fn):
4613 """Activate the master IP.
4616 master_params = self.cfg.GetMasterNetworkParameters()
4617 ems = self.cfg.GetUseExternalMipScript()
4618 result = self.rpc.call_node_activate_master_ip(master_params.name,
4620 result.Raise("Could not activate the master IP")
4623 class LUClusterDeactivateMasterIp(NoHooksLU):
4624 """Deactivate the master IP on the master node.
4627 def Exec(self, feedback_fn):
4628 """Deactivate the master IP.
4631 master_params = self.cfg.GetMasterNetworkParameters()
4632 ems = self.cfg.GetUseExternalMipScript()
4633 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4635 result.Raise("Could not deactivate the master IP")
4638 def _WaitForSync(lu, instance, disks=None, oneshot=False):
4639 """Sleep and poll for an instance's disk to sync.
4642 if not instance.disks or (disks is not None and not disks):
4645 disks = _ExpandCheckDisks(instance, disks)
4648 lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
4650 node = instance.primary_node
4653 lu.cfg.SetDiskID(dev, node)
4655 # TODO: Convert to utils.Retry
4658 degr_retries = 10 # in seconds, as we sleep 1 second each time
4662 cumul_degraded = False
4663 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, (disks, instance))
4664 msg = rstats.fail_msg
4666 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
4669 raise errors.RemoteError("Can't contact node %s for mirror data,"
4670 " aborting." % node)
4673 rstats = rstats.payload
4675 for i, mstat in enumerate(rstats):
4677 lu.LogWarning("Can't compute data for node %s/%s",
4678 node, disks[i].iv_name)
4681 cumul_degraded = (cumul_degraded or
4682 (mstat.is_degraded and mstat.sync_percent is None))
4683 if mstat.sync_percent is not None:
4685 if mstat.estimated_time is not None:
4686 rem_time = ("%s remaining (estimated)" %
4687 utils.FormatSeconds(mstat.estimated_time))
4688 max_time = mstat.estimated_time
4690 rem_time = "no time estimate"
4691 lu.LogInfo("- device %s: %5.2f%% done, %s",
4692 disks[i].iv_name, mstat.sync_percent, rem_time)
4694 # if we're done but degraded, let's do a few small retries, to
4695 # make sure we see a stable and not transient situation; therefore
4696 # we force restart of the loop
4697 if (done or oneshot) and cumul_degraded and degr_retries > 0:
4698 logging.info("Degraded disks found, %d retries left", degr_retries)
4706 time.sleep(min(60, max_time))
4709 lu.LogInfo("Instance %s's disks are in sync", instance.name)
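# The return value is True only if no disk was left in a degraded state.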
4711 return not cumul_degraded
4714 def _BlockdevFind(lu, node, dev, instance):
4715 """Wrapper around call_blockdev_find to annotate diskparams.
4717 @param lu: A reference to the lu object
4718 @param node: The node to call out to
4719 @param dev: The device to find
4720 @param instance: The instance object the device belongs to
4721 @return: The result of the RPC call
4724 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4725 return lu.rpc.call_blockdev_find(node, disk)
4728 def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
4729 """Wrapper around L{_CheckDiskConsistencyInner}.
4732 (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
4733 return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
4737 def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
4739 """Check that mirrors are not degraded.
4741 @attention: The device has to be annotated already.
4743 The ldisk parameter, if True, will change the test from the
4744 is_degraded attribute (which represents overall non-ok status for
4745 the device(s)) to the ldisk (representing the local storage status).
4748 lu.cfg.SetDiskID(dev, node)
4752 if on_primary or dev.AssembleOnSecondary():
4753 rstats = lu.rpc.call_blockdev_find(node, dev)
4754 msg = rstats.fail_msg
4756 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
4758 elif not rstats.payload:
4759 lu.LogWarning("Can't find disk on node %s", node)
4763 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
4765 result = result and not rstats.payload.is_degraded
4768 for child in dev.children:
4769 result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
4775 class LUOobCommand(NoHooksLU):
4776 """Logical unit for OOB handling.
4780 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
4782 def ExpandNames(self):
4783 """Gather locks we need.
4786 if self.op.node_names:
4787 self.op.node_names = _GetWantedNodes(self, self.op.node_names)
4788 lock_names = self.op.node_names
4790 lock_names = locking.ALL_SET
4792 self.needed_locks = {
4793 locking.LEVEL_NODE: lock_names,
4796 self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
4798 if not self.op.node_names:
4799 # Acquire node allocation lock only if all nodes are affected
4800 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
4802 def CheckPrereq(self):
4803 """Check prerequisites.
4806 - the node exists in the configuration
4809 Any errors are signaled by raising errors.OpPrereqError.
4813 self.master_node = self.cfg.GetMasterNode()
4815 assert self.op.power_delay >= 0.0
4817 if self.op.node_names:
4818 if (self.op.command in self._SKIP_MASTER and
4819 self.master_node in self.op.node_names):
4820 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
4821 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
4823 if master_oob_handler:
4824 additional_text = ("run '%s %s %s' if you want to operate on the"
4825 " master regardless") % (master_oob_handler,
4829 additional_text = "it does not support out-of-band operations"
4831 raise errors.OpPrereqError(("Operating on the master node %s is not"
4832 " allowed for %s; %s") %
4833 (self.master_node, self.op.command,
4834 additional_text), errors.ECODE_INVAL)
4836 self.op.node_names = self.cfg.GetNodeList()
4837 if self.op.command in self._SKIP_MASTER:
4838 self.op.node_names.remove(self.master_node)
4840 if self.op.command in self._SKIP_MASTER:
4841 assert self.master_node not in self.op.node_names
4843 for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
4845 raise errors.OpPrereqError("Node %s not found" % node_name,
4848 self.nodes.append(node)
4850 if (not self.op.ignore_status and
4851 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
4852 raise errors.OpPrereqError(("Cannot power off node %s because it is"
4853 " not marked offline") % node_name,
4856 def Exec(self, feedback_fn):
4857 """Execute OOB and return result if we expect any.
4860 master_node = self.master_node
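# Results are collected per node as a list of (status, value) tuples, starting
# with the node name; power-on commands are additionally spaced out by
# self.op.power_delay seconds (see the sleep at the end of the loop).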
4863 for idx, node in enumerate(utils.NiceSort(self.nodes,
4864 key=lambda node: node.name)):
4865 node_entry = [(constants.RS_NORMAL, node.name)]
4866 ret.append(node_entry)
4868 oob_program = _SupportsOob(self.cfg, node)
4871 node_entry.append((constants.RS_UNAVAIL, None))
4874 logging.info("Executing out-of-band command '%s' using '%s' on %s",
4875 self.op.command, oob_program, node.name)
4876 result = self.rpc.call_run_oob(master_node, oob_program,
4877 self.op.command, node.name,
4881 self.LogWarning("Out-of-band RPC failed on node '%s': %s",
4882 node.name, result.fail_msg)
4883 node_entry.append((constants.RS_NODATA, None))
4886 self._CheckPayload(result)
4887 except errors.OpExecError, err:
4888 self.LogWarning("Payload returned by node '%s' is not valid: %s",
4890 node_entry.append((constants.RS_NODATA, None))
4892 if self.op.command == constants.OOB_HEALTH:
4893 # For health we should log important events
4894 for item, status in result.payload:
4895 if status in [constants.OOB_STATUS_WARNING,
4896 constants.OOB_STATUS_CRITICAL]:
4897 self.LogWarning("Item '%s' on node '%s' has status '%s'",
4898 item, node.name, status)
4900 if self.op.command == constants.OOB_POWER_ON:
4902 elif self.op.command == constants.OOB_POWER_OFF:
4903 node.powered = False
4904 elif self.op.command == constants.OOB_POWER_STATUS:
4905 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
4906 if powered != node.powered:
4907 logging.warning(("Recorded power state (%s) of node '%s' does not"
4908 " match actual power state (%s)"), node.powered,
4911 # For configuration changing commands we should update the node
4912 if self.op.command in (constants.OOB_POWER_ON,
4913 constants.OOB_POWER_OFF):
4914 self.cfg.Update(node, feedback_fn)
4916 node_entry.append((constants.RS_NORMAL, result.payload))
4918 if (self.op.command == constants.OOB_POWER_ON and
4919 idx < len(self.nodes) - 1):
4920 time.sleep(self.op.power_delay)
4924 def _CheckPayload(self, result):
4925 """Checks if the payload is valid.
4927 @param result: RPC result
4928 @raises errors.OpExecError: If payload is not valid
4932 if self.op.command == constants.OOB_HEALTH:
4933 if not isinstance(result.payload, list):
4934 errs.append("command 'health' is expected to return a list but got %s" %
4935 type(result.payload))
4937 for item, status in result.payload:
4938 if status not in constants.OOB_STATUSES:
4939 errs.append("health item '%s' has invalid status '%s'" %
4942 if self.op.command == constants.OOB_POWER_STATUS:
4943 if not isinstance(result.payload, dict):
4944 errs.append("power-status is expected to return a dict but got %s" %
4945 type(result.payload))
4947 if self.op.command in [
4948 constants.OOB_POWER_ON,
4949 constants.OOB_POWER_OFF,
4950 constants.OOB_POWER_CYCLE,
4952 if result.payload is not None:
4953 errs.append("%s is expected to not return payload but got '%s'" %
4954 (self.op.command, result.payload))
4957 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
4958 utils.CommaJoin(errs))
4961 class _OsQuery(_QueryBase):
4962 FIELDS = query.OS_FIELDS
4964 def ExpandNames(self, lu):
4965 # Lock all nodes in shared mode
4966 # Temporary removal of locks, should be reverted later
4967 # TODO: reintroduce locks when they are lighter-weight
4968 lu.needed_locks = {}
4969 #self.share_locks[locking.LEVEL_NODE] = 1
4970 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4972 # The following variables interact with _QueryBase._GetNames
4974 self.wanted = self.names
4976 self.wanted = locking.ALL_SET
4978 self.do_locking = self.use_locking
4980 def DeclareLocks(self, lu, level):
4984 def _DiagnoseByOS(rlist):
4985 """Remaps a per-node return list into a per-os per-node dictionary
4987 @param rlist: a map with node names as keys and OS objects as values
4990 @return: a dictionary with osnames as keys and as value another
4991 map, with nodes as keys and tuples of (path, status, diagnose,
4992 variants, parameters, api_versions) as values, eg::
4994 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
4995 (/srv/..., False, "invalid api")],
4996 "node2": [(/srv/..., True, "", [], [])]}
5001 # we build here the list of nodes that didn't fail the RPC (at RPC
5002 # level), so that nodes with a non-responding node daemon don't
5003 # make all OSes invalid
5004 good_nodes = [node_name for node_name in rlist
5005 if not rlist[node_name].fail_msg]
5006 for node_name, nr in rlist.items():
5007 if nr.fail_msg or not nr.payload:
5009 for (name, path, status, diagnose, variants,
5010 params, api_versions) in nr.payload:
5011 if name not in all_os:
5012 # build a list of nodes for this os containing empty lists
5013 # for each node in node_list
5015 for nname in good_nodes:
5016 all_os[name][nname] = []
5017 # convert params from [name, help] to (name, help)
5018 params = [tuple(v) for v in params]
5019 all_os[name][node_name].append((path, status, diagnose,
5020 variants, params, api_versions))
5023 def _GetQueryData(self, lu):
5024 """Computes the list of OSes and their attributes.
5027 # Locking is not used
5028 assert not (compat.any(lu.glm.is_owned(level)
5029 for level in locking.LEVELS
5030 if level != locking.LEVEL_CLUSTER) or
5031 self.do_locking or self.use_locking)
5033 valid_nodes = [node.name
5034 for node in lu.cfg.GetAllNodesInfo().values()
5035 if not node.offline and node.vm_capable]
5036 pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
5037 cluster = lu.cfg.GetClusterInfo()
5041 for (os_name, os_data) in pol.items():
5042 info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
5043 hidden=(os_name in cluster.hidden_os),
5044 blacklisted=(os_name in cluster.blacklisted_os))
5048 api_versions = set()
5050 for idx, osl in enumerate(os_data.values()):
5051 info.valid = bool(info.valid and osl and osl[0][1])
5055 (node_variants, node_params, node_api) = osl[0][3:6]
5058 variants.update(node_variants)
5059 parameters.update(node_params)
5060 api_versions.update(node_api)
5062 # Filter out inconsistent values
5063 variants.intersection_update(node_variants)
5064 parameters.intersection_update(node_params)
5065 api_versions.intersection_update(node_api)
5067 info.variants = list(variants)
5068 info.parameters = list(parameters)
5069 info.api_versions = list(api_versions)
5071 data[os_name] = info
5073 # Prepare data in requested order
5074 return [data[name] for name in self._GetNames(lu, pol.keys(), None)
5078 class LUOsDiagnose(NoHooksLU):
5079 """Logical unit for OS diagnose/query.
5085 def _BuildFilter(fields, names):
5086 """Builds a filter for querying OSes.
5089 name_filter = qlang.MakeSimpleFilter("name", names)
5091 # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
5092 # respective field is not requested
5093 status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
5094 for fname in ["hidden", "blacklisted"]
5095 if fname not in fields]
5096 if "valid" not in fields:
5097 status_filter.append([qlang.OP_TRUE, "valid"])
5100 status_filter.insert(0, qlang.OP_AND)
5102 status_filter = None
5104 if name_filter and status_filter:
5105 return [qlang.OP_AND, name_filter, status_filter]
5109 return status_filter
5111 def CheckArguments(self):
5112 self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
5113 self.op.output_fields, False)
5115 def ExpandNames(self):
5116 self.oq.ExpandNames(self)
5118 def Exec(self, feedback_fn):
5119 return self.oq.OldStyleQuery(self)
5122 class LUNodeRemove(LogicalUnit):
5123 """Logical unit for removing a node.
5126 HPATH = "node-remove"
5127 HTYPE = constants.HTYPE_NODE
5129 def BuildHooksEnv(self):
5134 "OP_TARGET": self.op.node_name,
5135 "NODE_NAME": self.op.node_name,
5138 def BuildHooksNodes(self):
5139 """Build hooks nodes.
5141 This doesn't run on the target node in the pre phase as a failed
5142 node would then be impossible to remove.
5145 all_nodes = self.cfg.GetNodeList()
5147 all_nodes.remove(self.op.node_name)
5150 return (all_nodes, all_nodes)
5152 def CheckPrereq(self):
5153 """Check prerequisites.
5156 - the node exists in the configuration
5157 - it does not have primary or secondary instances
5158 - it's not the master
5160 Any errors are signaled by raising errors.OpPrereqError.
5163 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5164 node = self.cfg.GetNodeInfo(self.op.node_name)
5165 assert node is not None
5167 masternode = self.cfg.GetMasterNode()
5168 if node.name == masternode:
5169 raise errors.OpPrereqError("Node is the master node, failover to another"
5170 " node is required", errors.ECODE_INVAL)
5172 for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
5173 if node.name in instance.all_nodes:
5174 raise errors.OpPrereqError("Instance %s is still running on the node,"
5175 " please remove it first" % instance_name,
5177 self.op.node_name = node.name
5180 def Exec(self, feedback_fn):
5181 """Removes the node from the cluster.
5185 logging.info("Stopping the node daemon and removing configs from node %s",
5188 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
5190 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5193 # Promote nodes to master candidate as needed
5194 _AdjustCandidatePool(self, exceptions=[node.name])
5195 self.context.RemoveNode(node.name)
5197 # Run post hooks on the node before it's removed
5198 _RunPostHook(self, node.name)
5200 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
5201 msg = result.fail_msg
5203 self.LogWarning("Errors encountered on the remote node while leaving"
5204 " the cluster: %s", msg)
5206 # Remove node from our /etc/hosts
5207 if self.cfg.GetClusterInfo().modify_etc_hosts:
5208 master_node = self.cfg.GetMasterNode()
5209 result = self.rpc.call_etc_hosts_modify(master_node,
5210 constants.ETC_HOSTS_REMOVE,
5212 result.Raise("Can't update hosts file with new host data")
5213 _RedistributeAncillaryFiles(self)
5216 class _NodeQuery(_QueryBase):
5217 FIELDS = query.NODE_FIELDS
5219 def ExpandNames(self, lu):
5220 lu.needed_locks = {}
5221 lu.share_locks = _ShareAll()
5224 self.wanted = _GetWantedNodes(lu, self.names)
5226 self.wanted = locking.ALL_SET
5228 self.do_locking = (self.use_locking and
5229 query.NQ_LIVE in self.requested_data)
5232 # If any non-static field is requested we need to lock the nodes
5233 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
5234 lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
5236 def DeclareLocks(self, lu, level):
5239 def _GetQueryData(self, lu):
5240 """Computes the list of nodes and their attributes.
5243 all_info = lu.cfg.GetAllNodesInfo()
5245 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
5247 # Gather data as requested
5248 if query.NQ_LIVE in self.requested_data:
5249 # filter out non-vm_capable nodes
5250 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
5252 node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
5253 [lu.cfg.GetHypervisorType()])
5254 live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
5255 for (name, nresult) in node_data.items()
5256 if not nresult.fail_msg and nresult.payload)
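# live_data maps each responding vm-capable node to its legacy node-info
# payload; nodes whose RPC failed or returned no payload are left out.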
5260 if query.NQ_INST in self.requested_data:
5261 node_to_primary = dict([(name, set()) for name in nodenames])
5262 node_to_secondary = dict([(name, set()) for name in nodenames])
5264 inst_data = lu.cfg.GetAllInstancesInfo()
5266 for inst in inst_data.values():
5267 if inst.primary_node in node_to_primary:
5268 node_to_primary[inst.primary_node].add(inst.name)
5269 for secnode in inst.secondary_nodes:
5270 if secnode in node_to_secondary:
5271 node_to_secondary[secnode].add(inst.name)
5273 node_to_primary = None
5274 node_to_secondary = None
5276 if query.NQ_OOB in self.requested_data:
5277 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
5278 for name, node in all_info.iteritems())
5282 if query.NQ_GROUP in self.requested_data:
5283 groups = lu.cfg.GetAllNodeGroupsInfo()
5287 return query.NodeQueryData([all_info[name] for name in nodenames],
5288 live_data, lu.cfg.GetMasterNode(),
5289 node_to_primary, node_to_secondary, groups,
5290 oob_support, lu.cfg.GetClusterInfo())
5293 class LUNodeQuery(NoHooksLU):
5294 """Logical unit for querying nodes.
5297 # pylint: disable=W0142
5300 def CheckArguments(self):
5301 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
5302 self.op.output_fields, self.op.use_locking)
5304 def ExpandNames(self):
5305 self.nq.ExpandNames(self)
5307 def DeclareLocks(self, level):
5308 self.nq.DeclareLocks(self, level)
5310 def Exec(self, feedback_fn):
5311 return self.nq.OldStyleQuery(self)
5314 class LUNodeQueryvols(NoHooksLU):
5315 """Logical unit for getting volumes on node(s).
5319 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
5320 _FIELDS_STATIC = utils.FieldSet("node")
5322 def CheckArguments(self):
5323 _CheckOutputFields(static=self._FIELDS_STATIC,
5324 dynamic=self._FIELDS_DYNAMIC,
5325 selected=self.op.output_fields)
5327 def ExpandNames(self):
5328 self.share_locks = _ShareAll()
5331 self.needed_locks = {
5332 locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
5335 self.needed_locks = {
5336 locking.LEVEL_NODE: locking.ALL_SET,
5337 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
5340 def Exec(self, feedback_fn):
5341 """Computes the list of volumes and their attributes.
5344 nodenames = self.owned_locks(locking.LEVEL_NODE)
5345 volumes = self.rpc.call_node_volumes(nodenames)
5347 ilist = self.cfg.GetAllInstancesInfo()
5348 vol2inst = _MapInstanceDisksToNodes(ilist.values())
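# vol2inst maps (node, "vg/lv_name") to the owning instance name, as used by
# the "instance" output field below.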
5351 for node in nodenames:
5352 nresult = volumes[node]
5355 msg = nresult.fail_msg
5357 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
5360 node_vols = sorted(nresult.payload,
5361 key=operator.itemgetter("dev"))
5363 for vol in node_vols:
5365 for field in self.op.output_fields:
5368 elif field == "phys":
5372 elif field == "name":
5374 elif field == "size":
5375 val = int(float(vol["size"]))
5376 elif field == "instance":
5377 val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
5379 raise errors.ParameterError(field)
5380 node_output.append(str(val))
5382 output.append(node_output)
5387 class LUNodeQueryStorage(NoHooksLU):
5388 """Logical unit for getting information on storage units on node(s).
5391 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
5394 def CheckArguments(self):
5395 _CheckOutputFields(static=self._FIELDS_STATIC,
5396 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
5397 selected=self.op.output_fields)
5399 def ExpandNames(self):
5400 self.share_locks = _ShareAll()
5403 self.needed_locks = {
5404 locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
5407 self.needed_locks = {
5408 locking.LEVEL_NODE: locking.ALL_SET,
5409 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
5412 def Exec(self, feedback_fn):
5413 """Computes the list of storage units and their attributes.
5416 self.nodes = self.owned_locks(locking.LEVEL_NODE)
5418 # Always get name to sort by
5419 if constants.SF_NAME in self.op.output_fields:
5420 fields = self.op.output_fields[:]
5422 fields = [constants.SF_NAME] + self.op.output_fields
5424 # Never ask for node or type as it's only known to the LU
5425 for extra in [constants.SF_NODE, constants.SF_TYPE]:
5426 while extra in fields:
5427 fields.remove(extra)
5429 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
5430 name_idx = field_idx[constants.SF_NAME]
5432 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5433 data = self.rpc.call_storage_list(self.nodes,
5434 self.op.storage_type, st_args,
5435 self.op.name, fields)
5439 for node in utils.NiceSort(self.nodes):
5440 nresult = data[node]
5444 msg = nresult.fail_msg
5446 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
5449 rows = dict([(row[name_idx], row) for row in nresult.payload])
5451 for name in utils.NiceSort(rows.keys()):
5456 for field in self.op.output_fields:
5457 if field == constants.SF_NODE:
5459 elif field == constants.SF_TYPE:
5460 val = self.op.storage_type
5461 elif field in field_idx:
5462 val = row[field_idx[field]]
5464 raise errors.ParameterError(field)
5473 class _InstanceQuery(_QueryBase):
5474 FIELDS = query.INSTANCE_FIELDS
5476 def ExpandNames(self, lu):
5477 lu.needed_locks = {}
5478 lu.share_locks = _ShareAll()
5481 self.wanted = _GetWantedInstances(lu, self.names)
5483 self.wanted = locking.ALL_SET
5485 self.do_locking = (self.use_locking and
5486 query.IQ_LIVE in self.requested_data)
5488 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5489 lu.needed_locks[locking.LEVEL_NODEGROUP] = []
5490 lu.needed_locks[locking.LEVEL_NODE] = []
5491 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5493 self.do_grouplocks = (self.do_locking and
5494 query.IQ_NODES in self.requested_data)
5496 def DeclareLocks(self, lu, level):
5498 if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
5499 assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
5501 # Lock all groups used by instances optimistically; this requires going
5502 # via the node before it's locked, requiring verification later on
5503 lu.needed_locks[locking.LEVEL_NODEGROUP] = \
5505 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
5506 for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
5507 elif level == locking.LEVEL_NODE:
5508 lu._LockInstancesNodes() # pylint: disable=W0212
5511 def _CheckGroupLocks(lu):
5512 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
5513 owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
5515 # Check if node groups for locked instances are still correct
5516 for instance_name in owned_instances:
5517 _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
5519 def _GetQueryData(self, lu):
5520 """Computes the list of instances and their attributes.
5523 if self.do_grouplocks:
5524 self._CheckGroupLocks(lu)
5526 cluster = lu.cfg.GetClusterInfo()
5527 all_info = lu.cfg.GetAllInstancesInfo()
5529 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
5531 instance_list = [all_info[name] for name in instance_names]
5532 nodes = frozenset(itertools.chain(*(inst.all_nodes
5533 for inst in instance_list)))
5534 hv_list = list(set([inst.hypervisor for inst in instance_list]))
5537 wrongnode_inst = set()
5539 # Gather data as requested
5540 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
5542 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
5544 result = node_data[name]
5546 # offline nodes will be in both lists
5547 assert result.fail_msg
5548 offline_nodes.append(name)
5550 bad_nodes.append(name)
5551 elif result.payload:
5552 for inst in result.payload:
5553 if inst in all_info:
5554 if all_info[inst].primary_node == name:
5555 live_data.update(result.payload)
5557 wrongnode_inst.add(inst)
5559 # orphan instance; we don't list it here as we don't
5560 # handle this case yet in the output of instance listing
5561 logging.warning("Orphan instance '%s' found on node %s",
5563 # else no instance is alive
5567 if query.IQ_DISKUSAGE in self.requested_data:
5568 gmi = ganeti.masterd.instance
5569 disk_usage = dict((inst.name,
5570 gmi.ComputeDiskSize(inst.disk_template,
5571 [{constants.IDISK_SIZE: disk.size}
5572 for disk in inst.disks]))
5573 for inst in instance_list)
5577 if query.IQ_CONSOLE in self.requested_data:
5579 for inst in instance_list:
5580 if inst.name in live_data:
5581 # Instance is running
5582 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
5584 consinfo[inst.name] = None
5585 assert set(consinfo.keys()) == set(instance_names)
5589 if query.IQ_NODES in self.requested_data:
5590 node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
5592 nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
5593 groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
5594 for uuid in set(map(operator.attrgetter("group"),
5600 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
5601 disk_usage, offline_nodes, bad_nodes,
5602 live_data, wrongnode_inst, consinfo,
5606 class LUQuery(NoHooksLU):
5607 """Query for resources/items of a certain kind.
5610 # pylint: disable=W0142
5613 def CheckArguments(self):
5614 qcls = _GetQueryImplementation(self.op.what)
5616 self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
5618 def ExpandNames(self):
5619 self.impl.ExpandNames(self)
5621 def DeclareLocks(self, level):
5622 self.impl.DeclareLocks(self, level)
5624 def Exec(self, feedback_fn):
5625 return self.impl.NewStyleQuery(self)
5628 class LUQueryFields(NoHooksLU):
5629 """Query for resources/items of a certain kind.
5632 # pylint: disable=W0142
5635 def CheckArguments(self):
5636 self.qcls = _GetQueryImplementation(self.op.what)
5638 def ExpandNames(self):
5639 self.needed_locks = {}
5641 def Exec(self, feedback_fn):
5642 return query.QueryFields(self.qcls.FIELDS, self.op.fields)
5645 class LUNodeModifyStorage(NoHooksLU):
5646 """Logical unit for modifying a storage volume on a node.
5651 def CheckArguments(self):
5652 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5654 storage_type = self.op.storage_type
5657 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
5659 raise errors.OpPrereqError("Storage units of type '%s' can not be"
5660 " modified" % storage_type,
5663 diff = set(self.op.changes.keys()) - modifiable
5665 raise errors.OpPrereqError("The following fields can not be modified for"
5666 " storage units of type '%s': %r" %
5667 (storage_type, list(diff)),
5670 def ExpandNames(self):
5671 self.needed_locks = {
5672 locking.LEVEL_NODE: self.op.node_name,
5675 def Exec(self, feedback_fn):
5676 """Modifies a storage volume on a node.
5679 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5680 result = self.rpc.call_storage_modify(self.op.node_name,
5681 self.op.storage_type, st_args,
5682 self.op.name, self.op.changes)
5683 result.Raise("Failed to modify storage unit '%s' on %s" %
5684 (self.op.name, self.op.node_name))
5687 class LUNodeAdd(LogicalUnit):
5688 """Logical unit for adding node to the cluster.
5692 HTYPE = constants.HTYPE_NODE
5693 _NFLAGS = ["master_capable", "vm_capable"]
5695 def CheckArguments(self):
5696 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
5697 # validate/normalize the node name
5698 self.hostname = netutils.GetHostname(name=self.op.node_name,
5699 family=self.primary_ip_family)
5700 self.op.node_name = self.hostname.name
5702 if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
5703 raise errors.OpPrereqError("Cannot readd the master node",
5706 if self.op.readd and self.op.group:
5707 raise errors.OpPrereqError("Cannot pass a node group when a node is"
5708 " being readded", errors.ECODE_INVAL)
5710 def BuildHooksEnv(self):
5713 This will run on all nodes before, and on all nodes + the new node after.
5717 "OP_TARGET": self.op.node_name,
5718 "NODE_NAME": self.op.node_name,
5719 "NODE_PIP": self.op.primary_ip,
5720 "NODE_SIP": self.op.secondary_ip,
5721 "MASTER_CAPABLE": str(self.op.master_capable),
5722 "VM_CAPABLE": str(self.op.vm_capable),
5725 def BuildHooksNodes(self):
5726 """Build hooks nodes.
5729 # Exclude added node
5730 pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
5731 post_nodes = pre_nodes + [self.op.node_name, ]
5733 return (pre_nodes, post_nodes)
5735 def CheckPrereq(self):
5736 """Check prerequisites.
5739 - the new node is not already in the config
5741 - its parameters (single/dual homed) match the cluster
5743 Any errors are signaled by raising errors.OpPrereqError.
5747 hostname = self.hostname
5748 node = hostname.name
5749 primary_ip = self.op.primary_ip = hostname.ip
5750 if self.op.secondary_ip is None:
5751 if self.primary_ip_family == netutils.IP6Address.family:
5752 raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
5753 " IPv4 address must be given as secondary",
5755 self.op.secondary_ip = primary_ip
5757 secondary_ip = self.op.secondary_ip
5758 if not netutils.IP4Address.IsValid(secondary_ip):
5759 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5760 " address" % secondary_ip, errors.ECODE_INVAL)
5762 node_list = cfg.GetNodeList()
5763 if not self.op.readd and node in node_list:
5764 raise errors.OpPrereqError("Node %s is already in the configuration" %
5765 node, errors.ECODE_EXISTS)
5766 elif self.op.readd and node not in node_list:
5767 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
5770 self.changed_primary_ip = False
5772 for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
5773 if self.op.readd and node == existing_node_name:
5774 if existing_node.secondary_ip != secondary_ip:
5775 raise errors.OpPrereqError("Readded node doesn't have the same IP"
5776 " address configuration as before",
5778 if existing_node.primary_ip != primary_ip:
5779 self.changed_primary_ip = True
5783 if (existing_node.primary_ip == primary_ip or
5784 existing_node.secondary_ip == primary_ip or
5785 existing_node.primary_ip == secondary_ip or
5786 existing_node.secondary_ip == secondary_ip):
5787 raise errors.OpPrereqError("New node ip address(es) conflict with"
5788 " existing node %s" % existing_node.name,
5789 errors.ECODE_NOTUNIQUE)
5791 # After this 'if' block, None is no longer a valid value for the
5792 # _capable op attributes
5794 old_node = self.cfg.GetNodeInfo(node)
5795 assert old_node is not None, "Can't retrieve locked node %s" % node
5796 for attr in self._NFLAGS:
5797 if getattr(self.op, attr) is None:
5798 setattr(self.op, attr, getattr(old_node, attr))
5800 for attr in self._NFLAGS:
5801 if getattr(self.op, attr) is None:
5802 setattr(self.op, attr, True)
5804 if self.op.readd and not self.op.vm_capable:
5805 pri, sec = cfg.GetNodeInstances(node)
5807 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
5808 " flag set to false, but it already holds"
5809 " instances" % node,
5812 # check that the type of the node (single versus dual homed) is the
5813 # same as for the master
5814 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
5815 master_singlehomed = myself.secondary_ip == myself.primary_ip
5816 newbie_singlehomed = secondary_ip == primary_ip
5817 if master_singlehomed != newbie_singlehomed:
5818 if master_singlehomed:
5819 raise errors.OpPrereqError("The master has no secondary ip but the"
5820 " new node has one",
5823 raise errors.OpPrereqError("The master has a secondary ip but the"
5824 " new node doesn't have one",
5827 # checks reachability
5828 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
5829 raise errors.OpPrereqError("Node not reachable by ping",
5830 errors.ECODE_ENVIRON)
5832 if not newbie_singlehomed:
5833 # check reachability from my secondary ip to newbie's secondary ip
5834 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
5835 source=myself.secondary_ip):
5836 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5837 " based ping to node daemon port",
5838 errors.ECODE_ENVIRON)
5845 if self.op.master_capable:
5846 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
5848 self.master_candidate = False
5851 self.new_node = old_node
5853 node_group = cfg.LookupNodeGroup(self.op.group)
5854 self.new_node = objects.Node(name=node,
5855 primary_ip=primary_ip,
5856 secondary_ip=secondary_ip,
5857 master_candidate=self.master_candidate,
5858 offline=False, drained=False,
5861 if self.op.ndparams:
5862 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
5864 if self.op.hv_state:
5865 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
5867 if self.op.disk_state:
5868 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
5870 # TODO: If we need to have multiple DnsOnlyRunner we probably should make
5871 # it a property on the base class.
5872 result = rpc.DnsOnlyRunner().call_version([node])[node]
5873 result.Raise("Can't get version information from node %s" % node)
5874 if constants.PROTOCOL_VERSION == result.payload:
5875 logging.info("Communication to node %s fine, sw version %s match",
5876 node, result.payload)
5878 raise errors.OpPrereqError("Version mismatch: master version %s,"
5879 " node version %s" %
5880 (constants.PROTOCOL_VERSION, result.payload),
5881 errors.ECODE_ENVIRON)
5883 def Exec(self, feedback_fn):
5884 """Adds the new node to the cluster.
5887 new_node = self.new_node
5888 node = new_node.name
5890 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5893 # We are adding a new node, so we assume it's powered
5894 new_node.powered = True
5896 # for re-adds, reset the offline/drained/master-candidate flags;
5897 # we need to reset here, otherwise offline would prevent RPC calls
5898 # later in the procedure; this also means that if the re-add
5899 # fails, we are left with a non-offlined, broken node
5901 new_node.drained = new_node.offline = False # pylint: disable=W0201
5902 self.LogInfo("Readding a node, the offline/drained flags were reset")
5903 # if we demote the node, we do cleanup later in the procedure
5904 new_node.master_candidate = self.master_candidate
5905 if self.changed_primary_ip:
5906 new_node.primary_ip = self.op.primary_ip
5908 # copy the master/vm_capable flags
5909 for attr in self._NFLAGS:
5910 setattr(new_node, attr, getattr(self.op, attr))
5912 # notify the user about any possible mc promotion
5913 if new_node.master_candidate:
5914 self.LogInfo("Node will be a master candidate")
5916 if self.op.ndparams:
5917 new_node.ndparams = self.op.ndparams
5919 new_node.ndparams = {}
5921 if self.op.hv_state:
5922 new_node.hv_state_static = self.new_hv_state
5924 if self.op.disk_state:
5925 new_node.disk_state_static = self.new_disk_state
5927 # Add node to our /etc/hosts, and add key to known_hosts
5928 if self.cfg.GetClusterInfo().modify_etc_hosts:
5929 master_node = self.cfg.GetMasterNode()
5930 result = self.rpc.call_etc_hosts_modify(master_node,
5931 constants.ETC_HOSTS_ADD,
5934 result.Raise("Can't update hosts file with new host data")
5936 if new_node.secondary_ip != new_node.primary_ip:
5937 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
5940 node_verify_list = [self.cfg.GetMasterNode()]
5941 node_verify_param = {
5942 constants.NV_NODELIST: ([node], {}),
5943 # TODO: do a node-net-test as well?
5946 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
5947 self.cfg.GetClusterName())
5948 for verifier in node_verify_list:
5949 result[verifier].Raise("Cannot communicate with node %s" % verifier)
5950 nl_payload = result[verifier].payload[constants.NV_NODELIST]
5952 for failed in nl_payload:
5953 feedback_fn("ssh/hostname verification failed"
5954 " (checking from %s): %s" %
5955 (verifier, nl_payload[failed]))
5956 raise errors.OpExecError("ssh/hostname verification failed")
5959 _RedistributeAncillaryFiles(self)
5960 self.context.ReaddNode(new_node)
5961 # make sure we redistribute the config
5962 self.cfg.Update(new_node, feedback_fn)
5963 # and make sure the new node will not have old files around
5964 if not new_node.master_candidate:
5965 result = self.rpc.call_node_demote_from_mc(new_node.name)
5966 msg = result.fail_msg
5968 self.LogWarning("Node failed to demote itself from master"
5969 " candidate status: %s" % msg)
5971 _RedistributeAncillaryFiles(self, additional_nodes=[node],
5972 additional_vm=self.op.vm_capable)
5973 self.context.AddNode(new_node, self.proc.GetECId())
5976 class LUNodeSetParams(LogicalUnit):
5977 """Modifies the parameters of a node.
5979 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5980 to the node role (as _ROLE_*)
5981 @cvar _R2F: a dictionary from node role to tuples of flags
5982 @cvar _FLAGS: a list of attribute names corresponding to the flags
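For example (values taken from the C{_F2R} definition below)::

  _F2R[(True, False, False)] == _ROLE_CANDIDATE
  _R2F[_ROLE_OFFLINE] == (False, False, True)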
5985 HPATH = "node-modify"
5986 HTYPE = constants.HTYPE_NODE
5988 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
5990 (True, False, False): _ROLE_CANDIDATE,
5991 (False, True, False): _ROLE_DRAINED,
5992 (False, False, True): _ROLE_OFFLINE,
5993 (False, False, False): _ROLE_REGULAR,
5995 _R2F = dict((v, k) for k, v in _F2R.items())
5996 _FLAGS = ["master_candidate", "drained", "offline"]
5998 def CheckArguments(self):
5999 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6000 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
6001 self.op.master_capable, self.op.vm_capable,
6002 self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
6004 if all_mods.count(None) == len(all_mods):
6005 raise errors.OpPrereqError("Please pass at least one modification",
6007 if all_mods.count(True) > 1:
6008 raise errors.OpPrereqError("Can't set the node into more than one"
6009 " state at the same time",
6012 # Boolean value that tells us whether we might be demoting from MC
6013 self.might_demote = (self.op.master_candidate is False or
6014 self.op.offline is True or
6015 self.op.drained is True or
6016 self.op.master_capable is False)
6018 if self.op.secondary_ip:
6019 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
6020 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
6021 " address" % self.op.secondary_ip,
6024 self.lock_all = self.op.auto_promote and self.might_demote
6025 self.lock_instances = self.op.secondary_ip is not None
6027 def _InstanceFilter(self, instance):
6028 """Filter for getting affected instances.
6031 return (instance.disk_template in constants.DTS_INT_MIRROR and
6032 self.op.node_name in instance.all_nodes)
6034 def ExpandNames(self):
6036 self.needed_locks = {
6037 locking.LEVEL_NODE: locking.ALL_SET,
6039 # Block allocations when all nodes are locked
6040 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
6043 self.needed_locks = {
6044 locking.LEVEL_NODE: self.op.node_name,
6047 # Since modifying a node can have severe effects on currently running
6048 # operations, the resource lock is at least acquired in shared mode
6049 self.needed_locks[locking.LEVEL_NODE_RES] = \
6050 self.needed_locks[locking.LEVEL_NODE]
6052 # Get all locks except nodes in shared mode; they are not used for anything
6053 # but read-only access
6054 self.share_locks = _ShareAll()
6055 self.share_locks[locking.LEVEL_NODE] = 0
6056 self.share_locks[locking.LEVEL_NODE_RES] = 0
6057 self.share_locks[locking.LEVEL_NODE_ALLOC] = 0
6059 if self.lock_instances:
6060 self.needed_locks[locking.LEVEL_INSTANCE] = \
6061 frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))
6063 def BuildHooksEnv(self):
6066 This runs on the master node.
6070 "OP_TARGET": self.op.node_name,
6071 "MASTER_CANDIDATE": str(self.op.master_candidate),
6072 "OFFLINE": str(self.op.offline),
6073 "DRAINED": str(self.op.drained),
6074 "MASTER_CAPABLE": str(self.op.master_capable),
6075 "VM_CAPABLE": str(self.op.vm_capable),
6078 def BuildHooksNodes(self):
6079 """Build hooks nodes.
6082 nl = [self.cfg.GetMasterNode(), self.op.node_name]
6085 def CheckPrereq(self):
6086 """Check prerequisites.
6088 This only checks the instance list against the existing names.
6091 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
6093 if self.lock_instances:
6094 affected_instances = \
6095 self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
6097 # Verify instance locks
6098 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
6099 wanted_instances = frozenset(affected_instances.keys())
6100 if wanted_instances - owned_instances:
6101 raise errors.OpPrereqError("Instances affected by changing node %s's"
6102 " secondary IP address have changed since"
6103 " locks were acquired, wanted '%s', have"
6104 " '%s'; retry the operation" %
6106 utils.CommaJoin(wanted_instances),
6107 utils.CommaJoin(owned_instances)),
6110 affected_instances = None
6112 if (self.op.master_candidate is not None or
6113 self.op.drained is not None or
6114 self.op.offline is not None):
6115 # we can't change the master's node flags
6116 if self.op.node_name == self.cfg.GetMasterNode():
6117 raise errors.OpPrereqError("The master role can be changed"
6118 " only via master-failover",
6121 if self.op.master_candidate and not node.master_capable:
6122 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
6123 " it a master candidate" % node.name,
6126 if self.op.vm_capable is False:
6127 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
6129 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
6130 " the vm_capable flag" % node.name,
6133 if node.master_candidate and self.might_demote and not self.lock_all:
6134 assert not self.op.auto_promote, "auto_promote set but lock_all not"
6135 # check if after removing the current node, we're missing master candidates
6137 (mc_remaining, mc_should, _) = \
6138 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
6139 if mc_remaining < mc_should:
6140 raise errors.OpPrereqError("Not enough master candidates, please"
6141 " pass auto promote option to allow"
6142 " promotion (--auto-promote or RAPI"
6143 " auto_promote=True)", errors.ECODE_STATE)
6145 self.old_flags = old_flags = (node.master_candidate,
6146 node.drained, node.offline)
6147 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
6148 self.old_role = old_role = self._F2R[old_flags]
6150 # Check for ineffective changes
6151 for attr in self._FLAGS:
6152 if (getattr(self.op, attr) is False and getattr(node, attr) is False):
6153 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
6154 setattr(self.op, attr, None)
6156 # Past this point, any flag change to False means a transition
6157 # away from the respective state, as only real changes are kept
6159 # TODO: We might query the real power state if it supports OOB
6160 if _SupportsOob(self.cfg, node):
6161 if self.op.offline is False and not (node.powered or
6162 self.op.powered is True):
6163 raise errors.OpPrereqError(("Node %s needs to be turned on before its"
6164 " offline status can be reset") %
6165 self.op.node_name, errors.ECODE_STATE)
6166 elif self.op.powered is not None:
6167 raise errors.OpPrereqError(("Unable to change powered state for node %s"
6168 " as it does not support out-of-band"
6169 " handling") % self.op.node_name,
6172 # If we're being de-offlined or un-drained, we'll promote ourselves to MC if needed
6173 if (self.op.drained is False or self.op.offline is False or
6174 (self.op.master_capable and not node.master_capable)):
6175 if _DecideSelfPromotion(self):
6176 self.op.master_candidate = True
6177 self.LogInfo("Auto-promoting node to master candidate")
6179 # If we're no longer master capable, we'll demote ourselves from MC
6180 if self.op.master_capable is False and node.master_candidate:
6181 self.LogInfo("Demoting from master candidate")
6182 self.op.master_candidate = False
6185 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
6186 if self.op.master_candidate:
6187 new_role = self._ROLE_CANDIDATE
6188 elif self.op.drained:
6189 new_role = self._ROLE_DRAINED
6190 elif self.op.offline:
6191 new_role = self._ROLE_OFFLINE
6192 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
6193 # False is still in new flags, which means we're un-setting (the current) role
6195 new_role = self._ROLE_REGULAR
6196 else: # no new flags, nothing, keep old role
6199 self.new_role = new_role
6201 if old_role == self._ROLE_OFFLINE and new_role != old_role:
6202 # Trying to transition out of offline status
6203 result = self.rpc.call_version([node.name])[node.name]
6205 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
6206 " to report its version: %s" %
6207 (node.name, result.fail_msg),
6210 self.LogWarning("Transitioning node from offline to online state"
6211 " without using re-add. Please make sure the node"
6214 # When changing the secondary ip, verify if this is a single-homed to
6215 # multi-homed transition or vice versa, and apply the relevant restrictions
6217 if self.op.secondary_ip:
6218 # Ok even without locking, because this can't be changed by any LU
6219 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
6220 master_singlehomed = master.secondary_ip == master.primary_ip
6221 if master_singlehomed and self.op.secondary_ip != node.primary_ip:
6222 if self.op.force and node.name == master.name:
6223 self.LogWarning("Transitioning from single-homed to multi-homed"
6224 " cluster; all nodes will require a secondary IP"
6227 raise errors.OpPrereqError("Changing the secondary ip on a"
6228 " single-homed cluster requires the"
6229 " --force option to be passed, and the"
6230 " target node to be the master",
6232 elif not master_singlehomed and self.op.secondary_ip == node.primary_ip:
6233 if self.op.force and node.name == master.name:
6234 self.LogWarning("Transitioning from multi-homed to single-homed"
6235 " cluster; secondary IP addresses will have to be"
6238 raise errors.OpPrereqError("Cannot set the secondary IP to be the"
6239 " same as the primary IP on a multi-homed"
6240 " cluster, unless the --force option is"
6241 " passed, and the target node is the"
6242 " master", errors.ECODE_INVAL)
6244 assert not (frozenset(affected_instances) -
6245 self.owned_locks(locking.LEVEL_INSTANCE))
6248 if affected_instances:
6249 msg = ("Cannot change secondary IP address: offline node has"
6250 " instances (%s) configured to use it" %
6251 utils.CommaJoin(affected_instances.keys()))
6252 raise errors.OpPrereqError(msg, errors.ECODE_STATE)
6254 # On online nodes, check that no instances are running, and that
6255 # the node has the new ip and we can reach it.
6256 for instance in affected_instances.values():
6257 _CheckInstanceState(self, instance, INSTANCE_DOWN,
6258 msg="cannot change secondary ip")
6260 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
6261 if master.name != node.name:
6262 # check reachability from master secondary ip to new secondary ip
6263 if not netutils.TcpPing(self.op.secondary_ip,
6264 constants.DEFAULT_NODED_PORT,
6265 source=master.secondary_ip):
6266 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
6267 " based ping to node daemon port",
6268 errors.ECODE_ENVIRON)
6270 if self.op.ndparams:
6271 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
6272 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
6273 self.new_ndparams = new_ndparams
6275 if self.op.hv_state:
6276 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
6277 self.node.hv_state_static)
6279 if self.op.disk_state:
6280 self.new_disk_state = \
6281 _MergeAndVerifyDiskState(self.op.disk_state,
6282 self.node.disk_state_static)
6284 def Exec(self, feedback_fn):
6289 old_role = self.old_role
6290 new_role = self.new_role
6294 if self.op.ndparams:
6295 node.ndparams = self.new_ndparams
6297 if self.op.powered is not None:
6298 node.powered = self.op.powered
6300 if self.op.hv_state:
6301 node.hv_state_static = self.new_hv_state
6303 if self.op.disk_state:
6304 node.disk_state_static = self.new_disk_state
6306 for attr in ["master_capable", "vm_capable"]:
6307 val = getattr(self.op, attr)
6309 setattr(node, attr, val)
6310 result.append((attr, str(val)))
6312 if new_role != old_role:
6313 # Tell the node to demote itself, if no longer MC and not offline
6314 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
6315 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
6317 self.LogWarning("Node failed to demote itself: %s", msg)
6319 new_flags = self._R2F[new_role]
6320 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
6322 result.append((desc, str(nf)))
6323 (node.master_candidate, node.drained, node.offline) = new_flags
6325 # when we locked all nodes, adjust the candidate pool before updating this node
6327 _AdjustCandidatePool(self, [node.name])
6329 if self.op.secondary_ip:
6330 node.secondary_ip = self.op.secondary_ip
6331 result.append(("secondary_ip", self.op.secondary_ip))
6333 # this will trigger configuration file update, if needed
6334 self.cfg.Update(node, feedback_fn)
6336 # this will trigger job queue propagation or cleanup if the mc flag changed
6338 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
6339 self.context.ReaddNode(node)
6344 class LUNodePowercycle(NoHooksLU):
6345 """Powercycles a node.
6350 def CheckArguments(self):
6351 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6352 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
6353 raise errors.OpPrereqError("The node is the master and the force"
6354 " parameter was not set",
6357 def ExpandNames(self):
6358 """Locking for PowercycleNode.
6360 This is a last-resort option and shouldn't block on other
6361 jobs. Therefore, we grab no locks.
6364 self.needed_locks = {}
6366 def Exec(self, feedback_fn):
6370 result = self.rpc.call_node_powercycle(self.op.node_name,
6371 self.cfg.GetHypervisorType())
6372 result.Raise("Failed to schedule the reboot")
6373 return result.payload
6376 class LUClusterQuery(NoHooksLU):
6377 """Query cluster configuration.
6382 def ExpandNames(self):
6383 self.needed_locks = {}
6385 def Exec(self, feedback_fn):
6386 """Return cluster config.
6389 cluster = self.cfg.GetClusterInfo()
6392 # Filter just for enabled hypervisors
6393 for os_name, hv_dict in cluster.os_hvp.items():
6394 os_hvp[os_name] = {}
6395 for hv_name, hv_params in hv_dict.items():
6396 if hv_name in cluster.enabled_hypervisors:
6397 os_hvp[os_name][hv_name] = hv_params
6399 # Convert ip_family to ip_version
6400 primary_ip_version = constants.IP4_VERSION
6401 if cluster.primary_ip_family == netutils.IP6Address.family:
6402 primary_ip_version = constants.IP6_VERSION
6405 "software_version": constants.RELEASE_VERSION,
6406 "protocol_version": constants.PROTOCOL_VERSION,
6407 "config_version": constants.CONFIG_VERSION,
6408 "os_api_version": max(constants.OS_API_VERSIONS),
6409 "export_version": constants.EXPORT_VERSION,
6410 "architecture": runtime.GetArchInfo(),
6411 "name": cluster.cluster_name,
6412 "master": cluster.master_node,
6413 "default_hypervisor": cluster.primary_hypervisor,
6414 "enabled_hypervisors": cluster.enabled_hypervisors,
6415 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
6416 for hypervisor_name in cluster.enabled_hypervisors]),
6418 "beparams": cluster.beparams,
6419 "osparams": cluster.osparams,
6420 "ipolicy": cluster.ipolicy,
6421 "nicparams": cluster.nicparams,
6422 "ndparams": cluster.ndparams,
6423 "diskparams": cluster.diskparams,
6424 "candidate_pool_size": cluster.candidate_pool_size,
6425 "master_netdev": cluster.master_netdev,
6426 "master_netmask": cluster.master_netmask,
6427 "use_external_mip_script": cluster.use_external_mip_script,
6428 "volume_group_name": cluster.volume_group_name,
6429 "drbd_usermode_helper": cluster.drbd_usermode_helper,
6430 "file_storage_dir": cluster.file_storage_dir,
6431 "shared_file_storage_dir": cluster.shared_file_storage_dir,
6432 "maintain_node_health": cluster.maintain_node_health,
6433 "ctime": cluster.ctime,
6434 "mtime": cluster.mtime,
6435 "uuid": cluster.uuid,
6436 "tags": list(cluster.GetTags()),
6437 "uid_pool": cluster.uid_pool,
6438 "default_iallocator": cluster.default_iallocator,
6439 "reserved_lvs": cluster.reserved_lvs,
6440 "primary_ip_version": primary_ip_version,
6441 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
6442 "hidden_os": cluster.hidden_os,
6443 "blacklisted_os": cluster.blacklisted_os,
6449 class LUClusterConfigQuery(NoHooksLU):
6450 """Return configuration values.
6455 def CheckArguments(self):
6456 self.cq = _ClusterQuery(None, self.op.output_fields, False)
6458 def ExpandNames(self):
6459 self.cq.ExpandNames(self)
6461 def DeclareLocks(self, level):
6462 self.cq.DeclareLocks(self, level)
6464 def Exec(self, feedback_fn):
6465 result = self.cq.OldStyleQuery(self)
6467 assert len(result) == 1
6472 class _ClusterQuery(_QueryBase):
6473 FIELDS = query.CLUSTER_FIELDS
6475 #: Do not sort (there is only one item)
6478 def ExpandNames(self, lu):
6479 lu.needed_locks = {}
6481 # The following variables interact with _QueryBase._GetNames
6482 self.wanted = locking.ALL_SET
6483 self.do_locking = self.use_locking
6486 raise errors.OpPrereqError("Can not use locking for cluster queries",
6489 def DeclareLocks(self, lu, level):
6492 def _GetQueryData(self, lu):
6493 """Computes the list of nodes and their attributes.
6496 # Locking is not used
6497 assert not (compat.any(lu.glm.is_owned(level)
6498 for level in locking.LEVELS
6499 if level != locking.LEVEL_CLUSTER) or
6500 self.do_locking or self.use_locking)
6502 if query.CQ_CONFIG in self.requested_data:
6503 cluster = lu.cfg.GetClusterInfo()
6505 cluster = NotImplemented
6507 if query.CQ_QUEUE_DRAINED in self.requested_data:
6508 drain_flag = os.path.exists(pathutils.JOB_QUEUE_DRAIN_FILE)
6510 drain_flag = NotImplemented
6512 if query.CQ_WATCHER_PAUSE in self.requested_data:
6513 master_name = lu.cfg.GetMasterNode()
6515 result = lu.rpc.call_get_watcher_pause(master_name)
6516 result.Raise("Can't retrieve watcher pause from master node '%s'" %
6519 watcher_pause = result.payload
6521 watcher_pause = NotImplemented
6523 return query.ClusterQueryData(cluster, drain_flag, watcher_pause)
6526 class LUInstanceActivateDisks(NoHooksLU):
6527 """Bring up an instance's disks.
6532 def ExpandNames(self):
6533 self._ExpandAndLockInstance()
6534 self.needed_locks[locking.LEVEL_NODE] = []
6535 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6537 def DeclareLocks(self, level):
6538 if level == locking.LEVEL_NODE:
6539 self._LockInstancesNodes()
6541 def CheckPrereq(self):
6542 """Check prerequisites.
6544 This checks that the instance is in the cluster.
6547 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6548 assert self.instance is not None, \
6549 "Cannot retrieve locked instance %s" % self.op.instance_name
6550 _CheckNodeOnline(self, self.instance.primary_node)
6552 def Exec(self, feedback_fn):
6553 """Activate the disks.
6556 disks_ok, disks_info = \
6557 _AssembleInstanceDisks(self, self.instance,
6558 ignore_size=self.op.ignore_size)
6560 raise errors.OpExecError("Cannot activate block devices")
6562 if self.op.wait_for_sync:
6563 if not _WaitForSync(self, self.instance):
6564 raise errors.OpExecError("Some disks of the instance are degraded!")
6569 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
6571 """Prepare the block devices for an instance.
6573 This sets up the block devices on all nodes.
6575 @type lu: L{LogicalUnit}
6576 @param lu: the logical unit on whose behalf we execute
6577 @type instance: L{objects.Instance}
6578 @param instance: the instance for whose disks we assemble
6579 @type disks: list of L{objects.Disk} or None
6580 @param disks: which disks to assemble (or all, if None)
6581 @type ignore_secondaries: boolean
6582 @param ignore_secondaries: if true, errors on secondary nodes
6583 won't result in an error return from the function
6584 @type ignore_size: boolean
6585 @param ignore_size: if true, the current known size of the disk
6586 will not be used during the disk activation, useful for cases
6587 when the size is wrong
6588 @return: False if the operation failed, otherwise a list of
6589 (host, instance_visible_name, node_visible_name)
6590 with the mapping from node devices to instance devices
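A minimal usage sketch (illustrative only; it mirrors the call in
L{LUInstanceActivateDisks.Exec} above and assumes an LU object C{lu} and a
locked C{instance}, with C{ignore_size} given just as an example keyword)::

  disks_ok, device_info = _AssembleInstanceDisks(lu, instance,
                                                 ignore_size=True)
  if not disks_ok:
    raise errors.OpExecError("Cannot activate block devices")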
6595 iname = instance.name
6596 disks = _ExpandCheckDisks(instance, disks)
6598 # With the two passes mechanism we try to reduce the window of
6599 # opportunity for the race condition of switching DRBD to primary
6600 # before handshaking occurred, but we do not eliminate it
6602 # The proper fix would be to wait (with some limits) until the
6603 # connection has been made and drbd transitions from WFConnection
6604 # into any other network-connected state (Connected, SyncTarget,
6607 # 1st pass, assemble on all nodes in secondary mode
6608 for idx, inst_disk in enumerate(disks):
6609 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6611 node_disk = node_disk.Copy()
6612 node_disk.UnsetSize()
6613 lu.cfg.SetDiskID(node_disk, node)
6614 result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6616 msg = result.fail_msg
6618 is_offline_secondary = (node in instance.secondary_nodes and
6620 lu.LogWarning("Could not prepare block device %s on node %s"
6621 " (is_primary=False, pass=1): %s",
6622 inst_disk.iv_name, node, msg)
6623 if not (ignore_secondaries or is_offline_secondary):
6626 # FIXME: race condition on drbd migration to primary
6628 # 2nd pass, do only the primary node
6629 for idx, inst_disk in enumerate(disks):
6632 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6633 if node != instance.primary_node:
6636 node_disk = node_disk.Copy()
6637 node_disk.UnsetSize()
6638 lu.cfg.SetDiskID(node_disk, node)
6639 result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6641 msg = result.fail_msg
6643 lu.LogWarning("Could not prepare block device %s on node %s"
6644 " (is_primary=True, pass=2): %s",
6645 inst_disk.iv_name, node, msg)
6648 dev_path = result.payload
6650 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
6652 # leave the disks configured for the primary node
6653 # this is a workaround; a better fix would be to improve
6654 # the logical/physical id handling
6656 lu.cfg.SetDiskID(disk, instance.primary_node)
6658 return disks_ok, device_info
6661 def _StartInstanceDisks(lu, instance, force):
6662 """Start the disks of an instance.
6665 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
6666 ignore_secondaries=force)
6668 _ShutdownInstanceDisks(lu, instance)
6669 if force is not None and not force:
6671 hint=("If the message above refers to a secondary node,"
6672 " you can retry the operation using '--force'"))
6673 raise errors.OpExecError("Disk consistency error")
6676 class LUInstanceDeactivateDisks(NoHooksLU):
6677 """Shutdown an instance's disks.
6682 def ExpandNames(self):
6683 self._ExpandAndLockInstance()
6684 self.needed_locks[locking.LEVEL_NODE] = []
6685 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6687 def DeclareLocks(self, level):
6688 if level == locking.LEVEL_NODE:
6689 self._LockInstancesNodes()
6691 def CheckPrereq(self):
6692 """Check prerequisites.
6694 This checks that the instance is in the cluster.
6697 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6698 assert self.instance is not None, \
6699 "Cannot retrieve locked instance %s" % self.op.instance_name
6701 def Exec(self, feedback_fn):
6702 """Deactivate the disks
6705 instance = self.instance
6707 _ShutdownInstanceDisks(self, instance)
6709 _SafeShutdownInstanceDisks(self, instance)
6712 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
6713 """Shutdown block devices of an instance.
6715 This function checks if an instance is running, before calling
6716 _ShutdownInstanceDisks.
6719 _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
6720 _ShutdownInstanceDisks(lu, instance, disks=disks)
6723 def _ExpandCheckDisks(instance, disks):
6724 """Return the instance disks selected by the disks list
6726 @type disks: list of L{objects.Disk} or None
6727 @param disks: selected disks
6728 @rtype: list of L{objects.Disk}
6729 @return: selected instance disks to act on
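For example (illustrative), acting only on a subset of an instance's disks::

  first_disk_only = _ExpandCheckDisks(instance, instance.disks[:1])

Passing a disk that does not belong to the instance raises
L{errors.ProgrammerError}.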
6733 return instance.disks
6735 if not set(disks).issubset(instance.disks):
6736 raise errors.ProgrammerError("Can only act on disks belonging to the"
6741 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
6742 """Shutdown block devices of an instance.
6744 This does the shutdown on all nodes of the instance.
6746 If ignore_primary is false, errors on the primary node are not ignored.
6751 disks = _ExpandCheckDisks(instance, disks)
6754 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
6755 lu.cfg.SetDiskID(top_disk, node)
6756 result = lu.rpc.call_blockdev_shutdown(node, (top_disk, instance))
6757 msg = result.fail_msg
6759 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
6760 disk.iv_name, node, msg)
6761 if ((node == instance.primary_node and not ignore_primary) or
6762 (node != instance.primary_node and not result.offline)):
6767 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
6768 """Checks if a node has enough free memory.
6770 This function checks if a given node has the needed amount of free
6771 memory. In case the node has less memory or we cannot get the
6772 information from the node, this function raises an OpPrereqError exception.
6775 @type lu: C{LogicalUnit}
6776 @param lu: a logical unit from which we get configuration data
6778 @param node: the node to check
6779 @type reason: C{str}
6780 @param reason: string to use in the error message
6781 @type requested: C{int}
6782 @param requested: the amount of memory in MiB to check for
6783 @type hypervisor_name: C{str}
6784 @param hypervisor_name: the hypervisor to ask for memory stats
6786 @return: node current free memory
6787 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
6788 we cannot check the node
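A usage sketch (illustrative; it mirrors the call in
L{LUInstanceStartup.CheckPrereq} below, where the requested amount comes from
the instance's filled backend parameters)::

  free_mem = _CheckNodeFreeMemory(self, instance.primary_node,
                                  "starting instance %s" % instance.name,
                                  bep[constants.BE_MINMEM],
                                  instance.hypervisor)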
6791 nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
6792 nodeinfo[node].Raise("Can't get data from node %s" % node,
6793 prereq=True, ecode=errors.ECODE_ENVIRON)
6794 (_, _, (hv_info, )) = nodeinfo[node].payload
6796 free_mem = hv_info.get("memory_free", None)
6797 if not isinstance(free_mem, int):
6798 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
6799 " was '%s'" % (node, free_mem),
6800 errors.ECODE_ENVIRON)
6801 if requested > free_mem:
6802 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
6803 " needed %s MiB, available %s MiB" %
6804 (node, reason, requested, free_mem),
6809 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
6810 """Checks if nodes have enough free disk space in all the VGs.
6812 This function checks if all given nodes have the needed amount of
6813 free disk. In case any node has less disk or we cannot get the
6814 information from the node, this function raises an OpPrereqError exception.
6817 @type lu: C{LogicalUnit}
6818 @param lu: a logical unit from which we get configuration data
6819 @type nodenames: C{list}
6820 @param nodenames: the list of node names to check
6821 @type req_sizes: C{dict}
6822 @param req_sizes: the hash of vg and corresponding amount of disk in MiB
6824 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6825 or we cannot check the node
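A usage sketch (illustrative; C{pnode}, C{snode}, the volume group name and
the size are made-up values)::

  # require 10 GiB of free space in volume group "xenvg" on both nodes
  _CheckNodesFreeDiskPerVG(lu, [pnode.name, snode.name],
                           {"xenvg": 10 * 1024})

Each (vg, required size) pair is checked via L{_CheckNodesFreeDiskOnVG}
below.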
6828 for vg, req_size in req_sizes.items():
6829 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
6832 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
6833 """Checks if nodes have enough free disk space in the specified VG.
6835 This function checks if all given nodes have the needed amount of
6836 free disk. In case any node has less disk or we cannot get the
6837 information from the node, this function raises an OpPrereqError exception.
6840 @type lu: C{LogicalUnit}
6841 @param lu: a logical unit from which we get configuration data
6842 @type nodenames: C{list}
6843 @param nodenames: the list of node names to check
6845 @param vg: the volume group to check
6846 @type requested: C{int}
6847 @param requested: the amount of disk in MiB to check for
6848 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6849 or we cannot check the node
6852 nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
6853 for node in nodenames:
6854 info = nodeinfo[node]
6855 info.Raise("Cannot get current information from node %s" % node,
6856 prereq=True, ecode=errors.ECODE_ENVIRON)
6857 (_, (vg_info, ), _) = info.payload
6858 vg_free = vg_info.get("vg_free", None)
6859 if not isinstance(vg_free, int):
6860 raise errors.OpPrereqError("Can't compute free disk space on node"
6861 " %s for vg %s, result was '%s'" %
6862 (node, vg, vg_free), errors.ECODE_ENVIRON)
6863 if requested > vg_free:
6864 raise errors.OpPrereqError("Not enough disk space on target node %s"
6865 " vg %s: required %d MiB, available %d MiB" %
6866 (node, vg, requested, vg_free),
6870 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
6871 """Checks if nodes have enough physical CPUs
6873 This function checks if all given nodes have the needed number of
6874 physical CPUs. In case any node has less CPUs or we cannot get the
6875 information from the node, this function raises an OpPrereqError exception.
6878 @type lu: C{LogicalUnit}
6879 @param lu: a logical unit from which we get configuration data
6880 @type nodenames: C{list}
6881 @param nodenames: the list of node names to check
6882 @type requested: C{int}
6883 @param requested: the minimum acceptable number of physical CPUs
6884 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
6885 or we cannot check the node
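A usage sketch (illustrative; the node list and the required CPU count are
made-up values)::

  _CheckNodesPhysicalCPUs(lu, [instance.primary_node], 4,
                          instance.hypervisor)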
6888 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
6889 for node in nodenames:
6890 info = nodeinfo[node]
6891 info.Raise("Cannot get current information from node %s" % node,
6892 prereq=True, ecode=errors.ECODE_ENVIRON)
6893 (_, _, (hv_info, )) = info.payload
6894 num_cpus = hv_info.get("cpu_total", None)
6895 if not isinstance(num_cpus, int):
6896 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
6897 " on node %s, result was '%s'" %
6898 (node, num_cpus), errors.ECODE_ENVIRON)
6899 if requested > num_cpus:
6900 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
6901 "required" % (node, num_cpus, requested),
6905 class LUInstanceStartup(LogicalUnit):
6906 """Starts an instance.
6909 HPATH = "instance-start"
6910 HTYPE = constants.HTYPE_INSTANCE
6913 def CheckArguments(self):
6915 if self.op.beparams:
6916 # fill the beparams dict
6917 objects.UpgradeBeParams(self.op.beparams)
6918 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6920 def ExpandNames(self):
6921 self._ExpandAndLockInstance()
6922 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
6924 def DeclareLocks(self, level):
6925 if level == locking.LEVEL_NODE_RES:
6926 self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
6928 def BuildHooksEnv(self):
6931 This runs on master, primary and secondary nodes of the instance.
6935 "FORCE": self.op.force,
6938 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6942 def BuildHooksNodes(self):
6943 """Build hooks nodes.
6946 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6949 def CheckPrereq(self):
6950 """Check prerequisites.
6952 This checks that the instance is in the cluster.
6955 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6956 assert self.instance is not None, \
6957 "Cannot retrieve locked instance %s" % self.op.instance_name
6960 if self.op.hvparams:
6961 # check hypervisor parameter syntax (locally)
6962 cluster = self.cfg.GetClusterInfo()
6963 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6964 filled_hvp = cluster.FillHV(instance)
6965 filled_hvp.update(self.op.hvparams)
6966 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
6967 hv_type.CheckParameterSyntax(filled_hvp)
6968 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
6970 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6972 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
6974 if self.primary_offline and self.op.ignore_offline_nodes:
6975 self.LogWarning("Ignoring offline primary node")
6977 if self.op.hvparams or self.op.beparams:
6978 self.LogWarning("Overridden parameters are ignored")
6980 _CheckNodeOnline(self, instance.primary_node)
6982 bep = self.cfg.GetClusterInfo().FillBE(instance)
6983 bep.update(self.op.beparams)
6985 # check bridges existence
6986 _CheckInstanceBridgesExist(self, instance)
6988 remote_info = self.rpc.call_instance_info(instance.primary_node,
6990 instance.hypervisor)
6991 remote_info.Raise("Error checking node %s" % instance.primary_node,
6992 prereq=True, ecode=errors.ECODE_ENVIRON)
6993 if not remote_info.payload: # not running already
6994 _CheckNodeFreeMemory(self, instance.primary_node,
6995 "starting instance %s" % instance.name,
6996 bep[constants.BE_MINMEM], instance.hypervisor)
6998 def Exec(self, feedback_fn):
6999 """Start the instance.
7002 instance = self.instance
7003 force = self.op.force
7005 if not self.op.no_remember:
7006 self.cfg.MarkInstanceUp(instance.name)
7008 if self.primary_offline:
7009 assert self.op.ignore_offline_nodes
7010 self.LogInfo("Primary node offline, marked instance as started")
7012 node_current = instance.primary_node
7014 _StartInstanceDisks(self, instance, force)
7017 self.rpc.call_instance_start(node_current,
7018 (instance, self.op.hvparams,
7020 self.op.startup_paused)
7021 msg = result.fail_msg
7023 _ShutdownInstanceDisks(self, instance)
7024 raise errors.OpExecError("Could not start instance: %s" % msg)
7027 class LUInstanceReboot(LogicalUnit):
7028 """Reboot an instance.
7031 HPATH = "instance-reboot"
7032 HTYPE = constants.HTYPE_INSTANCE
7035 def ExpandNames(self):
7036 self._ExpandAndLockInstance()
7038 def BuildHooksEnv(self):
7041 This runs on master, primary and secondary nodes of the instance.
7045 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
7046 "REBOOT_TYPE": self.op.reboot_type,
7047 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7050 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7054 def BuildHooksNodes(self):
7055 """Build hooks nodes.
7058 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7061 def CheckPrereq(self):
7062 """Check prerequisites.
7064 This checks that the instance is in the cluster.
7067 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7068 assert self.instance is not None, \
7069 "Cannot retrieve locked instance %s" % self.op.instance_name
7070 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
7071 _CheckNodeOnline(self, instance.primary_node)
7073 # check bridges existence
7074 _CheckInstanceBridgesExist(self, instance)
7076 def Exec(self, feedback_fn):
7077 """Reboot the instance.
7080 instance = self.instance
7081 ignore_secondaries = self.op.ignore_secondaries
7082 reboot_type = self.op.reboot_type
7084 remote_info = self.rpc.call_instance_info(instance.primary_node,
7086 instance.hypervisor)
7087 remote_info.Raise("Error checking node %s" % instance.primary_node)
7088 instance_running = bool(remote_info.payload)
7090 node_current = instance.primary_node
7092 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
7093 constants.INSTANCE_REBOOT_HARD]:
7094 for disk in instance.disks:
7095 self.cfg.SetDiskID(disk, node_current)
7096 result = self.rpc.call_instance_reboot(node_current, instance,
7098 self.op.shutdown_timeout)
7099 result.Raise("Could not reboot instance")
7101 if instance_running:
7102 result = self.rpc.call_instance_shutdown(node_current, instance,
7103 self.op.shutdown_timeout)
7104 result.Raise("Could not shutdown instance for full reboot")
7105 _ShutdownInstanceDisks(self, instance)
7107 self.LogInfo("Instance %s was already stopped, starting now",
7109 _StartInstanceDisks(self, instance, ignore_secondaries)
7110 result = self.rpc.call_instance_start(node_current,
7111 (instance, None, None), False)
7112 msg = result.fail_msg
7114 _ShutdownInstanceDisks(self, instance)
7115 raise errors.OpExecError("Could not start instance for"
7116 " full reboot: %s" % msg)
7118 self.cfg.MarkInstanceUp(instance.name)
7121 class LUInstanceShutdown(LogicalUnit):
7122 """Shutdown an instance.
7125 HPATH = "instance-stop"
7126 HTYPE = constants.HTYPE_INSTANCE
7129 def ExpandNames(self):
7130 self._ExpandAndLockInstance()
7132 def BuildHooksEnv(self):
7135 This runs on master, primary and secondary nodes of the instance.
7138 env = _BuildInstanceHookEnvByObject(self, self.instance)
7139 env["TIMEOUT"] = self.op.timeout
7142 def BuildHooksNodes(self):
7143 """Build hooks nodes.
7146 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7149 def CheckPrereq(self):
7150 """Check prerequisites.
7152 This checks that the instance is in the cluster.
7155 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7156 assert self.instance is not None, \
7157 "Cannot retrieve locked instance %s" % self.op.instance_name
7159 if not self.op.force:
7160 _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
7162 self.LogWarning("Ignoring offline instance check")
7164 self.primary_offline = \
7165 self.cfg.GetNodeInfo(self.instance.primary_node).offline
7167 if self.primary_offline and self.op.ignore_offline_nodes:
7168 self.LogWarning("Ignoring offline primary node")
7170 _CheckNodeOnline(self, self.instance.primary_node)
7172 def Exec(self, feedback_fn):
7173 """Shutdown the instance.
7176 instance = self.instance
7177 node_current = instance.primary_node
7178 timeout = self.op.timeout
7180 # If the instance is offline we shouldn't mark it as down, as that
7181 # resets the offline flag.
7182 if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
7183 self.cfg.MarkInstanceDown(instance.name)
7185 if self.primary_offline:
7186 assert self.op.ignore_offline_nodes
7187 self.LogInfo("Primary node offline, marked instance as stopped")
7189 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
7190 msg = result.fail_msg
7192 self.LogWarning("Could not shutdown instance: %s", msg)
7194 _ShutdownInstanceDisks(self, instance)
7197 class LUInstanceReinstall(LogicalUnit):
7198 """Reinstall an instance.
7201 HPATH = "instance-reinstall"
7202 HTYPE = constants.HTYPE_INSTANCE
7205 def ExpandNames(self):
7206 self._ExpandAndLockInstance()
7208 def BuildHooksEnv(self):
7211 This runs on master, primary and secondary nodes of the instance.
7214 return _BuildInstanceHookEnvByObject(self, self.instance)
7216 def BuildHooksNodes(self):
7217 """Build hooks nodes.
7220 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7223 def CheckPrereq(self):
7224 """Check prerequisites.
7226 This checks that the instance is in the cluster and is not running.
7229 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7230 assert instance is not None, \
7231 "Cannot retrieve locked instance %s" % self.op.instance_name
7232 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
7233 " offline, cannot reinstall")
7235 if instance.disk_template == constants.DT_DISKLESS:
7236 raise errors.OpPrereqError("Instance '%s' has no disks" %
7237 self.op.instance_name,
7239 _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
7241 if self.op.os_type is not None:
7243 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
7244 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
7245 instance_os = self.op.os_type
7247 instance_os = instance.os
7249 nodelist = list(instance.all_nodes)
7251 if self.op.osparams:
7252 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
7253 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
7254 self.os_inst = i_osdict # the new dict (without defaults)
7258 self.instance = instance
7260 def Exec(self, feedback_fn):
7261 """Reinstall the instance.
7264 inst = self.instance
7266 if self.op.os_type is not None:
7267 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
7268 inst.os = self.op.os_type
7269 # Write to configuration
7270 self.cfg.Update(inst, feedback_fn)
7272 _StartInstanceDisks(self, inst, None)
7274 feedback_fn("Running the instance OS create scripts...")
7275 # FIXME: pass debug option from opcode to backend
7276 result = self.rpc.call_instance_os_add(inst.primary_node,
7277 (inst, self.os_inst), True,
7278 self.op.debug_level)
7279 result.Raise("Could not install OS for instance %s on node %s" %
7280 (inst.name, inst.primary_node))
7282 _ShutdownInstanceDisks(self, inst)
7285 class LUInstanceRecreateDisks(LogicalUnit):
7286 """Recreate an instance's missing disks.
7289 HPATH = "instance-recreate-disks"
7290 HTYPE = constants.HTYPE_INSTANCE
7293 _MODIFYABLE = compat.UniqueFrozenset([
7294 constants.IDISK_SIZE,
7295 constants.IDISK_MODE,
7298 # New or changed disk parameters may have different semantics
7299 assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
7300 constants.IDISK_ADOPT,
7302 # TODO: Implement support for changing the VG while recreating
7304 constants.IDISK_METAVG,
7305 constants.IDISK_PROVIDER,
7308 def _RunAllocator(self):
7309 """Run the allocator based on input opcode.
7312 be_full = self.cfg.GetClusterInfo().FillBE(self.instance)
7315 # The allocator should actually run in "relocate" mode, but current
7316 # allocators don't support relocating all the nodes of an instance at
7317 # the same time. As a workaround we use "allocate" mode, but this is
7318 # suboptimal for two reasons:
7319 # - The instance name passed to the allocator is present in the list of
7320 # existing instances, so there could be a conflict within the
7321 # internal structures of the allocator. This doesn't happen with the
7322 # current allocators, but it's a liability.
7323 # - The allocator counts the resources used by the instance twice: once
7324 # because the instance exists already, and once because it tries to
7325 # allocate a new instance.
7326 # The allocator could choose some of the nodes on which the instance is
7327 # running, but that's not a problem. If the instance nodes are broken,
7328 # they should already be marked as drained or offline, and hence
7329 # skipped by the allocator. If instance disks have been lost for other
7330 # reasons, then recreating the disks on the same nodes should be fine.
7331 disk_template = self.instance.disk_template
7332 spindle_use = be_full[constants.BE_SPINDLE_USE]
7333 req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
7334 disk_template=disk_template,
7335 tags=list(self.instance.GetTags()),
7336 os=self.instance.os,
7338 vcpus=be_full[constants.BE_VCPUS],
7339 memory=be_full[constants.BE_MAXMEM],
7340 spindle_use=spindle_use,
7341 disks=[{constants.IDISK_SIZE: d.size,
7342 constants.IDISK_MODE: d.mode}
7343 for d in self.instance.disks],
7344 hypervisor=self.instance.hypervisor)
7345 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
7347 ial.Run(self.op.iallocator)
7349 assert req.RequiredNodes() == len(self.instance.all_nodes)
7352 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7353 " %s" % (self.op.iallocator, ial.info),
7356 self.op.nodes = ial.result
7357 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7358 self.op.instance_name, self.op.iallocator,
7359 utils.CommaJoin(ial.result))
7361 def CheckArguments(self):
7362 if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
7363 # Normalize and convert deprecated list of disk indices
7364 self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
7366 duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
7368 raise errors.OpPrereqError("Some disks have been specified more than"
7369 " once: %s" % utils.CommaJoin(duplicates),
7372 # We don't want _CheckIAllocatorOrNode selecting the default iallocator
7373 # when neither iallocator nor nodes are specified
7374 if self.op.iallocator or self.op.nodes:
7375 _CheckIAllocatorOrNode(self, "iallocator", "nodes")
7377 for (idx, params) in self.op.disks:
7378 utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
7379 unsupported = frozenset(params.keys()) - self._MODIFYABLE
7381 raise errors.OpPrereqError("Parameters for disk %s try to change"
7382 " unmodifyable parameter(s): %s" %
7383 (idx, utils.CommaJoin(unsupported)),
7386 def ExpandNames(self):
7387 self._ExpandAndLockInstance()
7388 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7391 self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
7392 self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
7394 self.needed_locks[locking.LEVEL_NODE] = []
7395 if self.op.iallocator:
7396 # iallocator will select a new node in the same group
7397 self.needed_locks[locking.LEVEL_NODEGROUP] = []
7398 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
7400 self.needed_locks[locking.LEVEL_NODE_RES] = []
7402 def DeclareLocks(self, level):
7403 if level == locking.LEVEL_NODEGROUP:
7404 assert self.op.iallocator is not None
7405 assert not self.op.nodes
7406 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
7407 self.share_locks[locking.LEVEL_NODEGROUP] = 1
7408 # Lock the primary group used by the instance optimistically; this
7409 # requires going via the node before it's locked, requiring
7410 # verification later on
7411 self.needed_locks[locking.LEVEL_NODEGROUP] = \
7412 self.cfg.GetInstanceNodeGroups(self.op.instance_name, primary_only=True)
7414 elif level == locking.LEVEL_NODE:
7415 # If an allocator is used, then we lock all the nodes in the current
7416 # instance group, as we don't know yet which ones will be selected;
7417 # if we replace the nodes without using an allocator, locks are
7418 # already declared in ExpandNames; otherwise, we need to lock all the
7419 # instance nodes for disk re-creation
7420 if self.op.iallocator:
7421 assert not self.op.nodes
7422 assert not self.needed_locks[locking.LEVEL_NODE]
7423 assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1
7425 # Lock member nodes of the group of the primary node
7426 for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
7427 self.needed_locks[locking.LEVEL_NODE].extend(
7428 self.cfg.GetNodeGroup(group_uuid).members)
7430 assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
7431 elif not self.op.nodes:
7432 self._LockInstancesNodes(primary_only=False)
7433 elif level == locking.LEVEL_NODE_RES:
7435 self.needed_locks[locking.LEVEL_NODE_RES] = \
7436 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
7438 def BuildHooksEnv(self):
7441 This runs on master, primary and secondary nodes of the instance.
7444 return _BuildInstanceHookEnvByObject(self, self.instance)
7446 def BuildHooksNodes(self):
7447 """Build hooks nodes.
7450 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7453 def CheckPrereq(self):
7454 """Check prerequisites.
7456 This checks that the instance is in the cluster and is not running.
7459 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7460 assert instance is not None, \
7461 "Cannot retrieve locked instance %s" % self.op.instance_name
7463 if len(self.op.nodes) != len(instance.all_nodes):
7464 raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
7465 " %d replacement nodes were specified" %
7466 (instance.name, len(instance.all_nodes),
7467 len(self.op.nodes)),
7469 assert instance.disk_template != constants.DT_DRBD8 or \
7470 len(self.op.nodes) == 2
7471 assert instance.disk_template != constants.DT_PLAIN or \
7472 len(self.op.nodes) == 1
7473 primary_node = self.op.nodes[0]
7475 primary_node = instance.primary_node
7476 if not self.op.iallocator:
7477 _CheckNodeOnline(self, primary_node)
7479 if instance.disk_template == constants.DT_DISKLESS:
7480 raise errors.OpPrereqError("Instance '%s' has no disks" %
7481 self.op.instance_name, errors.ECODE_INVAL)
7483 # Verify if node group locks are still correct
7484 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
7486 # Node group locks are acquired only for the primary node (and only
7487 # when the allocator is used)
7488 _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
7491 # if we replace nodes *and* the old primary is offline, we don't
7492 # check the instance state
7493 old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
7494 if not ((self.op.iallocator or self.op.nodes) and old_pnode.offline):
7495 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7496 msg="cannot recreate disks")
7499 self.disks = dict(self.op.disks)
7501 self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
7503 maxidx = max(self.disks.keys())
7504 if maxidx >= len(instance.disks):
7505 raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
7508 if ((self.op.nodes or self.op.iallocator) and
7509 sorted(self.disks.keys()) != range(len(instance.disks))):
7510 raise errors.OpPrereqError("Can't recreate disks partially and"
7511 " change the nodes at the same time",
7514 self.instance = instance
7516 if self.op.iallocator:
7517 self._RunAllocator()
7518 # Release unneeded node and node resource locks
7519 _ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
7520 _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
7521 _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
7523 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
7525 def Exec(self, feedback_fn):
7526 """Recreate the disks.
7529 instance = self.instance
7531 assert (self.owned_locks(locking.LEVEL_NODE) ==
7532 self.owned_locks(locking.LEVEL_NODE_RES))
7535 mods = [] # keeps track of needed changes
7537 for idx, disk in enumerate(instance.disks):
7539 changes = self.disks[idx]
7541 # Disk should not be recreated
7545 # update secondaries for disks, if needed
7546 if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
7547 # need to update the nodes and minors
7548 assert len(self.op.nodes) == 2
7549 assert len(disk.logical_id) == 6 # otherwise disk internals
7551 (_, _, old_port, _, _, old_secret) = disk.logical_id
7552 new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
7553 new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
7554 new_minors[0], new_minors[1], old_secret)
7555 assert len(disk.logical_id) == len(new_id)
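# Only the node names and DRBD minors change in the new logical_id; the old port and shared secret are reused.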
7559 mods.append((idx, new_id, changes))
7561 # now that we have passed all asserts above, we can apply the mods
7562 # in a single run (to avoid partial changes)
7563 for idx, new_id, changes in mods:
7564 disk = instance.disks[idx]
7565 if new_id is not None:
7566 assert disk.dev_type == constants.LD_DRBD8
7567 disk.logical_id = new_id
7569 disk.Update(size=changes.get(constants.IDISK_SIZE, None),
7570 mode=changes.get(constants.IDISK_MODE, None))
7572 # change primary node, if needed
7574 instance.primary_node = self.op.nodes[0]
7575 self.LogWarning("Changing the instance's nodes, you will have to"
7576 " remove any disks left on the older nodes manually")
7579 self.cfg.Update(instance, feedback_fn)
7581 # All touched nodes must be locked
7582 mylocks = self.owned_locks(locking.LEVEL_NODE)
7583 assert mylocks.issuperset(frozenset(instance.all_nodes))
7584 _CreateDisks(self, instance, to_skip=to_skip)
7587 class LUInstanceRename(LogicalUnit):
7588 """Rename an instance.
7591 HPATH = "instance-rename"
7592 HTYPE = constants.HTYPE_INSTANCE
7594 def CheckArguments(self):
7598 if self.op.ip_check and not self.op.name_check:
7599 # TODO: make the ip check more flexible and not depend on the name check
7600 raise errors.OpPrereqError("IP address check requires a name check",
7603 def BuildHooksEnv(self):
7606 This runs on master, primary and secondary nodes of the instance.
7609 env = _BuildInstanceHookEnvByObject(self, self.instance)
7610 env["INSTANCE_NEW_NAME"] = self.op.new_name
7613 def BuildHooksNodes(self):
7614 """Build hooks nodes.
7617 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7620 def CheckPrereq(self):
7621 """Check prerequisites.
7623 This checks that the instance is in the cluster and is not running.
7626 self.op.instance_name = _ExpandInstanceName(self.cfg,
7627 self.op.instance_name)
7628 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7629 assert instance is not None
7630 _CheckNodeOnline(self, instance.primary_node)
7631 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7632 msg="cannot rename")
7633 self.instance = instance
7635 new_name = self.op.new_name
7636 if self.op.name_check:
7637 hostname = _CheckHostnameSane(self, new_name)
7638 new_name = self.op.new_name = hostname.name
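# When an IP check was requested, probe the resolved IP (the noded port is merely a convenient target) and refuse the rename if anything answers.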
7639 if (self.op.ip_check and
7640 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
7641 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7642 (hostname.ip, new_name),
7643 errors.ECODE_NOTUNIQUE)
7645 instance_list = self.cfg.GetInstanceList()
7646 if new_name in instance_list and new_name != instance.name:
7647 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7648 new_name, errors.ECODE_EXISTS)
7650 def Exec(self, feedback_fn):
7651 """Rename the instance.
7654 inst = self.instance
7655 old_name = inst.name
7657 rename_file_storage = False
7658 if (inst.disk_template in constants.DTS_FILEBASED and
7659 self.op.new_name != inst.name):
7660 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7661 rename_file_storage = True
7663 self.cfg.RenameInstance(inst.name, self.op.new_name)
7664 # Change the instance lock. This is definitely safe while we hold the BGL.
7665 # Otherwise the new lock would have to be added in acquired mode.
7667 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
7668 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
7669 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
7671 # re-read the instance from the configuration after rename
7672 inst = self.cfg.GetInstanceInfo(self.op.new_name)
7674 if rename_file_storage:
7675 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7676 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
7677 old_file_storage_dir,
7678 new_file_storage_dir)
7679 result.Raise("Could not rename on node %s directory '%s' to '%s'"
7680 " (but the instance has been renamed in Ganeti)" %
7681 (inst.primary_node, old_file_storage_dir,
7682 new_file_storage_dir))
7684 _StartInstanceDisks(self, inst, None)
7685 # update info on disks
7686 info = _GetInstanceInfoText(inst)
7687 for (idx, disk) in enumerate(inst.disks):
7688 for node in inst.all_nodes:
7689 self.cfg.SetDiskID(disk, node)
7690 result = self.rpc.call_blockdev_setinfo(node, disk, info)
7692 self.LogWarning("Error setting info on node %s for disk %s: %s",
7693 node, idx, result.fail_msg)
7695 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
7696 old_name, self.op.debug_level)
7697 msg = result.fail_msg
7699 msg = ("Could not run OS rename script for instance %s on node %s"
7700 " (but the instance has been renamed in Ganeti): %s" %
7701 (inst.name, inst.primary_node, msg))
7702 self.LogWarning(msg)
7704 _ShutdownInstanceDisks(self, inst)
7709 class LUInstanceRemove(LogicalUnit):
7710 """Remove an instance.
7713 HPATH = "instance-remove"
7714 HTYPE = constants.HTYPE_INSTANCE
7717 def ExpandNames(self):
7718 self._ExpandAndLockInstance()
7719 self.needed_locks[locking.LEVEL_NODE] = []
7720 self.needed_locks[locking.LEVEL_NODE_RES] = []
7721 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7723 def DeclareLocks(self, level):
7724 if level == locking.LEVEL_NODE:
7725 self._LockInstancesNodes()
7726 elif level == locking.LEVEL_NODE_RES:
7728 self.needed_locks[locking.LEVEL_NODE_RES] = \
7729 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
7731 def BuildHooksEnv(self):
7734 This runs on master, primary and secondary nodes of the instance.
7737 env = _BuildInstanceHookEnvByObject(self, self.instance)
7738 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
7741 def BuildHooksNodes(self):
7742 """Build hooks nodes.
7745 nl = [self.cfg.GetMasterNode()]
7746 nl_post = list(self.instance.all_nodes) + nl
7747 return (nl, nl_post)
7749 def CheckPrereq(self):
7750 """Check prerequisites.
7752 This checks that the instance is in the cluster.
7755 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7756 assert self.instance is not None, \
7757 "Cannot retrieve locked instance %s" % self.op.instance_name
7759 def Exec(self, feedback_fn):
7760 """Remove the instance.
7763 instance = self.instance
7764 logging.info("Shutting down instance %s on node %s",
7765 instance.name, instance.primary_node)
7767 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
7768 self.op.shutdown_timeout)
7769 msg = result.fail_msg
7771 if self.op.ignore_failures:
7772 feedback_fn("Warning: can't shutdown instance: %s" % msg)
7774 raise errors.OpExecError("Could not shutdown instance %s on"
7776 (instance.name, instance.primary_node, msg))
7778 assert (self.owned_locks(locking.LEVEL_NODE) ==
7779 self.owned_locks(locking.LEVEL_NODE_RES))
7780 assert not (set(instance.all_nodes) -
7781 self.owned_locks(locking.LEVEL_NODE)), \
7782 "Not owning correct locks"
7784 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
7787 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
7788 """Utility function to remove an instance.
7791 logging.info("Removing block devices for instance %s", instance.name)
7793 if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
7794 if not ignore_failures:
7795 raise errors.OpExecError("Can't remove instance's disks")
7796 feedback_fn("Warning: can't remove instance's disks")
7798 logging.info("Removing instance %s out of cluster config", instance.name)
7800 lu.cfg.RemoveInstance(instance.name)
7802 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
7803 "Instance lock removal conflict"
7805 # Remove lock for the instance
7806 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
7809 class LUInstanceQuery(NoHooksLU):
7810 """Logical unit for querying instances.
7813 # pylint: disable=W0142
7816 def CheckArguments(self):
7817 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
7818 self.op.output_fields, self.op.use_locking)
7820 def ExpandNames(self):
7821 self.iq.ExpandNames(self)
7823 def DeclareLocks(self, level):
7824 self.iq.DeclareLocks(self, level)
7826 def Exec(self, feedback_fn):
7827 return self.iq.OldStyleQuery(self)
7830 def _ExpandNamesForMigration(lu):
7831 """Expands names for use with L{TLMigrateInstance}.
7833 @type lu: L{LogicalUnit}
7836 if lu.op.target_node is not None:
7837 lu.op.target_node = _ExpandNodeName(lu.cfg, lu.op.target_node)
7839 lu.needed_locks[locking.LEVEL_NODE] = []
7840 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7842 lu.needed_locks[locking.LEVEL_NODE_RES] = []
7843 lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
7845 # The node allocation lock is actually only needed for replicated instances
7846 # (e.g. DRBD8) and if an iallocator is used.
7847 lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
7850 def _DeclareLocksForMigration(lu, level):
7851 """Declares locks for L{TLMigrateInstance}.
7853 @type lu: L{LogicalUnit}
7854 @param level: Lock level
7857 if level == locking.LEVEL_NODE_ALLOC:
7858 assert lu.op.instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
7860 instance = lu.cfg.GetInstanceInfo(lu.op.instance_name)
7862 # Node locks are already declared here rather than at LEVEL_NODE as we need
7863 # the instance object anyway to declare the node allocation lock.
7864 if instance.disk_template in constants.DTS_EXT_MIRROR:
7865 if lu.op.target_node is None:
7866 lu.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7867 lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
7869 lu.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7871 del lu.recalculate_locks[locking.LEVEL_NODE]
7873 lu._LockInstancesNodes() # pylint: disable=W0212
7875 elif level == locking.LEVEL_NODE:
7876 # Node locks are declared together with the node allocation lock
7877 assert (lu.needed_locks[locking.LEVEL_NODE] or
7878 lu.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET)
7880 elif level == locking.LEVEL_NODE_RES:
7882 lu.needed_locks[locking.LEVEL_NODE_RES] = \
7883 _CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
7886 class LUInstanceFailover(LogicalUnit):
7887 """Failover an instance.
7890 HPATH = "instance-failover"
7891 HTYPE = constants.HTYPE_INSTANCE
7894 def CheckArguments(self):
7895 """Check the arguments.
7898 self.iallocator = getattr(self.op, "iallocator", None)
7899 self.target_node = getattr(self.op, "target_node", None)
7901 def ExpandNames(self):
7902 self._ExpandAndLockInstance()
7903 _ExpandNamesForMigration(self)
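# Positional arguments passed to TLMigrateInstance below: cleanup=False, failover=True, fallback=False, then ignore_consistency and allow_runtime_changes=True.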
7906 TLMigrateInstance(self, self.op.instance_name, False, True, False,
7907 self.op.ignore_consistency, True,
7908 self.op.shutdown_timeout, self.op.ignore_ipolicy)
7910 self.tasklets = [self._migrater]
7912 def DeclareLocks(self, level):
7913 _DeclareLocksForMigration(self, level)
7915 def BuildHooksEnv(self):
7918 This runs on master, primary and secondary nodes of the instance.
7921 instance = self._migrater.instance
7922 source_node = instance.primary_node
7923 target_node = self.op.target_node
7925 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
7926 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7927 "OLD_PRIMARY": source_node,
7928 "NEW_PRIMARY": target_node,
7931 if instance.disk_template in constants.DTS_INT_MIRROR:
7932 env["OLD_SECONDARY"] = instance.secondary_nodes[0]
7933 env["NEW_SECONDARY"] = source_node
7935 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
7937 env.update(_BuildInstanceHookEnvByObject(self, instance))
7941 def BuildHooksNodes(self):
7942 """Build hooks nodes.
7945 instance = self._migrater.instance
7946 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
7947 return (nl, nl + [instance.primary_node])
7950 class LUInstanceMigrate(LogicalUnit):
7951 """Migrate an instance.
7953 This is migration without shutting down, compared to the failover,
7954 which is done with shutdown.
7957 HPATH = "instance-migrate"
7958 HTYPE = constants.HTYPE_INSTANCE
7961 def ExpandNames(self):
7962 self._ExpandAndLockInstance()
7963 _ExpandNamesForMigration(self)
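# Positional arguments passed to TLMigrateInstance below: cleanup from the opcode, failover=False, fallback=allow_failover, ignore_consistency=False, then allow_runtime_changes.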
7966 TLMigrateInstance(self, self.op.instance_name, self.op.cleanup,
7967 False, self.op.allow_failover, False,
7968 self.op.allow_runtime_changes,
7969 constants.DEFAULT_SHUTDOWN_TIMEOUT,
7970 self.op.ignore_ipolicy)
7972 self.tasklets = [self._migrater]
7974 def DeclareLocks(self, level):
7975 _DeclareLocksForMigration(self, level)
7977 def BuildHooksEnv(self):
7980 This runs on master, primary and secondary nodes of the instance.
7983 instance = self._migrater.instance
7984 source_node = instance.primary_node
7985 target_node = self.op.target_node
7986 env = _BuildInstanceHookEnvByObject(self, instance)
7988 "MIGRATE_LIVE": self._migrater.live,
7989 "MIGRATE_CLEANUP": self.op.cleanup,
7990 "OLD_PRIMARY": source_node,
7991 "NEW_PRIMARY": target_node,
7992 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7995 if instance.disk_template in constants.DTS_INT_MIRROR:
7996 env["OLD_SECONDARY"] = target_node
7997 env["NEW_SECONDARY"] = source_node
7999 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
8003 def BuildHooksNodes(self):
8004 """Build hooks nodes.
8007 instance = self._migrater.instance
8008 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
8009 return (nl, nl + [instance.primary_node])
8012 class LUInstanceMove(LogicalUnit):
8013 """Move an instance by data-copying.
8016 HPATH = "instance-move"
8017 HTYPE = constants.HTYPE_INSTANCE
8020 def ExpandNames(self):
8021 self._ExpandAndLockInstance()
8022 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
8023 self.op.target_node = target_node
8024 self.needed_locks[locking.LEVEL_NODE] = [target_node]
8025 self.needed_locks[locking.LEVEL_NODE_RES] = []
8026 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
8028 def DeclareLocks(self, level):
8029 if level == locking.LEVEL_NODE:
8030 self._LockInstancesNodes(primary_only=True)
8031 elif level == locking.LEVEL_NODE_RES:
8033 self.needed_locks[locking.LEVEL_NODE_RES] = \
8034 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
8036 def BuildHooksEnv(self):
8039 This runs on master, primary and secondary nodes of the instance.
8043 "TARGET_NODE": self.op.target_node,
8044 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
8046 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8049 def BuildHooksNodes(self):
8050 """Build hooks nodes.
8054 self.cfg.GetMasterNode(),
8055 self.instance.primary_node,
8056 self.op.target_node,
8060 def CheckPrereq(self):
8061 """Check prerequisites.
8063 This checks that the instance is in the cluster.
8066 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8067 assert self.instance is not None, \
8068 "Cannot retrieve locked instance %s" % self.op.instance_name
8070 node = self.cfg.GetNodeInfo(self.op.target_node)
8071 assert node is not None, \
8072 "Cannot retrieve locked node %s" % self.op.target_node
8074 self.target_node = target_node = node.name
8076 if target_node == instance.primary_node:
8077 raise errors.OpPrereqError("Instance %s is already on the node %s" %
8078 (instance.name, target_node),
8081 bep = self.cfg.GetClusterInfo().FillBE(instance)
8083 for idx, dsk in enumerate(instance.disks):
8084 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
8085 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
8086 " cannot copy" % idx, errors.ECODE_STATE)
8088 _CheckNodeOnline(self, target_node)
8089 _CheckNodeNotDrained(self, target_node)
8090 _CheckNodeVmCapable(self, target_node)
8091 cluster = self.cfg.GetClusterInfo()
8092 group_info = self.cfg.GetNodeGroup(node.group)
8093 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
8094 _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
8095 ignore=self.op.ignore_ipolicy)
8097 if instance.admin_state == constants.ADMINST_UP:
8098 # check memory requirements on the secondary node
8099 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
8100 instance.name, bep[constants.BE_MAXMEM],
8101 instance.hypervisor)
8103 self.LogInfo("Not checking memory on the secondary node as"
8104 " instance will not be started")
8106 # check bridge existence
8107 _CheckInstanceBridgesExist(self, instance, node=target_node)
8109 def Exec(self, feedback_fn):
8110 """Move an instance.
8112 The move is done by shutting it down on its present node, copying
8113 the data over (slow) and starting it on the new node.
8116 instance = self.instance
8118 source_node = instance.primary_node
8119 target_node = self.target_node
8121 self.LogInfo("Shutting down instance %s on source node %s",
8122 instance.name, source_node)
8124 assert (self.owned_locks(locking.LEVEL_NODE) ==
8125 self.owned_locks(locking.LEVEL_NODE_RES))
8127 result = self.rpc.call_instance_shutdown(source_node, instance,
8128 self.op.shutdown_timeout)
8129 msg = result.fail_msg
8131 if self.op.ignore_consistency:
8132 self.LogWarning("Could not shutdown instance %s on node %s."
8133 " Proceeding anyway. Please make sure node"
8134 " %s is down. Error details: %s",
8135 instance.name, source_node, source_node, msg)
8137 raise errors.OpExecError("Could not shutdown instance %s on"
8139 (instance.name, source_node, msg))
8141 # create the target disks
8143 _CreateDisks(self, instance, target_node=target_node)
8144 except errors.OpExecError:
8145 self.LogWarning("Device creation failed, reverting...")
8147 _RemoveDisks(self, instance, target_node=target_node)
8149 self.cfg.ReleaseDRBDMinors(instance.name)
8152 cluster_name = self.cfg.GetClusterInfo().cluster_name
8155 # activate, get path, copy the data over
8156 for idx, disk in enumerate(instance.disks):
8157 self.LogInfo("Copying data for disk %d", idx)
8158 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
8159 instance.name, True, idx)
8161 self.LogWarning("Can't assemble newly created disk %d: %s",
8162 idx, result.fail_msg)
8163 errs.append(result.fail_msg)
8165 dev_path = result.payload
8166 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
8167 target_node, dev_path,
8170 self.LogWarning("Can't copy data over for disk %d: %s",
8171 idx, result.fail_msg)
8172 errs.append(result.fail_msg)
8176 self.LogWarning("Some disks failed to copy, aborting")
8178 _RemoveDisks(self, instance, target_node=target_node)
8180 self.cfg.ReleaseDRBDMinors(instance.name)
8181 raise errors.OpExecError("Errors during disk copy: %s" %
8184 instance.primary_node = target_node
8185 self.cfg.Update(instance, feedback_fn)
8187 self.LogInfo("Removing the disks on the original node")
8188 _RemoveDisks(self, instance, target_node=source_node)
8190 # Only start the instance if it's marked as up
8191 if instance.admin_state == constants.ADMINST_UP:
8192 self.LogInfo("Starting instance %s on node %s",
8193 instance.name, target_node)
8195 disks_ok, _ = _AssembleInstanceDisks(self, instance,
8196 ignore_secondaries=True)
8198 _ShutdownInstanceDisks(self, instance)
8199 raise errors.OpExecError("Can't activate the instance's disks")
8201 result = self.rpc.call_instance_start(target_node,
8202 (instance, None, None), False)
8203 msg = result.fail_msg
8205 _ShutdownInstanceDisks(self, instance)
8206 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
8207 (instance.name, target_node, msg))
8210 class LUNodeMigrate(LogicalUnit):
8211 """Migrate all instances from a node.
8214 HPATH = "node-migrate"
8215 HTYPE = constants.HTYPE_NODE
8218 def CheckArguments(self):
8221 def ExpandNames(self):
8222 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8224 self.share_locks = _ShareAll()
8225 self.needed_locks = {
8226 locking.LEVEL_NODE: [self.op.node_name],
8229 def BuildHooksEnv(self):
8232 This runs on the master, the primary and all the secondaries.
8236 "NODE_NAME": self.op.node_name,
8237 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
8240 def BuildHooksNodes(self):
8241 """Build hooks nodes.
8244 nl = [self.cfg.GetMasterNode()]
8247 def CheckPrereq(self):
8250 def Exec(self, feedback_fn):
8251 # Prepare jobs for migration instances
8252 allow_runtime_changes = self.op.allow_runtime_changes
8254 [opcodes.OpInstanceMigrate(instance_name=inst.name,
8257 iallocator=self.op.iallocator,
8258 target_node=self.op.target_node,
8259 allow_runtime_changes=allow_runtime_changes,
8260 ignore_ipolicy=self.op.ignore_ipolicy)]
8261 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)]
8263 # TODO: Run iallocator in this opcode and pass correct placement options to
8264 # OpInstanceMigrate. Since other jobs can modify the cluster between
8265 # running the iallocator and the actual migration, a good consistency model
8266 # will have to be found.
8268 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
8269 frozenset([self.op.node_name]))
8271 return ResultWithJobs(jobs)
8274 class TLMigrateInstance(Tasklet):
8275 """Tasklet class for instance migration.
8278 @ivar live: whether the migration will be done live or non-live;
8279 this variable is initialized only after CheckPrereq has run
8280 @type cleanup: boolean
8281 @ivar cleanup: Whether we clean up after a failed migration
8282 @type iallocator: string
8283 @ivar iallocator: The iallocator used to determine target_node
8284 @type target_node: string
8285 @ivar target_node: If given, the target_node to reallocate the instance to
8286 @type failover: boolean
8287 @ivar failover: Whether operation results in failover or migration
8288 @type fallback: boolean
8289 @ivar fallback: Whether fallback to failover is allowed if migration is not possible
8291 @type ignore_consistency: boolean
8292 @ivar ignore_consistency: Whether we should ignore consistency between source
8294 @type shutdown_timeout: int
8295 @ivar shutdown_timeout: In case of failover, the timeout used for the shutdown
8296 @type ignore_ipolicy: bool
8297 @ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
8302 _MIGRATION_POLL_INTERVAL = 1 # seconds
8303 _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
8305 def __init__(self, lu, instance_name, cleanup, failover, fallback,
8306 ignore_consistency, allow_runtime_changes, shutdown_timeout,
8308 """Initializes this class.
8311 Tasklet.__init__(self, lu)
8314 self.instance_name = instance_name
8315 self.cleanup = cleanup
8316 self.live = False # will be overridden later
8317 self.failover = failover
8318 self.fallback = fallback
8319 self.ignore_consistency = ignore_consistency
8320 self.shutdown_timeout = shutdown_timeout
8321 self.ignore_ipolicy = ignore_ipolicy
8322 self.allow_runtime_changes = allow_runtime_changes
8324 def CheckPrereq(self):
8325 """Check prerequisites.
8327 This checks that the instance is in the cluster.
8330 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
8331 instance = self.cfg.GetInstanceInfo(instance_name)
8332 assert instance is not None
8333 self.instance = instance
8334 cluster = self.cfg.GetClusterInfo()
8336 if (not self.cleanup and
8337 not instance.admin_state == constants.ADMINST_UP and
8338 not self.failover and self.fallback):
8339 self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
8340 " switching to failover")
8341 self.failover = True
8343 if instance.disk_template not in constants.DTS_MIRRORED:
8348 raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
8349 " %s" % (instance.disk_template, text),
8352 if instance.disk_template in constants.DTS_EXT_MIRROR:
8353 assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
8355 _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
8357 if self.lu.op.iallocator:
8358 self._RunAllocator()
8360 # We set self.target_node as it is required by
8362 self.target_node = self.lu.op.target_node
8364 # Check that the target node is correct in terms of instance policy
8365 nodeinfo = self.cfg.GetNodeInfo(self.target_node)
8366 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8367 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
8369 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8370 ignore=self.ignore_ipolicy)
8372 # self.target_node is already populated, either directly or by the
8374 target_node = self.target_node
8375 if self.target_node == instance.primary_node:
8376 raise errors.OpPrereqError("Cannot migrate instance %s"
8377 " to its primary (%s)" %
8378 (instance.name, instance.primary_node),
8381 if len(self.lu.tasklets) == 1:
8382 # It is safe to release locks only when we're the only tasklet
8384 _ReleaseLocks(self.lu, locking.LEVEL_NODE,
8385 keep=[instance.primary_node, self.target_node])
8386 _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
8389 assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
8391 secondary_nodes = instance.secondary_nodes
8392 if not secondary_nodes:
8393 raise errors.ConfigurationError("No secondary node but using"
8394 " %s disk template" %
8395 instance.disk_template)
8396 target_node = secondary_nodes[0]
8397 if self.lu.op.iallocator or (self.lu.op.target_node and
8398 self.lu.op.target_node != target_node):
8400 text = "failed over"
8403 raise errors.OpPrereqError("Instances with disk template %s cannot"
8404 " be %s to arbitrary nodes"
8405 " (neither an iallocator nor a target"
8406 " node can be passed)" %
8407 (instance.disk_template, text),
8409 nodeinfo = self.cfg.GetNodeInfo(target_node)
8410 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8411 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
8413 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8414 ignore=self.ignore_ipolicy)
8416 i_be = cluster.FillBE(instance)
8418 # check memory requirements on the secondary node
8419 if (not self.cleanup and
8420 (not self.failover or instance.admin_state == constants.ADMINST_UP)):
8421 self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
8422 "migrating instance %s" %
8424 i_be[constants.BE_MINMEM],
8425 instance.hypervisor)
8427 self.lu.LogInfo("Not checking memory on the secondary node as"
8428 " instance will not be started")
8430 # check if failover must be forced instead of migration
8431 if (not self.cleanup and not self.failover and
8432 i_be[constants.BE_ALWAYS_FAILOVER]):
8433 self.lu.LogInfo("Instance configured to always failover; fallback"
8435 self.failover = True
8437 # check bridge existence
8438 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
8440 if not self.cleanup:
8441 _CheckNodeNotDrained(self.lu, target_node)
8442 if not self.failover:
8443 result = self.rpc.call_instance_migratable(instance.primary_node,
8445 if result.fail_msg and self.fallback:
8446 self.lu.LogInfo("Can't migrate, instance offline, fallback to"
8448 self.failover = True
8450 result.Raise("Can't migrate, please use failover",
8451 prereq=True, ecode=errors.ECODE_STATE)
8453 assert not (self.failover and self.cleanup)
8455 if not self.failover:
8456 if self.lu.op.live is not None and self.lu.op.mode is not None:
8457 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
8458 " parameters are accepted",
8460 if self.lu.op.live is not None:
8462 self.lu.op.mode = constants.HT_MIGRATION_LIVE
8464 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
8465 # reset the 'live' parameter to None so that repeated
8466 # invocations of CheckPrereq do not raise an exception
8467 self.lu.op.live = None
8468 elif self.lu.op.mode is None:
8469 # read the default value from the hypervisor
8470 i_hv = cluster.FillHV(self.instance, skip_globals=False)
8471 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
8473 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
8475 # Failover is never live
8478 if not (self.failover or self.cleanup):
8479 remote_info = self.rpc.call_instance_info(instance.primary_node,
8481 instance.hypervisor)
8482 remote_info.Raise("Error checking instance on node %s" %
8483 instance.primary_node)
8484 instance_running = bool(remote_info.payload)
8485 if instance_running:
8486 self.current_mem = int(remote_info.payload["memory"])
8488 def _RunAllocator(self):
8489 """Run the allocator based on input opcode.
8492 assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
8494 # FIXME: add a self.ignore_ipolicy option
8495 req = iallocator.IAReqRelocate(name=self.instance_name,
8496 relocate_from=[self.instance.primary_node])
8497 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
8499 ial.Run(self.lu.op.iallocator)
8502 raise errors.OpPrereqError("Can't compute nodes using"
8503 " iallocator '%s': %s" %
8504 (self.lu.op.iallocator, ial.info),
8506 self.target_node = ial.result[0]
8507 self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8508 self.instance_name, self.lu.op.iallocator,
8509 utils.CommaJoin(ial.result))
8511 def _WaitUntilSync(self):
8512 """Poll with custom rpc for disk sync.
8514 This uses our own step-based rpc call.
8517 self.feedback_fn("* wait until resync is done")
8521 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
8523 (self.instance.disks,
8526 for node, nres in result.items():
8527 nres.Raise("Cannot resync disks on node %s" % node)
8528 node_done, node_percent = nres.payload
8529 all_done = all_done and node_done
8530 if node_percent is not None:
8531 min_percent = min(min_percent, node_percent)
8533 if min_percent < 100:
8534 self.feedback_fn(" - progress: %.1f%%" % min_percent)
8537 def _EnsureSecondary(self, node):
8538 """Demote a node to secondary.
8541 self.feedback_fn("* switching node %s to secondary mode" % node)
8543 for dev in self.instance.disks:
8544 self.cfg.SetDiskID(dev, node)
8546 result = self.rpc.call_blockdev_close(node, self.instance.name,
8547 self.instance.disks)
8548 result.Raise("Cannot change disk to secondary on node %s" % node)
8550 def _GoStandalone(self):
8551 """Disconnect from the network.
8554 self.feedback_fn("* changing into standalone mode")
8555 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
8556 self.instance.disks)
8557 for node, nres in result.items():
8558 nres.Raise("Cannot disconnect disks on node %s" % node)
8560 def _GoReconnect(self, multimaster):
8561 """Reconnect to the network.
8567 msg = "single-master"
8568 self.feedback_fn("* changing disks into %s mode" % msg)
8569 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
8570 (self.instance.disks, self.instance),
8571 self.instance.name, multimaster)
8572 for node, nres in result.items():
8573 nres.Raise("Cannot change disks config on node %s" % node)
8575 def _ExecCleanup(self):
8576 """Try to cleanup after a failed migration.
8578 The cleanup is done by:
8579 - check that the instance is running only on one node
8580 (and update the config if needed)
8581 - change disks on its secondary node to secondary
8582 - wait until disks are fully synchronized
8583 - disconnect from the network
8584 - change disks into single-master mode
8585 - wait again until disks are fully synchronized
8588 instance = self.instance
8589 target_node = self.target_node
8590 source_node = self.source_node
8592 # check running on only one node
8593 self.feedback_fn("* checking where the instance actually runs"
8594 " (if this hangs, the hypervisor might be in"
8596 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
8597 for node, result in ins_l.items():
8598 result.Raise("Can't contact node %s" % node)
8600 runningon_source = instance.name in ins_l[source_node].payload
8601 runningon_target = instance.name in ins_l[target_node].payload
8603 if runningon_source and runningon_target:
8604 raise errors.OpExecError("Instance seems to be running on two nodes,"
8605 " or the hypervisor is confused; you will have"
8606 " to ensure manually that it runs only on one"
8607 " and restart this operation")
8609 if not (runningon_source or runningon_target):
8610 raise errors.OpExecError("Instance does not seem to be running at all;"
8611 " in this case it's safer to repair by"
8612 " running 'gnt-instance stop' to ensure disk"
8613 " shutdown, and then restarting it")
8615 if runningon_target:
8616 # the migration has actually succeeded, we need to update the config
8617 self.feedback_fn("* instance running on secondary node (%s),"
8618 " updating config" % target_node)
8619 instance.primary_node = target_node
8620 self.cfg.Update(instance, self.feedback_fn)
8621 demoted_node = source_node
8623 self.feedback_fn("* instance confirmed to be running on its"
8624 " primary node (%s)" % source_node)
8625 demoted_node = target_node
8627 if instance.disk_template in constants.DTS_INT_MIRROR:
8628 self._EnsureSecondary(demoted_node)
8630 self._WaitUntilSync()
8631 except errors.OpExecError:
8632 # we ignore here errors, since if the device is standalone, it
8633 # won't be able to sync
8635 self._GoStandalone()
8636 self._GoReconnect(False)
8637 self._WaitUntilSync()
8639 self.feedback_fn("* done")
8641 def _RevertDiskStatus(self):
8642 """Try to revert the disk status after a failed migration.
8645 target_node = self.target_node
8646 if self.instance.disk_template in constants.DTS_EXT_MIRROR:
8650 self._EnsureSecondary(target_node)
8651 self._GoStandalone()
8652 self._GoReconnect(False)
8653 self._WaitUntilSync()
8654 except errors.OpExecError, err:
8655 self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
8656 " please try to recover the instance manually;"
8657 " error '%s'" % str(err))
8659 def _AbortMigration(self):
8660 """Call the hypervisor code to abort a started migration.
8663 instance = self.instance
8664 target_node = self.target_node
8665 source_node = self.source_node
8666 migration_info = self.migration_info
8668 abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
8672 abort_msg = abort_result.fail_msg
8674 logging.error("Aborting migration failed on target node %s: %s",
8675 target_node, abort_msg)
8676 # Don't raise an exception here, as we still have to try to revert the
8677 # disk status, even if this step failed.
8679 abort_result = self.rpc.call_instance_finalize_migration_src(
8680 source_node, instance, False, self.live)
8681 abort_msg = abort_result.fail_msg
8683 logging.error("Aborting migration failed on source node %s: %s",
8684 source_node, abort_msg)
8686 def _ExecMigration(self):
8687 """Migrate an instance.
8689 The migrate is done by:
8690 - change the disks into dual-master mode
8691 - wait until disks are fully synchronized again
8692 - migrate the instance
8693 - change disks on the new secondary node (the old primary) to secondary
8694 - wait until disks are fully synchronized
8695 - change disks into single-master mode
8698 instance = self.instance
8699 target_node = self.target_node
8700 source_node = self.source_node
8702 # Check for hypervisor version mismatch and warn the user.
8703 nodeinfo = self.rpc.call_node_info([source_node, target_node],
8704 None, [self.instance.hypervisor])
8705 for ninfo in nodeinfo.values():
8706 ninfo.Raise("Unable to retrieve node information from node '%s'" %
8708 (_, _, (src_info, )) = nodeinfo[source_node].payload
8709 (_, _, (dst_info, )) = nodeinfo[target_node].payload
8711 if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
8712 (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
8713 src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
8714 dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
8715 if src_version != dst_version:
8716 self.feedback_fn("* warning: hypervisor version mismatch between"
8717 " source (%s) and target (%s) node" %
8718 (src_version, dst_version))
8720 self.feedback_fn("* checking disk consistency between source and target")
8721 for (idx, dev) in enumerate(instance.disks):
8722 if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
8723 raise errors.OpExecError("Disk %s is degraded or not fully"
8724 " synchronized on target node,"
8725 " aborting migration" % idx)
8727 if self.current_mem > self.tgt_free_mem:
8728 if not self.allow_runtime_changes:
8729 raise errors.OpExecError("Memory ballooning not allowed and not enough"
8730 " free memory to fit instance %s on target"
8731 " node %s (have %dMB, need %dMB)" %
8732 (instance.name, target_node,
8733 self.tgt_free_mem, self.current_mem))
8734 self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
8735 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
8738 rpcres.Raise("Cannot modify instance runtime memory")
8740 # First get the migration information from the remote node
8741 result = self.rpc.call_migration_info(source_node, instance)
8742 msg = result.fail_msg
8744 log_err = ("Failed fetching source migration information from %s: %s" %
8746 logging.error(log_err)
8747 raise errors.OpExecError(log_err)
8749 self.migration_info = migration_info = result.payload
8751 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8752 # Then switch the disks to master/master mode
8753 self._EnsureSecondary(target_node)
8754 self._GoStandalone()
8755 self._GoReconnect(True)
8756 self._WaitUntilSync()
8758 self.feedback_fn("* preparing %s to accept the instance" % target_node)
8759 result = self.rpc.call_accept_instance(target_node,
8762 self.nodes_ip[target_node])
8764 msg = result.fail_msg
8766 logging.error("Instance pre-migration failed, trying to revert"
8767 " disk status: %s", msg)
8768 self.feedback_fn("Pre-migration failed, aborting")
8769 self._AbortMigration()
8770 self._RevertDiskStatus()
8771 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
8772 (instance.name, msg))
8774 self.feedback_fn("* migrating instance to %s" % target_node)
8775 result = self.rpc.call_instance_migrate(source_node, instance,
8776 self.nodes_ip[target_node],
8778 msg = result.fail_msg
8780 logging.error("Instance migration failed, trying to revert"
8781 " disk status: %s", msg)
8782 self.feedback_fn("Migration failed, aborting")
8783 self._AbortMigration()
8784 self._RevertDiskStatus()
8785 raise errors.OpExecError("Could not migrate instance %s: %s" %
8786 (instance.name, msg))
8788 self.feedback_fn("* starting memory transfer")
8789 last_feedback = time.time()
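# Poll the hypervisor until the migration leaves the "active" state, emitting progress feedback at most every _MIGRATION_FEEDBACK_INTERVAL seconds.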
8791 result = self.rpc.call_instance_get_migration_status(source_node,
8793 msg = result.fail_msg
8794 ms = result.payload # MigrationStatus instance
8795 if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
8796 logging.error("Instance migration failed, trying to revert"
8797 " disk status: %s", msg)
8798 self.feedback_fn("Migration failed, aborting")
8799 self._AbortMigration()
8800 self._RevertDiskStatus()
8802 msg = "hypervisor returned failure"
8803 raise errors.OpExecError("Could not migrate instance %s: %s" %
8804 (instance.name, msg))
8806 if result.payload.status != constants.HV_MIGRATION_ACTIVE:
8807 self.feedback_fn("* memory transfer complete")
8810 if (utils.TimeoutExpired(last_feedback,
8811 self._MIGRATION_FEEDBACK_INTERVAL) and
8812 ms.transferred_ram is not None):
8813 mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
8814 self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
8815 last_feedback = time.time()
8817 time.sleep(self._MIGRATION_POLL_INTERVAL)
8819 result = self.rpc.call_instance_finalize_migration_src(source_node,
8823 msg = result.fail_msg
8825 logging.error("Instance migration succeeded, but finalization failed"
8826 " on the source node: %s", msg)
8827 raise errors.OpExecError("Could not finalize instance migration: %s" %
8830 instance.primary_node = target_node
8832 # distribute new instance config to the other nodes
8833 self.cfg.Update(instance, self.feedback_fn)
8835 result = self.rpc.call_instance_finalize_migration_dst(target_node,
8839 msg = result.fail_msg
8841 logging.error("Instance migration succeeded, but finalization failed"
8842 " on the target node: %s", msg)
8843 raise errors.OpExecError("Could not finalize instance migration: %s" %
8846 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8847 self._EnsureSecondary(source_node)
8848 self._WaitUntilSync()
8849 self._GoStandalone()
8850 self._GoReconnect(False)
8851 self._WaitUntilSync()
8853 # If the instance's disk template is `rbd' or `ext' and there was a
8854 # successful migration, unmap the device from the source node.
8855 if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
8856 disks = _ExpandCheckDisks(instance, instance.disks)
8857 self.feedback_fn("* unmapping instance's disks from %s" % source_node)
8859 result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
8860 msg = result.fail_msg
8862 logging.error("Migration was successful, but couldn't unmap the"
8863 " block device %s on source node %s: %s",
8864 disk.iv_name, source_node, msg)
8865 logging.error("You need to unmap the device %s manually on %s",
8866 disk.iv_name, source_node)
8868 self.feedback_fn("* done")
8870 def _ExecFailover(self):
8871 """Failover an instance.
8873 The failover is done by shutting it down on its present node and
8874 starting it on the secondary.
8877 instance = self.instance
8878 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
8880 source_node = instance.primary_node
8881 target_node = self.target_node
8883 if instance.admin_state == constants.ADMINST_UP:
8884 self.feedback_fn("* checking disk consistency between source and target")
8885 for (idx, dev) in enumerate(instance.disks):
8886 # for drbd, these are drbd over lvm
8887 if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
8889 if primary_node.offline:
8890 self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
8892 (primary_node.name, idx, target_node))
8893 elif not self.ignore_consistency:
8894 raise errors.OpExecError("Disk %s is degraded on target node,"
8895 " aborting failover" % idx)
8897 self.feedback_fn("* not checking disk consistency as instance is not"
8900 self.feedback_fn("* shutting down instance on source node")
8901 logging.info("Shutting down instance %s on node %s",
8902 instance.name, source_node)
8904 result = self.rpc.call_instance_shutdown(source_node, instance,
8905 self.shutdown_timeout)
8906 msg = result.fail_msg
8908 if self.ignore_consistency or primary_node.offline:
8909 self.lu.LogWarning("Could not shutdown instance %s on node %s,"
8910 " proceeding anyway; please make sure node"
8911 " %s is down; error details: %s",
8912 instance.name, source_node, source_node, msg)
8914 raise errors.OpExecError("Could not shutdown instance %s on"
8916 (instance.name, source_node, msg))
8918 self.feedback_fn("* deactivating the instance's disks on source node")
8919 if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
8920 raise errors.OpExecError("Can't shut down the instance's disks")
8922 instance.primary_node = target_node
8923 # distribute new instance config to the other nodes
8924 self.cfg.Update(instance, self.feedback_fn)
8926 # Only start the instance if it's marked as up
8927 if instance.admin_state == constants.ADMINST_UP:
8928 self.feedback_fn("* activating the instance's disks on target node %s" %
8930 logging.info("Starting instance %s on node %s",
8931 instance.name, target_node)
8933 disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
8934 ignore_secondaries=True)
8936 _ShutdownInstanceDisks(self.lu, instance)
8937 raise errors.OpExecError("Can't activate the instance's disks")
8939 self.feedback_fn("* starting the instance on the target node %s" %
8941 result = self.rpc.call_instance_start(target_node, (instance, None, None),
8943 msg = result.fail_msg
8945 _ShutdownInstanceDisks(self.lu, instance)
8946 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
8947 (instance.name, target_node, msg))
8949 def Exec(self, feedback_fn):
8950 """Perform the migration.
8953 self.feedback_fn = feedback_fn
8954 self.source_node = self.instance.primary_node
8956 # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
8957 if self.instance.disk_template in constants.DTS_INT_MIRROR:
8958 self.target_node = self.instance.secondary_nodes[0]
8959 # Otherwise self.target_node has been populated either
8960 # directly, or through an iallocator.
8962 self.all_nodes = [self.source_node, self.target_node]
8963 self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
8964 in self.cfg.GetMultiNodeInfo(self.all_nodes))
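# The DRBD and migration RPCs below address the nodes via these secondary IPs.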
8967 feedback_fn("Failover instance %s" % self.instance.name)
8968 self._ExecFailover()
8970 feedback_fn("Migrating instance %s" % self.instance.name)
8973 return self._ExecCleanup()
8975 return self._ExecMigration()
8978 def _CreateBlockDev(lu, node, instance, device, force_create, info,
8980 """Wrapper around L{_CreateBlockDevInner}.
8982 This method annotates the root device first.
8985 (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
8986 return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
8990 def _CreateBlockDevInner(lu, node, instance, device, force_create,
8992 """Create a tree of block devices on a given node.
8994 If this device type has to be created on secondaries, create it and all its children.
8997 If not, just recurse to children keeping the same 'force' value.
8999 @attention: The device has to be annotated already.
9001 @param lu: the lu on whose behalf we execute
9002 @param node: the node on which to create the device
9003 @type instance: L{objects.Instance}
9004 @param instance: the instance which owns the device
9005 @type device: L{objects.Disk}
9006 @param device: the device to create
9007 @type force_create: boolean
9008 @param force_create: whether to force creation of this device; this
9009 will be changed to True whenever we find a device which has the
9010 CreateOnSecondary() attribute
9011 @param info: the extra 'metadata' we should attach to the device
9012 (this will be represented as a LVM tag)
9013 @type force_open: boolean
9014 @param force_open: this parameter will be passed to the
9015 L{backend.BlockdevCreate} function where it specifies
9016 whether we run on primary or not, and it affects both
9017 the child assembly and the device's own Open() execution
9020 if device.CreateOnSecondary():
9024 for child in device.children:
9025 _CreateBlockDevInner(lu, node, instance, child, force_create,
9028 if not force_create:
9031 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
9034 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
9035 """Create a single block device on a given node.
9037 This will not recurse over children of the device, so they must be
9040 @param lu: the lu on whose behalf we execute
9041 @param node: the node on which to create the device
9042 @type instance: L{objects.Instance}
9043 @param instance: the instance which owns the device
9044 @type device: L{objects.Disk}
9045 @param device: the device to create
9046 @param info: the extra 'metadata' we should attach to the device
9047 (this will be represented as a LVM tag)
9048 @type force_open: boolean
9049 @param force_open: this parameter will be passed to the
9050 L{backend.BlockdevCreate} function where it specifies
9051 whether we run on primary or not, and it affects both
9052 the child assembly and the device's own Open() execution
9055 lu.cfg.SetDiskID(device, node)
9056 result = lu.rpc.call_blockdev_create(node, device, device.size,
9057 instance.name, force_open, info)
9058 result.Raise("Can't create block device %s on"
9059 " node %s for instance %s" % (device, node, instance.name))
9060 if device.physical_id is None:
9061 device.physical_id = result.payload
9064 def _GenerateUniqueNames(lu, exts):
9065 """Generate a suitable LV name.
9067 This will generate a logical volume name for the given instance.
9072 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
9073 results.append("%s%s" % (new_id, val))
9077 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
9078 iv_name, p_minor, s_minor):
9079 """Generate a drbd8 device complete with its children.
9082 assert len(vgnames) == len(names) == 2
9083 port = lu.cfg.AllocatePort()
9084 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
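# Each DRBD8 disk is backed by two LVs: a data volume and a small metadata volume (DRBD_META_SIZE); both become children of the DRBD device built below.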
9086 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
9087 logical_id=(vgnames[0], names[0]),
9089 dev_meta = objects.Disk(dev_type=constants.LD_LV,
9090 size=constants.DRBD_META_SIZE,
9091 logical_id=(vgnames[1], names[1]),
9093 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
9094 logical_id=(primary, secondary, port,
9097 children=[dev_data, dev_meta],
9098 iv_name=iv_name, params={})
9102 _DISK_TEMPLATE_NAME_PREFIX = {
9103 constants.DT_PLAIN: "",
9104 constants.DT_RBD: ".rbd",
9105 constants.DT_EXT: ".ext",
9109 _DISK_TEMPLATE_DEVICE_TYPE = {
9110 constants.DT_PLAIN: constants.LD_LV,
9111 constants.DT_FILE: constants.LD_FILE,
9112 constants.DT_SHARED_FILE: constants.LD_FILE,
9113 constants.DT_BLOCK: constants.LD_BLOCKDEV,
9114 constants.DT_RBD: constants.LD_RBD,
9115 constants.DT_EXT: constants.LD_EXT,
9119 def _GenerateDiskTemplate(
9120 lu, template_name, instance_name, primary_node, secondary_nodes,
9121 disk_info, file_storage_dir, file_driver, base_index,
9122 feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
9123 _req_shr_file_storage=opcodes.RequireSharedFileStorage):
9124 """Generate the entire disk layout for a given template type.
9127 vgname = lu.cfg.GetVGName()
9128 disk_count = len(disk_info)
9131 if template_name == constants.DT_DISKLESS:
9133 elif template_name == constants.DT_DRBD8:
9134 if len(secondary_nodes) != 1:
9135 raise errors.ProgrammerError("Wrong template configuration")
9136 remote_node = secondary_nodes[0]
9137 minors = lu.cfg.AllocateDRBDMinor(
9138 [primary_node, remote_node] * len(disk_info), instance_name)
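# The doubled node list allocates two minors per disk, one on the primary and one on the remote node (consumed below as minors[idx * 2] and minors[idx * 2 + 1]).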
9140 (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
9142 drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
9145 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
9146 for i in range(disk_count)]):
9147 names.append(lv_prefix + "_data")
9148 names.append(lv_prefix + "_meta")
9149 for idx, disk in enumerate(disk_info):
9150 disk_index = idx + base_index
9151 data_vg = disk.get(constants.IDISK_VG, vgname)
9152 meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
9153 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
9154 disk[constants.IDISK_SIZE],
9156 names[idx * 2:idx * 2 + 2],
9157 "disk/%d" % disk_index,
9158 minors[idx * 2], minors[idx * 2 + 1])
9159 disk_dev.mode = disk[constants.IDISK_MODE]
9160 disks.append(disk_dev)
9163 raise errors.ProgrammerError("Wrong template configuration")
9165 if template_name == constants.DT_FILE:
9167 elif template_name == constants.DT_SHARED_FILE:
9168 _req_shr_file_storage()
9170 name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
9171 if name_prefix is None:
9174 names = _GenerateUniqueNames(lu, ["%s.disk%s" %
9175 (name_prefix, base_index + i)
9176 for i in range(disk_count)])
9178 if template_name == constants.DT_PLAIN:
9180 def logical_id_fn(idx, _, disk):
9181 vg = disk.get(constants.IDISK_VG, vgname)
9182 return (vg, names[idx])
9184 elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
9186 lambda _, disk_index, disk: (file_driver,
9187 "%s/disk%d" % (file_storage_dir,
9189 elif template_name == constants.DT_BLOCK:
9191 lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
9192 disk[constants.IDISK_ADOPT])
9193 elif template_name == constants.DT_RBD:
9194 logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
9195 elif template_name == constants.DT_EXT:
9196 def logical_id_fn(idx, _, disk):
9197 provider = disk.get(constants.IDISK_PROVIDER, None)
9198 if provider is None:
9199 raise errors.ProgrammerError("Disk template is %s, but '%s' is"
9200 " not found", constants.DT_EXT,
9201 constants.IDISK_PROVIDER)
9202 return (provider, names[idx])
9204 raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
9206 dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
9208 for idx, disk in enumerate(disk_info):
9210 # Only for the Ext template, add disk_info to params
9211 if template_name == constants.DT_EXT:
9212 params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
9214 if key not in constants.IDISK_PARAMS:
9215 params[key] = disk[key]
9216 disk_index = idx + base_index
9217 size = disk[constants.IDISK_SIZE]
9218 feedback_fn("* disk %s, size %s" %
9219 (disk_index, utils.FormatUnit(size, "h")))
9220 disks.append(objects.Disk(dev_type=dev_type, size=size,
9221 logical_id=logical_id_fn(idx, disk_index, disk),
9222 iv_name="disk/%d" % disk_index,
9223 mode=disk[constants.IDISK_MODE],
9229 def _GetInstanceInfoText(instance):
9230 """Compute that text that should be added to the disk's metadata.
9233 return "originstname+%s" % instance.name
9236 def _CalcEta(time_taken, written, total_size):
9237 """Calculates the ETA based on size written and total size.
9239 @param time_taken: The time taken so far
9240 @param written: amount written so far
9241 @param total_size: The total size of data to be written
9242 @return: The remaining time in seconds
9245 avg_time = time_taken / float(written)
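# Linear extrapolation: average time per unit written so far, multiplied by the amount still to write.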
9246 return (total_size - written) * avg_time
9249 def _WipeDisks(lu, instance, disks=None):
9250 """Wipes instance disks.
9252 @type lu: L{LogicalUnit}
9253 @param lu: the logical unit on whose behalf we execute
9254 @type instance: L{objects.Instance}
9255 @param instance: the instance whose disks we should create
9256 @return: the success of the wipe
9259 node = instance.primary_node
9262 disks = [(idx, disk, 0)
9263 for (idx, disk) in enumerate(instance.disks)]
9265 for (_, device, _) in disks:
9266 lu.cfg.SetDiskID(device, node)
9268 logging.info("Pausing synchronization of disks of instance '%s'",
9270 result = lu.rpc.call_blockdev_pause_resume_sync(node,
9271 (map(compat.snd, disks),
9274 result.Raise("Failed to pause disk synchronization on node '%s'" % node)
9276 for idx, success in enumerate(result.payload):
9278 logging.warn("Pausing synchronization of disk %s of instance '%s'"
9279 " failed", idx, instance.name)
9282 for (idx, device, offset) in disks:
9283 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
9284 # MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
9286 int(min(constants.MAX_WIPE_CHUNK,
9287 device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))
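# As a rough illustration (the exact values live in constants.py): with
# MIN_WIPE_CHUNK_PERCENT at 10 and MAX_WIPE_CHUNK at 2048 MiB, a 10 GiB
# (10240 MiB) disk would be wiped in 1024 MiB chunks, while anything of
# 20 GiB or more would be capped at 2048 MiB per chunk.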
9291 start_time = time.time()
9296 info_text = (" (from %s to %s)" %
9297 (utils.FormatUnit(offset, "h"),
9298 utils.FormatUnit(size, "h")))
9300 lu.LogInfo("* Wiping disk %s%s", idx, info_text)
9302 logging.info("Wiping disk %d for instance %s on node %s using"
9303 " chunk size %s", idx, instance.name, node, wipe_chunk_size)
9305 while offset < size:
9306 wipe_size = min(wipe_chunk_size, size - offset)
9308 logging.debug("Wiping disk %d, offset %s, chunk %s",
9309 idx, offset, wipe_size)
9311 result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
9313 result.Raise("Could not wipe disk %d at offset %d for size %d" %
9314 (idx, offset, wipe_size))
9318 if now - last_output >= 60:
9319 eta = _CalcEta(now - start_time, offset, size)
9320 lu.LogInfo(" - done: %.1f%% ETA: %s",
9321 offset / float(size) * 100, utils.FormatSeconds(eta))
9324 logging.info("Resuming synchronization of disks for instance '%s'",
9327 result = lu.rpc.call_blockdev_pause_resume_sync(node,
9328 (map(compat.snd, disks),
9333 lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
9334 node, result.fail_msg)
9336 for idx, success in enumerate(result.payload):
9338 lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
9339 " failed", idx, instance.name)
9342 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
9343 """Create all disks for an instance.
9345 This abstracts away some work from AddInstance.
9347 @type lu: L{LogicalUnit}
9348 @param lu: the logical unit on whose behalf we execute
9349 @type instance: L{objects.Instance}
9350 @param instance: the instance whose disks we should create
9352 @param to_skip: list of indices to skip
9353 @type target_node: string
9354 @param target_node: if passed, overrides the target node for creation
9356 @return: the success of the creation
9359 info = _GetInstanceInfoText(instance)
9360 if target_node is None:
9361 pnode = instance.primary_node
9362 all_nodes = instance.all_nodes
9367 if instance.disk_template in constants.DTS_FILEBASED:
9368 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
9369 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
9371 result.Raise("Failed to create directory '%s' on"
9372 " node %s" % (file_storage_dir, pnode))
9374 # Note: this needs to be kept in sync with adding of disks in
9375 # LUInstanceSetParams
9376 for idx, device in enumerate(instance.disks):
9377 if to_skip and idx in to_skip:
9379 logging.info("Creating disk %s for instance '%s'", idx, instance.name)
9381 for node in all_nodes:
9382 f_create = node == pnode
9383 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
9386 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
9387 """Remove all disks for an instance.
9389 This abstracts away some work from `AddInstance()` and
9390 `RemoveInstance()`. Note that in case some of the devices couldn't
9391 be removed, the removal will continue with the other ones (compare
9392 with `_CreateDisks()`).
9394 @type lu: L{LogicalUnit}
9395 @param lu: the logical unit on whose behalf we execute
9396 @type instance: L{objects.Instance}
9397 @param instance: the instance whose disks we should remove
9398 @type target_node: string
9399 @param target_node: used to override the node on which to remove the disks
9401 @return: the success of the removal
9404 logging.info("Removing block devices for instance %s", instance.name)
9407 ports_to_release = set()
9408 anno_disks = _AnnotateDiskParams(instance, instance.disks, lu.cfg)
9409 for (idx, device) in enumerate(anno_disks):
9411 edata = [(target_node, device)]
9413 edata = device.ComputeNodeTree(instance.primary_node)
9414 for node, disk in edata:
9415 lu.cfg.SetDiskID(disk, node)
9416 result = lu.rpc.call_blockdev_remove(node, disk)
9418 lu.LogWarning("Could not remove disk %s on node %s,"
9419 " continuing anyway: %s", idx, node, result.fail_msg)
9420 if not (result.offline and node != instance.primary_node):
9423 # if this is a DRBD disk, return its port to the pool
9424 if device.dev_type in constants.LDS_DRBD:
9425 ports_to_release.add(device.logical_id[2])
9427 if all_result or ignore_failures:
9428 for port in ports_to_release:
9429 lu.cfg.AddTcpUdpPort(port)
9431 if instance.disk_template in constants.DTS_FILEBASED:
9432 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
9436 tgt = instance.primary_node
9437 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
9439 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
9440 file_storage_dir, instance.primary_node, result.fail_msg)
9446 def _ComputeDiskSizePerVG(disk_template, disks):
9447 """Compute disk size requirements in the volume group
9450 def _compute(disks, payload):
9451 """Universal algorithm.
9456 vgs[disk[constants.IDISK_VG]] = \
9457 vgs.get(disk[constants.IDISK_VG], 0) + disk[constants.IDISK_SIZE] + payload
9461 # Required free disk space per volume group, as a function of the disk template and disk sizes
9463 constants.DT_DISKLESS: {},
9464 constants.DT_PLAIN: _compute(disks, 0),
9465 # 128 MB are added for drbd metadata for each disk
9466 constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
9467 constants.DT_FILE: {},
9468 constants.DT_SHARED_FILE: {},
9471 if disk_template not in req_size_dict:
9472 raise errors.ProgrammerError("Disk template '%s' size requirement"
9473 " is unknown" % disk_template)
9475 return req_size_dict[disk_template]
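# Worked example (volume group name chosen for illustration): two plain disks
# of 10240 MiB and 20480 MiB in VG "xenvg" yield {"xenvg": 30720}; under
# DT_DRBD8 the per-disk metadata overhead mentioned above is added as well,
# giving {"xenvg": 30720 + 2 * constants.DRBD_META_SIZE}. File-based and
# diskless templates need no LVM space, hence their empty dicts.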
9478 def _FilterVmNodes(lu, nodenames):
9479 """Filters out non-vm_capable nodes from a list.
9481 @type lu: L{LogicalUnit}
9482 @param lu: the logical unit for which we check
9483 @type nodenames: list
9484 @param nodenames: the list of nodes on which we should check
9486 @return: the list of vm-capable nodes
9489 non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
9490 return [name for name in nodenames if name not in non_vm_nodes]
9493 def _CheckHVParams(lu, nodenames, hvname, hvparams):
9494 """Hypervisor parameter validation.
9496 This function abstracts the hypervisor parameter validation to be
9497 used in both instance create and instance modify.
9499 @type lu: L{LogicalUnit}
9500 @param lu: the logical unit for which we check
9501 @type nodenames: list
9502 @param nodenames: the list of nodes on which we should check
9503 @type hvname: string
9504 @param hvname: the name of the hypervisor we should use
9505 @type hvparams: dict
9506 @param hvparams: the parameters which we need to check
9507 @raise errors.OpPrereqError: if the parameters are not valid
9510 nodenames = _FilterVmNodes(lu, nodenames)
9512 cluster = lu.cfg.GetClusterInfo()
9513 hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
9515 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvfull)
9516 for node in nodenames:
9520 info.Raise("Hypervisor parameter validation failed on node %s" % node)
9523 def _CheckOSParams(lu, required, nodenames, osname, osparams):
9524 """OS parameters validation.
9526 @type lu: L{LogicalUnit}
9527 @param lu: the logical unit for which we check
9528 @type required: boolean
9529 @param required: whether the validation should fail if the OS is not
9531 @type nodenames: list
9532 @param nodenames: the list of nodes on which we should check
9533 @type osname: string
9534 @param osname: the name of the OS we should use
9535 @type osparams: dict
9536 @param osparams: the parameters which we need to check
9537 @raise errors.OpPrereqError: if the parameters are not valid
9540 nodenames = _FilterVmNodes(lu, nodenames)
9541 result = lu.rpc.call_os_validate(nodenames, required, osname,
9542 [constants.OS_VALIDATE_PARAMETERS],
9544 for node, nres in result.items():
9545 # we don't check for offline cases since this should be run only
9546 # against the master node and/or an instance's nodes
9547 nres.Raise("OS Parameters validation failed on node %s" % node)
9548 if not nres.payload:
9549 lu.LogInfo("OS %s not found on node %s, validation skipped",
9553 def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
9554 """Wrapper around IAReqInstanceAlloc.
9556 @param op: The instance opcode
9557 @param disks: The computed disks
9558 @param nics: The computed nics
9559 @param beparams: The fully filled beparams
9560 @param node_whitelist: List of nodes which should appear as online to the
9561 allocator (unless the node is already marked offline)
9563 @returns: A filled L{iallocator.IAReqInstanceAlloc}
9566 spindle_use = beparams[constants.BE_SPINDLE_USE]
9567 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
9568 disk_template=op.disk_template,
9571 vcpus=beparams[constants.BE_VCPUS],
9572 memory=beparams[constants.BE_MAXMEM],
9573 spindle_use=spindle_use,
9575 nics=[n.ToDict() for n in nics],
9576 hypervisor=op.hypervisor,
9577 node_whitelist=node_whitelist)
9580 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
9581 """Computes the nics.
9583 @param op: The instance opcode
9584 @param cluster: Cluster configuration object
9585 @param default_ip: The default ip to assign
9586 @param cfg: An instance of the configuration object
9587 @param ec_id: Execution context ID
9589 @returns: The built-up NICs
9594 nic_mode_req = nic.get(constants.INIC_MODE, None)
9595 nic_mode = nic_mode_req
9596 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
9597 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
9599 net = nic.get(constants.INIC_NETWORK, None)
9600 link = nic.get(constants.NIC_LINK, None)
9601 ip = nic.get(constants.INIC_IP, None)
9603 if net is None or net.lower() == constants.VALUE_NONE:
9606 if nic_mode_req is not None or link is not None:
9607 raise errors.OpPrereqError("If network is given, no mode or link"
9608 " is allowed to be passed",
9611 # ip validity checks
9612 if ip is None or ip.lower() == constants.VALUE_NONE:
9614 elif ip.lower() == constants.VALUE_AUTO:
9615 if not op.name_check:
9616 raise errors.OpPrereqError("IP address set to auto but name checks"
9617 " have been skipped",
9621 # We defer pool operations until later, so that the iallocator has
9622 # filled in the instance's node(s)
9623 if ip.lower() == constants.NIC_IP_POOL:
9625 raise errors.OpPrereqError("if ip=pool, parameter network"
9626 " must be passed too",
9629 elif not netutils.IPAddress.IsValid(ip):
9630 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
9635 # TODO: check the ip address for uniqueness
9636 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
9637 raise errors.OpPrereqError("Routed nic mode requires an ip address",
9640 # MAC address verification
9641 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
9642 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9643 mac = utils.NormalizeAndValidateMac(mac)
9646 # TODO: We need to factor this out
9647 cfg.ReserveMAC(mac, ec_id)
9648 except errors.ReservationError:
9649 raise errors.OpPrereqError("MAC address %s already in use"
9650 " in cluster" % mac,
9651 errors.ECODE_NOTUNIQUE)
9653 # Build nic parameters
9656 nicparams[constants.NIC_MODE] = nic_mode
9658 nicparams[constants.NIC_LINK] = link
9660 check_params = cluster.SimpleFillNIC(nicparams)
9661 objects.NIC.CheckParameterSyntax(check_params)
9662 nics.append(objects.NIC(mac=mac, ip=nic_ip,
9663 network=net, nicparams=nicparams))
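# As a sketch of the flow above: a NIC spec that asks for mode "auto" gets the
# cluster default mode, an IP of "pool" is left for later resolution from the
# named network's address pool (once the iallocator has picked the primary
# node), and a MAC of "auto"/"generate" is kept symbolic here and generated
# further down in CheckPrereq; the resulting objects.NIC carries the merged
# nicparams after SimpleFillNIC and CheckParameterSyntax.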
9668 def _ComputeDisks(op, default_vg):
9669 """Computes the instance disks.
9671 @param op: The instance opcode
9672 @param default_vg: The default volume group to assume
9674 @return: The computed disks
9678 for disk in op.disks:
9679 mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
9680 if mode not in constants.DISK_ACCESS_SET:
9681 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
9682 mode, errors.ECODE_INVAL)
9683 size = disk.get(constants.IDISK_SIZE, None)
9685 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
9688 except (TypeError, ValueError):
9689 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
9692 ext_provider = disk.get(constants.IDISK_PROVIDER, None)
9693 if ext_provider and op.disk_template != constants.DT_EXT:
9694 raise errors.OpPrereqError("The '%s' option is only valid for the %s"
9695 " disk template, not %s" %
9696 (constants.IDISK_PROVIDER, constants.DT_EXT,
9697 op.disk_template), errors.ECODE_INVAL)
9699 data_vg = disk.get(constants.IDISK_VG, default_vg)
9701 constants.IDISK_SIZE: size,
9702 constants.IDISK_MODE: mode,
9703 constants.IDISK_VG: data_vg,
9706 if constants.IDISK_METAVG in disk:
9707 new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
9708 if constants.IDISK_ADOPT in disk:
9709 new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
9711 # For extstorage, demand the `provider' option and add any
9712 # additional parameters (ext-params) to the dict
9713 if op.disk_template == constants.DT_EXT:
9715 new_disk[constants.IDISK_PROVIDER] = ext_provider
9717 if key not in constants.IDISK_PARAMS:
9718 new_disk[key] = disk[key]
9720 raise errors.OpPrereqError("Missing provider for template '%s'" %
9721 constants.DT_EXT, errors.ECODE_INVAL)
9723 disks.append(new_disk)
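# Illustration (VG name assumed): an input specification of
# {constants.IDISK_SIZE: 10240, constants.IDISK_MODE: "rw"} on a cluster whose
# default VG is "xenvg" becomes {constants.IDISK_SIZE: 10240,
# constants.IDISK_MODE: "rw", constants.IDISK_VG: "xenvg"}; for DT_EXT the
# mandatory provider entry plus any extra ext-params are copied over as well.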
9728 def _ComputeFullBeParams(op, cluster):
9729 """Computes the full beparams.
9731 @param op: The instance opcode
9732 @param cluster: The cluster config object
9734 @return: The fully filled beparams
9737 default_beparams = cluster.beparams[constants.PP_DEFAULT]
9738 for param, value in op.beparams.iteritems():
9739 if value == constants.VALUE_AUTO:
9740 op.beparams[param] = default_beparams[param]
9741 objects.UpgradeBeParams(op.beparams)
9742 utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
9743 return cluster.SimpleFillBE(op.beparams)
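# For example, an opcode carrying {constants.BE_VCPUS: constants.VALUE_AUTO}
# first gets the cluster default substituted for the "auto" value, any legacy
# "memory" setting is split into minmem/maxmem by objects.UpgradeBeParams, and
# SimpleFillBE then returns the complete backend-parameter dict with cluster
# defaults filled in wherever the opcode stayed silent.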
9746 def _CheckOpportunisticLocking(op):
9747 """Generate error if opportunistic locking is not possible.
9750 if op.opportunistic_locking and not op.iallocator:
9751 raise errors.OpPrereqError("Opportunistic locking is only available in"
9752 " combination with an instance allocator",
9756 class LUInstanceCreate(LogicalUnit):
9757 """Create an instance.
9760 HPATH = "instance-add"
9761 HTYPE = constants.HTYPE_INSTANCE
9764 def CheckArguments(self):
9768 # do not require name_check to ease forward/backward compatibility
9770 if self.op.no_install and self.op.start:
9771 self.LogInfo("No-installation mode selected, disabling startup")
9772 self.op.start = False
9773 # validate/normalize the instance name
9774 self.op.instance_name = \
9775 netutils.Hostname.GetNormalizedName(self.op.instance_name)
9777 if self.op.ip_check and not self.op.name_check:
9778 # TODO: make the ip check more flexible and not depend on the name check
9779 raise errors.OpPrereqError("Cannot do IP address check without a name"
9780 " check", errors.ECODE_INVAL)
9782 # check nics' parameter names
9783 for nic in self.op.nics:
9784 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
9786 # check disks. parameter names and consistent adopt/no-adopt strategy
9787 has_adopt = has_no_adopt = False
9788 for disk in self.op.disks:
9789 if self.op.disk_template != constants.DT_EXT:
9790 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
9791 if constants.IDISK_ADOPT in disk:
9795 if has_adopt and has_no_adopt:
9796 raise errors.OpPrereqError("Either all disks are adopted or none is",
9799 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
9800 raise errors.OpPrereqError("Disk adoption is not supported for the"
9801 " '%s' disk template" %
9802 self.op.disk_template,
9804 if self.op.iallocator is not None:
9805 raise errors.OpPrereqError("Disk adoption not allowed with an"
9806 " iallocator script", errors.ECODE_INVAL)
9807 if self.op.mode == constants.INSTANCE_IMPORT:
9808 raise errors.OpPrereqError("Disk adoption not allowed for"
9809 " instance import", errors.ECODE_INVAL)
9811 if self.op.disk_template in constants.DTS_MUST_ADOPT:
9812 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
9813 " but no 'adopt' parameter given" %
9814 self.op.disk_template,
9817 self.adopt_disks = has_adopt
9819 # instance name verification
9820 if self.op.name_check:
9821 self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
9822 self.op.instance_name = self.hostname1.name
9823 # used in CheckPrereq for ip ping check
9824 self.check_ip = self.hostname1.ip
9826 self.check_ip = None
9828 # file storage checks
9829 if (self.op.file_driver and
9830 self.op.file_driver not in constants.FILE_DRIVER):
9831 raise errors.OpPrereqError("Invalid file driver name '%s'" %
9832 self.op.file_driver, errors.ECODE_INVAL)
9834 if self.op.disk_template == constants.DT_FILE:
9835 opcodes.RequireFileStorage()
9836 elif self.op.disk_template == constants.DT_SHARED_FILE:
9837 opcodes.RequireSharedFileStorage()
9839 ### Node/iallocator related checks
9840 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
9842 if self.op.pnode is not None:
9843 if self.op.disk_template in constants.DTS_INT_MIRROR:
9844 if self.op.snode is None:
9845 raise errors.OpPrereqError("The networked disk templates need"
9846 " a mirror node", errors.ECODE_INVAL)
9848 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
9850 self.op.snode = None
9852 _CheckOpportunisticLocking(self.op)
9854 self._cds = _GetClusterDomainSecret()
9856 if self.op.mode == constants.INSTANCE_IMPORT:
9857 # On import force_variant must be True, because if we forced it at
9858 # initial install, our only chance when importing it back is that it
9860 self.op.force_variant = True
9862 if self.op.no_install:
9863 self.LogInfo("No-installation mode has no effect during import")
9865 elif self.op.mode == constants.INSTANCE_CREATE:
9866 if self.op.os_type is None:
9867 raise errors.OpPrereqError("No guest OS specified",
9869 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
9870 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
9871 " installation" % self.op.os_type,
9873 if self.op.disk_template is None:
9874 raise errors.OpPrereqError("No disk template specified",
9877 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9878 # Check handshake to ensure both clusters have the same domain secret
9879 src_handshake = self.op.source_handshake
9880 if not src_handshake:
9881 raise errors.OpPrereqError("Missing source handshake",
9884 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
9887 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
9890 # Load and check source CA
9891 self.source_x509_ca_pem = self.op.source_x509_ca
9892 if not self.source_x509_ca_pem:
9893 raise errors.OpPrereqError("Missing source X509 CA",
9897 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
9899 except OpenSSL.crypto.Error, err:
9900 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
9901 (err, ), errors.ECODE_INVAL)
9903 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9904 if errcode is not None:
9905 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
9908 self.source_x509_ca = cert
9910 src_instance_name = self.op.source_instance_name
9911 if not src_instance_name:
9912 raise errors.OpPrereqError("Missing source instance name",
9915 self.source_instance_name = \
9916 netutils.GetHostname(name=src_instance_name).name
9919 raise errors.OpPrereqError("Invalid instance creation mode %r" %
9920 self.op.mode, errors.ECODE_INVAL)
9922 def ExpandNames(self):
9923 """ExpandNames for CreateInstance.
9925 Figure out the right locks for instance creation.
9928 self.needed_locks = {}
9930 instance_name = self.op.instance_name
9931 # this is just a preventive check, but someone might still add this
9932 # instance in the meantime, and creation will fail at lock-add time
9933 if instance_name in self.cfg.GetInstanceList():
9934 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
9935 instance_name, errors.ECODE_EXISTS)
9937 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
9939 if self.op.iallocator:
9940 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
9941 # specifying a group on instance creation and then selecting nodes from
9943 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9944 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
9946 if self.op.opportunistic_locking:
9947 self.opportunistic_locks[locking.LEVEL_NODE] = True
9948 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
9950 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
9951 nodelist = [self.op.pnode]
9952 if self.op.snode is not None:
9953 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
9954 nodelist.append(self.op.snode)
9955 self.needed_locks[locking.LEVEL_NODE] = nodelist
9957 # in case of import lock the source node too
9958 if self.op.mode == constants.INSTANCE_IMPORT:
9959 src_node = self.op.src_node
9960 src_path = self.op.src_path
9962 if src_path is None:
9963 self.op.src_path = src_path = self.op.instance_name
9965 if src_node is None:
9966 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9967 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
9968 self.op.src_node = None
9969 if os.path.isabs(src_path):
9970 raise errors.OpPrereqError("Importing an instance from a path"
9971 " requires a source node option",
9974 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
9975 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
9976 self.needed_locks[locking.LEVEL_NODE].append(src_node)
9977 if not os.path.isabs(src_path):
9978 self.op.src_path = src_path = \
9979 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
9981 self.needed_locks[locking.LEVEL_NODE_RES] = \
9982 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
9984 def _RunAllocator(self):
9985 """Run the allocator based on input opcode.
9988 if self.op.opportunistic_locking:
9989 # Only consider nodes for which a lock is held
9990 node_whitelist = self.owned_locks(locking.LEVEL_NODE)
9992 node_whitelist = None
9994 # TODO: Export network to iallocator so that it chooses a pnode
9995 # in a nodegroup that has the desired network connected to it
9996 req = _CreateInstanceAllocRequest(self.op, self.disks,
9997 self.nics, self.be_full,
9999 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
10001 ial.Run(self.op.iallocator)
10003 if not ial.success:
10004 # When opportunistic locks are used, only a temporary failure is generated
10005 if self.op.opportunistic_locking:
10006 ecode = errors.ECODE_TEMP_NORES
10008 ecode = errors.ECODE_NORES
10010 raise errors.OpPrereqError("Can't compute nodes using"
10011 " iallocator '%s': %s" %
10012 (self.op.iallocator, ial.info),
10015 self.op.pnode = ial.result[0]
10016 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
10017 self.op.instance_name, self.op.iallocator,
10018 utils.CommaJoin(ial.result))
10020 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
10022 if req.RequiredNodes() == 2:
10023 self.op.snode = ial.result[1]
10025 def BuildHooksEnv(self):
10026 """Build hooks env.
10028 This runs on master, primary and secondary nodes of the instance.
10032 "ADD_MODE": self.op.mode,
10034 if self.op.mode == constants.INSTANCE_IMPORT:
10035 env["SRC_NODE"] = self.op.src_node
10036 env["SRC_PATH"] = self.op.src_path
10037 env["SRC_IMAGES"] = self.src_images
10039 env.update(_BuildInstanceHookEnv(
10040 name=self.op.instance_name,
10041 primary_node=self.op.pnode,
10042 secondary_nodes=self.secondaries,
10043 status=self.op.start,
10044 os_type=self.op.os_type,
10045 minmem=self.be_full[constants.BE_MINMEM],
10046 maxmem=self.be_full[constants.BE_MAXMEM],
10047 vcpus=self.be_full[constants.BE_VCPUS],
10048 nics=_NICListToTuple(self, self.nics),
10049 disk_template=self.op.disk_template,
10050 disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
10051 for d in self.disks],
10054 hypervisor_name=self.op.hypervisor,
10060 def BuildHooksNodes(self):
10061 """Build hooks nodes.
10064 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
10067 def _ReadExportInfo(self):
10068 """Reads the export information from disk.
10070 It will override the opcode source node and path with the actual
10071 information, if these two were not specified before.
10073 @return: the export information
10076 assert self.op.mode == constants.INSTANCE_IMPORT
10078 src_node = self.op.src_node
10079 src_path = self.op.src_path
10081 if src_node is None:
10082 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
10083 exp_list = self.rpc.call_export_list(locked_nodes)
10085 for node in exp_list:
10086 if exp_list[node].fail_msg:
10088 if src_path in exp_list[node].payload:
10090 self.op.src_node = src_node = node
10091 self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
10095 raise errors.OpPrereqError("No export found for relative path %s" %
10096 src_path, errors.ECODE_INVAL)
10098 _CheckNodeOnline(self, src_node)
10099 result = self.rpc.call_export_info(src_node, src_path)
10100 result.Raise("No export or invalid export found in dir %s" % src_path)
10102 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
10103 if not export_info.has_section(constants.INISECT_EXP):
10104 raise errors.ProgrammerError("Corrupted export config",
10105 errors.ECODE_ENVIRON)
10107 ei_version = export_info.get(constants.INISECT_EXP, "version")
10108 if (int(ei_version) != constants.EXPORT_VERSION):
10109 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
10110 (ei_version, constants.EXPORT_VERSION),
10111 errors.ECODE_ENVIRON)
10114 def _ReadExportParams(self, einfo):
10115 """Use export parameters as defaults.
10117 In case the opcode doesn't specify (i.e. override) some instance
10118 parameters, try to use them from the export information, if it
10119 declares them.
10122 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
10124 if self.op.disk_template is None:
10125 if einfo.has_option(constants.INISECT_INS, "disk_template"):
10126 self.op.disk_template = einfo.get(constants.INISECT_INS,
10128 if self.op.disk_template not in constants.DISK_TEMPLATES:
10129 raise errors.OpPrereqError("Disk template specified in configuration"
10130 " file is not one of the allowed values:"
10132 " ".join(constants.DISK_TEMPLATES),
10133 errors.ECODE_INVAL)
10135 raise errors.OpPrereqError("No disk template specified and the export"
10136 " is missing the disk_template information",
10137 errors.ECODE_INVAL)
10139 if not self.op.disks:
10141 # TODO: import the disk iv_name too
10142 for idx in range(constants.MAX_DISKS):
10143 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
10144 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
10145 disks.append({constants.IDISK_SIZE: disk_sz})
10146 self.op.disks = disks
10147 if not disks and self.op.disk_template != constants.DT_DISKLESS:
10148 raise errors.OpPrereqError("No disk info specified and the export"
10149 " is missing the disk information",
10150 errors.ECODE_INVAL)
10152 if not self.op.nics:
10154 for idx in range(constants.MAX_NICS):
10155 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
10157 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
10158 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
10163 self.op.nics = nics
10165 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
10166 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
10168 if (self.op.hypervisor is None and
10169 einfo.has_option(constants.INISECT_INS, "hypervisor")):
10170 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
10172 if einfo.has_section(constants.INISECT_HYP):
10173 # use the export parameters but do not override the ones
10174 # specified by the user
10175 for name, value in einfo.items(constants.INISECT_HYP):
10176 if name not in self.op.hvparams:
10177 self.op.hvparams[name] = value
10179 if einfo.has_section(constants.INISECT_BEP):
10180 # use the parameters, without overriding
10181 for name, value in einfo.items(constants.INISECT_BEP):
10182 if name not in self.op.beparams:
10183 self.op.beparams[name] = value
10184 # Compatibility for the old "memory" be param
10185 if name == constants.BE_MEMORY:
10186 if constants.BE_MAXMEM not in self.op.beparams:
10187 self.op.beparams[constants.BE_MAXMEM] = value
10188 if constants.BE_MINMEM not in self.op.beparams:
10189 self.op.beparams[constants.BE_MINMEM] = value
10191 # try to read the parameters old style, from the main section
10192 for name in constants.BES_PARAMETERS:
10193 if (name not in self.op.beparams and
10194 einfo.has_option(constants.INISECT_INS, name)):
10195 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
10197 if einfo.has_section(constants.INISECT_OSP):
10198 # use the parameters, without overriding
10199 for name, value in einfo.items(constants.INISECT_OSP):
10200 if name not in self.op.osparams:
10201 self.op.osparams[name] = value
10203 def _RevertToDefaults(self, cluster):
10204 """Revert the instance parameters to the default values.
10208 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
10209 for name in self.op.hvparams.keys():
10210 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
10211 del self.op.hvparams[name]
10213 be_defs = cluster.SimpleFillBE({})
10214 for name in self.op.beparams.keys():
10215 if name in be_defs and be_defs[name] == self.op.beparams[name]:
10216 del self.op.beparams[name]
10218 nic_defs = cluster.SimpleFillNIC({})
10219 for nic in self.op.nics:
10220 for name in constants.NICS_PARAMETERS:
10221 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
10224 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
10225 for name in self.op.osparams.keys():
10226 if name in os_defs and os_defs[name] == self.op.osparams[name]:
10227 del self.op.osparams[name]
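# The net effect: with identify_defaults enabled, a parameter whose requested
# value happens to equal the current cluster default (for example an imported
# instance whose hypervisor settings match the cluster-wide ones) is dropped
# from the opcode and keeps tracking the cluster default afterwards, instead
# of being stored as an instance-level override.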
10229 def _CalculateFileStorageDir(self):
10230 """Calculate final instance file storage dir.
10233 # file storage dir calculation/check
10234 self.instance_file_storage_dir = None
10235 if self.op.disk_template in constants.DTS_FILEBASED:
10236 # build the full file storage dir path
10239 if self.op.disk_template == constants.DT_SHARED_FILE:
10240 get_fsd_fn = self.cfg.GetSharedFileStorageDir
10242 get_fsd_fn = self.cfg.GetFileStorageDir
10244 cfg_storagedir = get_fsd_fn()
10245 if not cfg_storagedir:
10246 raise errors.OpPrereqError("Cluster file storage dir not defined",
10247 errors.ECODE_STATE)
10248 joinargs.append(cfg_storagedir)
10250 if self.op.file_storage_dir is not None:
10251 joinargs.append(self.op.file_storage_dir)
10253 joinargs.append(self.op.instance_name)
10255 # pylint: disable=W0142
10256 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
10258 def CheckPrereq(self): # pylint: disable=R0914
10259 """Check prerequisites.
10262 self._CalculateFileStorageDir()
10264 if self.op.mode == constants.INSTANCE_IMPORT:
10265 export_info = self._ReadExportInfo()
10266 self._ReadExportParams(export_info)
10267 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
10269 self._old_instance_name = None
10271 if (not self.cfg.GetVGName() and
10272 self.op.disk_template not in constants.DTS_NOT_LVM):
10273 raise errors.OpPrereqError("Cluster does not support lvm-based"
10274 " instances", errors.ECODE_STATE)
10276 if (self.op.hypervisor is None or
10277 self.op.hypervisor == constants.VALUE_AUTO):
10278 self.op.hypervisor = self.cfg.GetHypervisorType()
10280 cluster = self.cfg.GetClusterInfo()
10281 enabled_hvs = cluster.enabled_hypervisors
10282 if self.op.hypervisor not in enabled_hvs:
10283 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
10285 (self.op.hypervisor, ",".join(enabled_hvs)),
10286 errors.ECODE_STATE)
10288 # Check tag validity
10289 for tag in self.op.tags:
10290 objects.TaggableObject.ValidateTag(tag)
10292 # check hypervisor parameter syntax (locally)
10293 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
10294 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
10296 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
10297 hv_type.CheckParameterSyntax(filled_hvp)
10298 self.hv_full = filled_hvp
10299 # check that we don't specify global parameters on an instance
10300 _CheckGlobalHvParams(self.op.hvparams)
10302 # fill and remember the beparams dict
10303 self.be_full = _ComputeFullBeParams(self.op, cluster)
10305 # build os parameters
10306 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
10308 # now that hvp/bep are in final format, let's reset to defaults,
10310 if self.op.identify_defaults:
10311 self._RevertToDefaults(cluster)
10314 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
10315 self.proc.GetECId())
10317 # disk checks/pre-build
10318 default_vg = self.cfg.GetVGName()
10319 self.disks = _ComputeDisks(self.op, default_vg)
10321 if self.op.mode == constants.INSTANCE_IMPORT:
10323 for idx in range(len(self.disks)):
10324 option = "disk%d_dump" % idx
10325 if export_info.has_option(constants.INISECT_INS, option):
10326 # FIXME: are the old os-es, disk sizes, etc. useful?
10327 export_name = export_info.get(constants.INISECT_INS, option)
10328 image = utils.PathJoin(self.op.src_path, export_name)
10329 disk_images.append(image)
10331 disk_images.append(False)
10333 self.src_images = disk_images
10335 if self.op.instance_name == self._old_instance_name:
10336 for idx, nic in enumerate(self.nics):
10337 if nic.mac == constants.VALUE_AUTO:
10338 nic_mac_ini = "nic%d_mac" % idx
10339 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
10341 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
10343 # ip ping checks (we use the same ip that was resolved in ExpandNames)
10344 if self.op.ip_check:
10345 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
10346 raise errors.OpPrereqError("IP %s of instance %s already in use" %
10347 (self.check_ip, self.op.instance_name),
10348 errors.ECODE_NOTUNIQUE)
10350 #### mac address generation
10351 # By generating here the mac address both the allocator and the hooks get
10352 # the real final mac address rather than the 'auto' or 'generate' value.
10353 # There is a race condition between the generation and the instance object
10354 # creation, which means that we know the mac is valid now, but we're not
10355 # sure it will be when we actually add the instance. If things go bad
10356 # adding the instance will abort because of a duplicate mac, and the
10357 # creation job will fail.
10358 for nic in self.nics:
10359 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
10360 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
10364 if self.op.iallocator is not None:
10365 self._RunAllocator()
10367 # Release all unneeded node locks
10368 keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
10369 _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
10370 _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
10371 _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
10373 assert (self.owned_locks(locking.LEVEL_NODE) ==
10374 self.owned_locks(locking.LEVEL_NODE_RES)), \
10375 "Node locks differ from node resource locks"
10377 #### node related checks
10379 # check primary node
10380 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
10381 assert self.pnode is not None, \
10382 "Cannot retrieve locked node %s" % self.op.pnode
10384 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
10385 pnode.name, errors.ECODE_STATE)
10387 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
10388 pnode.name, errors.ECODE_STATE)
10389 if not pnode.vm_capable:
10390 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
10391 " '%s'" % pnode.name, errors.ECODE_STATE)
10393 self.secondaries = []
10395 # Fill in any IPs from IP pools. This must happen here, because we need to
10396 # know the nic's primary node, as specified by the iallocator
10397 for idx, nic in enumerate(self.nics):
10399 if net is not None:
10400 netparams = self.cfg.GetGroupNetParams(net, self.pnode.name)
10401 if netparams is None:
10402 raise errors.OpPrereqError("No netparams found for network"
10403 " %s. Propably not connected to"
10404 " node's %s nodegroup" %
10405 (net, self.pnode.name),
10406 errors.ECODE_INVAL)
10407 self.LogInfo("NIC/%d inherits netparams %s" %
10408 (idx, netparams.values()))
10409 nic.nicparams = dict(netparams)
10410 if nic.ip is not None:
10411 if nic.ip.lower() == constants.NIC_IP_POOL:
10413 nic.ip = self.cfg.GenerateIp(net, self.proc.GetECId())
10414 except errors.ReservationError:
10415 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
10416 " from the address pool" % idx,
10417 errors.ECODE_STATE)
10418 self.LogInfo("Chose IP %s from network %s", nic.ip, net)
10421 self.cfg.ReserveIp(net, nic.ip, self.proc.GetECId())
10422 except errors.ReservationError:
10423 raise errors.OpPrereqError("IP address %s already in use"
10424 " or does not belong to network %s" %
10426 errors.ECODE_NOTUNIQUE)
10428 # net is None, ip None or given
10429 elif self.op.conflicts_check:
10430 _CheckForConflictingIp(self, nic.ip, self.pnode.name)
10432 # mirror node verification
10433 if self.op.disk_template in constants.DTS_INT_MIRROR:
10434 if self.op.snode == pnode.name:
10435 raise errors.OpPrereqError("The secondary node cannot be the"
10436 " primary node", errors.ECODE_INVAL)
10437 _CheckNodeOnline(self, self.op.snode)
10438 _CheckNodeNotDrained(self, self.op.snode)
10439 _CheckNodeVmCapable(self, self.op.snode)
10440 self.secondaries.append(self.op.snode)
10442 snode = self.cfg.GetNodeInfo(self.op.snode)
10443 if pnode.group != snode.group:
10444 self.LogWarning("The primary and secondary nodes are in two"
10445 " different node groups; the disk parameters"
10446 " from the first disk's node group will be"
10449 nodenames = [pnode.name] + self.secondaries
10451 # Verify instance specs
10452 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
10454 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
10455 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
10456 constants.ISPEC_DISK_COUNT: len(self.disks),
10457 constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
10458 constants.ISPEC_NIC_COUNT: len(self.nics),
10459 constants.ISPEC_SPINDLE_USE: spindle_use,
10462 group_info = self.cfg.GetNodeGroup(pnode.group)
10463 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
10464 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
10465 if not self.op.ignore_ipolicy and res:
10466 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
10467 (pnode.group, group_info.name, utils.CommaJoin(res)))
10468 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
10470 if not self.adopt_disks:
10471 if self.op.disk_template == constants.DT_RBD:
10472 # _CheckRADOSFreeSpace() is just a placeholder.
10473 # Any function that checks prerequisites can be placed here.
10474 # Check if there is enough space on the RADOS cluster.
10475 _CheckRADOSFreeSpace()
10476 elif self.op.disk_template == constants.DT_EXT:
10477 # FIXME: Function that checks prereqs if needed
10480 # Check lv size requirements, if not adopting
10481 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
10482 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
10484 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
10485 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
10486 disk[constants.IDISK_ADOPT])
10487 for disk in self.disks])
10488 if len(all_lvs) != len(self.disks):
10489 raise errors.OpPrereqError("Duplicate volume names given for adoption",
10490 errors.ECODE_INVAL)
10491 for lv_name in all_lvs:
10493 # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
10494 # to ReserveLV use the same syntax
10495 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
10496 except errors.ReservationError:
10497 raise errors.OpPrereqError("LV named %s used by another instance" %
10498 lv_name, errors.ECODE_NOTUNIQUE)
10500 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
10501 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
10503 node_lvs = self.rpc.call_lv_list([pnode.name],
10504 vg_names.payload.keys())[pnode.name]
10505 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
10506 node_lvs = node_lvs.payload
10508 delta = all_lvs.difference(node_lvs.keys())
10510 raise errors.OpPrereqError("Missing logical volume(s): %s" %
10511 utils.CommaJoin(delta),
10512 errors.ECODE_INVAL)
10513 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
10515 raise errors.OpPrereqError("Online logical volumes found, cannot"
10516 " adopt: %s" % utils.CommaJoin(online_lvs),
10517 errors.ECODE_STATE)
10518 # update the size of disk based on what is found
10519 for dsk in self.disks:
10520 dsk[constants.IDISK_SIZE] = \
10521 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
10522 dsk[constants.IDISK_ADOPT])][0]))
10524 elif self.op.disk_template == constants.DT_BLOCK:
10525 # Normalize and de-duplicate device paths
10526 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
10527 for disk in self.disks])
10528 if len(all_disks) != len(self.disks):
10529 raise errors.OpPrereqError("Duplicate disk names given for adoption",
10530 errors.ECODE_INVAL)
10531 baddisks = [d for d in all_disks
10532 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
10534 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
10535 " cannot be adopted" %
10536 (utils.CommaJoin(baddisks),
10537 constants.ADOPTABLE_BLOCKDEV_ROOT),
10538 errors.ECODE_INVAL)
10540 node_disks = self.rpc.call_bdev_sizes([pnode.name],
10541 list(all_disks))[pnode.name]
10542 node_disks.Raise("Cannot get block device information from node %s" %
10544 node_disks = node_disks.payload
10545 delta = all_disks.difference(node_disks.keys())
10547 raise errors.OpPrereqError("Missing block device(s): %s" %
10548 utils.CommaJoin(delta),
10549 errors.ECODE_INVAL)
10550 for dsk in self.disks:
10551 dsk[constants.IDISK_SIZE] = \
10552 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
10554 # Verify instance specs
10555 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
10557 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
10558 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
10559 constants.ISPEC_DISK_COUNT: len(self.disks),
10560 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
10561 for disk in self.disks],
10562 constants.ISPEC_NIC_COUNT: len(self.nics),
10563 constants.ISPEC_SPINDLE_USE: spindle_use,
10566 group_info = self.cfg.GetNodeGroup(pnode.group)
10567 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
10568 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
10569 if not self.op.ignore_ipolicy and res:
10570 raise errors.OpPrereqError(("Instance allocation to group %s violates"
10571 " policy: %s") % (pnode.group,
10572 utils.CommaJoin(res)),
10573 errors.ECODE_INVAL)
10575 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
10577 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
10578 # check OS parameters (remotely)
10579 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
10581 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
10583 #TODO: _CheckExtParams (remotely)
10584 # Check parameters for extstorage
10586 # memory check on primary node
10587 #TODO(dynmem): use MINMEM for checking
10589 _CheckNodeFreeMemory(self, self.pnode.name,
10590 "creating instance %s" % self.op.instance_name,
10591 self.be_full[constants.BE_MAXMEM],
10592 self.op.hypervisor)
10594 self.dry_run_result = list(nodenames)
10596 def Exec(self, feedback_fn):
10597 """Create and add the instance to the cluster.
10600 instance = self.op.instance_name
10601 pnode_name = self.pnode.name
10603 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
10604 self.owned_locks(locking.LEVEL_NODE)), \
10605 "Node locks differ from node resource locks"
10606 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
10608 ht_kind = self.op.hypervisor
10609 if ht_kind in constants.HTS_REQ_PORT:
10610 network_port = self.cfg.AllocatePort()
10612 network_port = None
10614 # This is ugly, but we have a chicken-and-egg problem here:
10615 # We can only take the group disk parameters, as the instance
10616 # has no disks yet (we are generating them right here).
10617 node = self.cfg.GetNodeInfo(pnode_name)
10618 nodegroup = self.cfg.GetNodeGroup(node.group)
10619 disks = _GenerateDiskTemplate(self,
10620 self.op.disk_template,
10621 instance, pnode_name,
10624 self.instance_file_storage_dir,
10625 self.op.file_driver,
10628 self.cfg.GetGroupDiskParams(nodegroup))
10630 iobj = objects.Instance(name=instance, os=self.op.os_type,
10631 primary_node=pnode_name,
10632 nics=self.nics, disks=disks,
10633 disk_template=self.op.disk_template,
10634 admin_state=constants.ADMINST_DOWN,
10635 network_port=network_port,
10636 beparams=self.op.beparams,
10637 hvparams=self.op.hvparams,
10638 hypervisor=self.op.hypervisor,
10639 osparams=self.op.osparams,
10643 for tag in self.op.tags:
10646 if self.adopt_disks:
10647 if self.op.disk_template == constants.DT_PLAIN:
10648 # rename LVs to the newly-generated names; we need to construct
10649 # 'fake' LV disks with the old data, plus the new unique_id
10650 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
10652 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
10653 rename_to.append(t_dsk.logical_id)
10654 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
10655 self.cfg.SetDiskID(t_dsk, pnode_name)
10656 result = self.rpc.call_blockdev_rename(pnode_name,
10657 zip(tmp_disks, rename_to))
10658 result.Raise("Failed to rename adoped LVs")
10660 feedback_fn("* creating instance disks...")
10662 _CreateDisks(self, iobj)
10663 except errors.OpExecError:
10664 self.LogWarning("Device creation failed, reverting...")
10666 _RemoveDisks(self, iobj)
10668 self.cfg.ReleaseDRBDMinors(instance)
10671 feedback_fn("adding instance %s to cluster config" % instance)
10673 self.cfg.AddInstance(iobj, self.proc.GetECId())
10675 # Declare that we don't want to remove the instance lock anymore, as we've
10676 # added the instance to the config
10677 del self.remove_locks[locking.LEVEL_INSTANCE]
10679 if self.op.mode == constants.INSTANCE_IMPORT:
10680 # Release unused nodes
10681 _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
10683 # Release all nodes
10684 _ReleaseLocks(self, locking.LEVEL_NODE)
10687 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
10688 feedback_fn("* wiping instance disks...")
10690 _WipeDisks(self, iobj)
10691 except errors.OpExecError, err:
10692 logging.exception("Wiping disks failed")
10693 self.LogWarning("Wiping instance disks failed (%s)", err)
10697 # Something is already wrong with the disks, don't do anything else
10699 elif self.op.wait_for_sync:
10700 disk_abort = not _WaitForSync(self, iobj)
10701 elif iobj.disk_template in constants.DTS_INT_MIRROR:
10702 # make sure the disks are not degraded (still sync-ing is ok)
10703 feedback_fn("* checking mirrors status")
10704 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
10709 _RemoveDisks(self, iobj)
10710 self.cfg.RemoveInstance(iobj.name)
10711 # Make sure the instance lock gets removed
10712 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
10713 raise errors.OpExecError("There are some degraded disks for"
10716 # Release all node resource locks
10717 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
10719 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
10720 # we need to set the disk IDs to the primary node, since the
10721 # preceding code might or might not have done it, depending on
10722 # disk template and other options
10723 for disk in iobj.disks:
10724 self.cfg.SetDiskID(disk, pnode_name)
10725 if self.op.mode == constants.INSTANCE_CREATE:
10726 if not self.op.no_install:
10727 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
10728 not self.op.wait_for_sync)
10730 feedback_fn("* pausing disk sync to install instance OS")
10731 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10734 for idx, success in enumerate(result.payload):
10736 logging.warn("pause-sync of instance %s for disk %d failed",
10739 feedback_fn("* running the instance OS create scripts...")
10740 # FIXME: pass debug option from opcode to backend
10742 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
10743 self.op.debug_level)
10745 feedback_fn("* resuming disk sync")
10746 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10749 for idx, success in enumerate(result.payload):
10751 logging.warn("resume-sync of instance %s for disk %d failed",
10754 os_add_result.Raise("Could not add os for instance %s"
10755 " on node %s" % (instance, pnode_name))
10758 if self.op.mode == constants.INSTANCE_IMPORT:
10759 feedback_fn("* running the instance OS import scripts...")
10763 for idx, image in enumerate(self.src_images):
10767 # FIXME: pass debug option from opcode to backend
10768 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
10769 constants.IEIO_FILE, (image, ),
10770 constants.IEIO_SCRIPT,
10771 (iobj.disks[idx], idx),
10773 transfers.append(dt)
10776 masterd.instance.TransferInstanceData(self, feedback_fn,
10777 self.op.src_node, pnode_name,
10778 self.pnode.secondary_ip,
10780 if not compat.all(import_result):
10781 self.LogWarning("Some disks for instance %s on node %s were not"
10782 " imported successfully" % (instance, pnode_name))
10784 rename_from = self._old_instance_name
10786 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
10787 feedback_fn("* preparing remote import...")
10788 # The source cluster will stop the instance before attempting to make
10789 # a connection. In some cases stopping an instance can take a long
10790 # time, hence the shutdown timeout is added to the connection
10792 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
10793 self.op.source_shutdown_timeout)
10794 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10796 assert iobj.primary_node == self.pnode.name
10798 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
10799 self.source_x509_ca,
10800 self._cds, timeouts)
10801 if not compat.all(disk_results):
10802 # TODO: Should the instance still be started, even if some disks
10803 # failed to import (valid for local imports, too)?
10804 self.LogWarning("Some disks for instance %s on node %s were not"
10805 " imported successfully" % (instance, pnode_name))
10807 rename_from = self.source_instance_name
10810 # also checked in the prereq part
10811 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
10814 # Run rename script on newly imported instance
10815 assert iobj.name == instance
10816 feedback_fn("Running rename script for %s" % instance)
10817 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
10819 self.op.debug_level)
10820 if result.fail_msg:
10821 self.LogWarning("Failed to run rename script for %s on node"
10822 " %s: %s" % (instance, pnode_name, result.fail_msg))
10824 assert not self.owned_locks(locking.LEVEL_NODE_RES)
10827 iobj.admin_state = constants.ADMINST_UP
10828 self.cfg.Update(iobj, feedback_fn)
10829 logging.info("Starting instance %s on node %s", instance, pnode_name)
10830 feedback_fn("* starting instance...")
10831 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
10833 result.Raise("Could not start instance")
10835 return list(iobj.all_nodes)
10838 class LUInstanceMultiAlloc(NoHooksLU):
10839 """Allocates multiple instances at the same time.
10844 def CheckArguments(self):
10845 """Check arguments.
10849 for inst in self.op.instances:
10850 if inst.iallocator is not None:
10851 raise errors.OpPrereqError("iallocator are not allowed to be set on"
10852 " instance objects", errors.ECODE_INVAL)
10853 nodes.append(bool(inst.pnode))
10854 if inst.disk_template in constants.DTS_INT_MIRROR:
10855 nodes.append(bool(inst.snode))
10857 has_nodes = compat.any(nodes)
10858 if compat.all(nodes) ^ has_nodes:
10859 raise errors.OpPrereqError("There are instance objects providing"
10860 " pnode/snode while others do not",
10861 errors.ECODE_INVAL)
10863 if self.op.iallocator is None:
10864 default_iallocator = self.cfg.GetDefaultIAllocator()
10865 if default_iallocator and has_nodes:
10866 self.op.iallocator = default_iallocator
10868 raise errors.OpPrereqError("No iallocator or nodes on the instances"
10869 " given and no cluster-wide default"
10870 " iallocator found; please specify either"
10871 " an iallocator or nodes on the instances"
10872 " or set a cluster-wide default iallocator",
10873 errors.ECODE_INVAL)
10875 _CheckOpportunisticLocking(self.op)
10877 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
10879 raise errors.OpPrereqError("There are duplicate instance names: %s" %
10880 utils.CommaJoin(dups), errors.ECODE_INVAL)
10882 def ExpandNames(self):
10883 """Calculate the locks.
10886 self.share_locks = _ShareAll()
10887 self.needed_locks = {
10888 # iallocator will select nodes and even if no iallocator is used,
10889 # collisions with LUInstanceCreate should be avoided
10890 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
10893 if self.op.iallocator:
10894 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10895 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
10897 if self.op.opportunistic_locking:
10898 self.opportunistic_locks[locking.LEVEL_NODE] = True
10899 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
10902 for inst in self.op.instances:
10903 inst.pnode = _ExpandNodeName(self.cfg, inst.pnode)
10904 nodeslist.append(inst.pnode)
10905 if inst.snode is not None:
10906 inst.snode = _ExpandNodeName(self.cfg, inst.snode)
10907 nodeslist.append(inst.snode)
10909 self.needed_locks[locking.LEVEL_NODE] = nodeslist
10910 # Lock resources of instance's primary and secondary nodes (copy to
10911 # prevent accidental modification)
10912 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
10914 def CheckPrereq(self):
10915 """Check prerequisite.
10918 cluster = self.cfg.GetClusterInfo()
10919 default_vg = self.cfg.GetVGName()
10920 ec_id = self.proc.GetECId()
10922 if self.op.opportunistic_locking:
10923 # Only consider nodes for which a lock is held
10924 node_whitelist = self.owned_locks(locking.LEVEL_NODE)
10926 node_whitelist = None
10928 insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
10929 _ComputeNics(op, cluster, None,
10931 _ComputeFullBeParams(op, cluster),
10933 for op in self.op.instances]
10935 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
10936 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
10938 ial.Run(self.op.iallocator)
10940 if not ial.success:
10941 raise errors.OpPrereqError("Can't compute nodes using"
10942 " iallocator '%s': %s" %
10943 (self.op.iallocator, ial.info),
10944 errors.ECODE_NORES)
10946 self.ia_result = ial.result
10948 if self.op.dry_run:
10949 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
10950 constants.JOB_IDS_KEY: [],
10953 def _ConstructPartialResult(self):
10954 """Contructs the partial result.
10957 (allocatable, failed) = self.ia_result
10959 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
10960 map(compat.fst, allocatable),
10961 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
10964 def Exec(self, feedback_fn):
10965 """Executes the opcode.
10968 op2inst = dict((op.instance_name, op) for op in self.op.instances)
10969 (allocatable, failed) = self.ia_result
10972 for (name, nodes) in allocatable:
10973 op = op2inst.pop(name)
10976 (op.pnode, op.snode) = nodes
10978 (op.pnode,) = nodes
10982 missing = set(op2inst.keys()) - set(failed)
10983 assert not missing, \
10984 "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
10986 return ResultWithJobs(jobs, **self._ConstructPartialResult())
10989 def _CheckRADOSFreeSpace():
10990 """Compute disk size requirements inside the RADOS cluster.
10993 # For the RADOS cluster we assume there is always enough space.
10997 class LUInstanceConsole(NoHooksLU):
10998 """Connect to an instance's console.
11000 This is somewhat special in that it returns the command line that
11001 you need to run on the master node in order to connect to the
11007 def ExpandNames(self):
11008 self.share_locks = _ShareAll()
11009 self._ExpandAndLockInstance()
11011 def CheckPrereq(self):
11012 """Check prerequisites.
11014 This checks that the instance is in the cluster.
11017 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11018 assert self.instance is not None, \
11019 "Cannot retrieve locked instance %s" % self.op.instance_name
11020 _CheckNodeOnline(self, self.instance.primary_node)
11022 def Exec(self, feedback_fn):
11023 """Connect to the console of an instance
11026 instance = self.instance
11027 node = instance.primary_node
11029 node_insts = self.rpc.call_instance_list([node],
11030 [instance.hypervisor])[node]
11031 node_insts.Raise("Can't get node information from %s" % node)
11033 if instance.name not in node_insts.payload:
11034 if instance.admin_state == constants.ADMINST_UP:
11035 state = constants.INSTST_ERRORDOWN
11036 elif instance.admin_state == constants.ADMINST_DOWN:
11037 state = constants.INSTST_ADMINDOWN
11039 state = constants.INSTST_ADMINOFFLINE
11040 raise errors.OpExecError("Instance %s is not running (state %s)" %
11041 (instance.name, state))
11043 logging.debug("Connecting to console of %s on %s", instance.name, node)
11045 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
11048 def _GetInstanceConsole(cluster, instance):
11049 """Returns console information for an instance.
11051 @type cluster: L{objects.Cluster}
11052 @type instance: L{objects.Instance}
11056 hyper = hypervisor.GetHypervisor(instance.hypervisor)
11057 # beparams and hvparams are passed separately, to avoid editing the
11058 # instance and then saving the defaults in the instance itself.
11059 hvparams = cluster.FillHV(instance)
11060 beparams = cluster.FillBE(instance)
11061 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
11063 assert console.instance == instance.name
11064 assert console.Validate()
11066 return console.ToDict()
11069 class LUInstanceReplaceDisks(LogicalUnit):
11070 """Replace the disks of an instance.
11073 HPATH = "mirrors-replace"
11074 HTYPE = constants.HTYPE_INSTANCE
11077 def CheckArguments(self):
11078 """Check arguments.
11081 remote_node = self.op.remote_node
11082 ialloc = self.op.iallocator
11083 if self.op.mode == constants.REPLACE_DISK_CHG:
11084 if remote_node is None and ialloc is None:
11085 raise errors.OpPrereqError("When changing the secondary either an"
11086 " iallocator script must be used or the"
11087 " new node given", errors.ECODE_INVAL)
11089 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
11091 elif remote_node is not None or ialloc is not None:
11092 # Not replacing the secondary
11093 raise errors.OpPrereqError("The iallocator and new node options can"
11094 " only be used when changing the"
11095 " secondary node", errors.ECODE_INVAL)
11097 def ExpandNames(self):
11098 self._ExpandAndLockInstance()
11100 assert locking.LEVEL_NODE not in self.needed_locks
11101 assert locking.LEVEL_NODE_RES not in self.needed_locks
11102 assert locking.LEVEL_NODEGROUP not in self.needed_locks
11104 assert self.op.iallocator is None or self.op.remote_node is None, \
11105 "Conflicting options"
11107 if self.op.remote_node is not None:
11108 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
11110 # Warning: do not remove the locking of the new secondary here
11111 # unless DRBD8.AddChildren is changed to work in parallel;
11112 # currently it doesn't since parallel invocations of
11113 # FindUnusedMinor will conflict
11114 self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
11115 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
11117 self.needed_locks[locking.LEVEL_NODE] = []
11118 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11120 if self.op.iallocator is not None:
11121 # iallocator will select a new node in the same group
11122 self.needed_locks[locking.LEVEL_NODEGROUP] = []
11123 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
11125 self.needed_locks[locking.LEVEL_NODE_RES] = []
11127 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
11128 self.op.iallocator, self.op.remote_node,
11129 self.op.disks, self.op.early_release,
11130 self.op.ignore_ipolicy)
11132 self.tasklets = [self.replacer]
11134 def DeclareLocks(self, level):
11135 if level == locking.LEVEL_NODEGROUP:
11136 assert self.op.remote_node is None
11137 assert self.op.iallocator is not None
11138 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
11140 self.share_locks[locking.LEVEL_NODEGROUP] = 1
11141 # Lock all groups used by instance optimistically; this requires going
11142 # via the node before it's locked, requiring verification later on
11143 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11144 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
11146 elif level == locking.LEVEL_NODE:
11147 if self.op.iallocator is not None:
11148 assert self.op.remote_node is None
11149 assert not self.needed_locks[locking.LEVEL_NODE]
11150 assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
11152 # Lock member nodes of all locked groups
11153 self.needed_locks[locking.LEVEL_NODE] = \
11155 for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
11156 for node_name in self.cfg.GetNodeGroup(group_uuid).members]
11158 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
11160 self._LockInstancesNodes()
11162 elif level == locking.LEVEL_NODE_RES:
11164 self.needed_locks[locking.LEVEL_NODE_RES] = \
11165 self.needed_locks[locking.LEVEL_NODE]
11167 def BuildHooksEnv(self):
11168 """Build hooks env.
11170 This runs on the master, the primary and all the secondaries.
11173 instance = self.replacer.instance
11175 "MODE": self.op.mode,
11176 "NEW_SECONDARY": self.op.remote_node,
11177 "OLD_SECONDARY": instance.secondary_nodes[0],
11179 env.update(_BuildInstanceHookEnvByObject(self, instance))
11182 def BuildHooksNodes(self):
11183 """Build hooks nodes.
11186 instance = self.replacer.instance
11188 self.cfg.GetMasterNode(),
11189 instance.primary_node,
11191 if self.op.remote_node is not None:
11192 nl.append(self.op.remote_node)
11195 def CheckPrereq(self):
11196 """Check prerequisites.
11199 assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
11200 self.op.iallocator is None)
11202 # Verify if node group locks are still correct
11203 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
11205 _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
11207 return LogicalUnit.CheckPrereq(self)
11210 class TLReplaceDisks(Tasklet):
11211 """Replaces disks for an instance.
11213 Note: Locking is not within the scope of this class.
11216 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
11217 disks, early_release, ignore_ipolicy):
11218 """Initializes this class.
11221 Tasklet.__init__(self, lu)
11224 self.instance_name = instance_name
11226 self.iallocator_name = iallocator_name
11227 self.remote_node = remote_node
11229 self.early_release = early_release
11230 self.ignore_ipolicy = ignore_ipolicy
11233 self.instance = None
11234 self.new_node = None
11235 self.target_node = None
11236 self.other_node = None
11237 self.remote_node_info = None
11238 self.node_secondary_ip = None
11241 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
11242 """Compute a new secondary node using an IAllocator.
11245 req = iallocator.IAReqRelocate(name=instance_name,
11246 relocate_from=list(relocate_from))
11247 ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
11249 ial.Run(iallocator_name)
11251 if not ial.success:
11252 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
11253 " %s" % (iallocator_name, ial.info),
11254 errors.ECODE_NORES)
11256 remote_node_name = ial.result[0]
11258 lu.LogInfo("Selected new secondary for instance '%s': %s",
11259 instance_name, remote_node_name)
11261 return remote_node_name
11263 def _FindFaultyDisks(self, node_name):
11264 """Wrapper for L{_FindFaultyInstanceDisks}.
11267 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
11270 def _CheckDisksActivated(self, instance):
11271 """Checks if the instance disks are activated.
11273 @param instance: The instance whose disks to check
11274 @return: True if they are activated, False otherwise
11277 nodes = instance.all_nodes
11279 for idx, dev in enumerate(instance.disks):
11281 self.lu.LogInfo("Checking disk/%d on %s", idx, node)
11282 self.cfg.SetDiskID(dev, node)
11284 result = _BlockdevFind(self, node, dev, instance)
11288 elif result.fail_msg or not result.payload:
11293 def CheckPrereq(self):
11294 """Check prerequisites.
11296 This checks that the instance is in the cluster.
11299 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
11300 assert instance is not None, \
11301 "Cannot retrieve locked instance %s" % self.instance_name
11303 if instance.disk_template != constants.DT_DRBD8:
11304 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
11305 " instances", errors.ECODE_INVAL)
11307 if len(instance.secondary_nodes) != 1:
11308 raise errors.OpPrereqError("The instance has a strange layout,"
11309 " expected one secondary but found %d" %
11310 len(instance.secondary_nodes),
11311 errors.ECODE_FAULT)
11313 instance = self.instance
11314 secondary_node = instance.secondary_nodes[0]
11316 if self.iallocator_name is None:
11317 remote_node = self.remote_node
11319 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
11320 instance.name, instance.secondary_nodes)
11322 if remote_node is None:
11323 self.remote_node_info = None
11325 assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
11326 "Remote node '%s' is not locked" % remote_node
11328 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
11329 assert self.remote_node_info is not None, \
11330 "Cannot retrieve locked node %s" % remote_node
11332 if remote_node == self.instance.primary_node:
11333 raise errors.OpPrereqError("The specified node is the primary node of"
11334 " the instance", errors.ECODE_INVAL)
11336 if remote_node == secondary_node:
11337 raise errors.OpPrereqError("The specified node is already the"
11338 " secondary node of the instance",
11339 errors.ECODE_INVAL)
11341 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
11342 constants.REPLACE_DISK_CHG):
11343 raise errors.OpPrereqError("Cannot specify disks to be replaced",
11344 errors.ECODE_INVAL)
11346 if self.mode == constants.REPLACE_DISK_AUTO:
11347 if not self._CheckDisksActivated(instance):
11348 raise errors.OpPrereqError("Please run activate-disks on instance %s"
11349 " first" % self.instance_name,
11350 errors.ECODE_STATE)
11351 faulty_primary = self._FindFaultyDisks(instance.primary_node)
11352 faulty_secondary = self._FindFaultyDisks(secondary_node)
11354 if faulty_primary and faulty_secondary:
11355 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
11356 " one node and can not be repaired"
11357 " automatically" % self.instance_name,
11358 errors.ECODE_STATE)
11361 self.disks = faulty_primary
11362 self.target_node = instance.primary_node
11363 self.other_node = secondary_node
11364 check_nodes = [self.target_node, self.other_node]
11365 elif faulty_secondary:
11366 self.disks = faulty_secondary
11367 self.target_node = secondary_node
11368 self.other_node = instance.primary_node
11369 check_nodes = [self.target_node, self.other_node]
11375 # Non-automatic modes
11376 if self.mode == constants.REPLACE_DISK_PRI:
11377 self.target_node = instance.primary_node
11378 self.other_node = secondary_node
11379 check_nodes = [self.target_node, self.other_node]
11381 elif self.mode == constants.REPLACE_DISK_SEC:
11382 self.target_node = secondary_node
11383 self.other_node = instance.primary_node
11384 check_nodes = [self.target_node, self.other_node]
11386 elif self.mode == constants.REPLACE_DISK_CHG:
11387 self.new_node = remote_node
11388 self.other_node = instance.primary_node
11389 self.target_node = secondary_node
11390 check_nodes = [self.new_node, self.other_node]
11392 _CheckNodeNotDrained(self.lu, remote_node)
11393 _CheckNodeVmCapable(self.lu, remote_node)
11395 old_node_info = self.cfg.GetNodeInfo(secondary_node)
11396 assert old_node_info is not None
11397 if old_node_info.offline and not self.early_release:
11398 # doesn't make sense to delay the release
11399 self.early_release = True
11400 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
11401 " early-release mode", secondary_node)
11404 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
11407 # If not specified all disks should be replaced
11409 self.disks = range(len(self.instance.disks))
11411 # TODO: This is ugly, but right now we can't distinguish between
11412 # internally submitted opcodes and external ones. We should fix that.
11413 if self.remote_node_info:
11414 # We change the node; let's verify that it still meets the instance policy
11415 new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
11416 cluster = self.cfg.GetClusterInfo()
11417 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
11419 _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
11420 ignore=self.ignore_ipolicy)
11422 for node in check_nodes:
11423 _CheckNodeOnline(self.lu, node)
11425 touched_nodes = frozenset(node_name for node_name in [self.new_node,
11428 if node_name is not None)
11430 # Release unneeded node and node resource locks
11431 _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
11432 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
11433 _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
11435 # Release any owned node group
11436 _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
11438 # Check whether disks are valid
11439 for disk_idx in self.disks:
11440 instance.FindDisk(disk_idx)
11442 # Get secondary node IP addresses
11443 self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
11444 in self.cfg.GetMultiNodeInfo(touched_nodes))
11446 def Exec(self, feedback_fn):
11447 """Execute disk replacement.
11449 This dispatches the disk replacement to the appropriate handler.
11453 # Verify owned locks before starting operation
11454 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
11455 assert set(owned_nodes) == set(self.node_secondary_ip), \
11456 ("Incorrect node locks, owning %s, expected %s" %
11457 (owned_nodes, self.node_secondary_ip.keys()))
11458 assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
11459 self.lu.owned_locks(locking.LEVEL_NODE_RES))
11460 assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
11462 owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
11463 assert list(owned_instances) == [self.instance_name], \
11464 "Instance '%s' not locked" % self.instance_name
11466 assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
11467 "Should not own any node group lock at this point"
11470 feedback_fn("No disks need replacement for instance '%s'" %
11471 self.instance.name)
11474 feedback_fn("Replacing disk(s) %s for instance '%s'" %
11475 (utils.CommaJoin(self.disks), self.instance.name))
11476 feedback_fn("Current primary node: %s" % self.instance.primary_node)
11477 feedback_fn("Current seconary node: %s" %
11478 utils.CommaJoin(self.instance.secondary_nodes))
11480 activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
11482 # Activate the instance disks if we're replacing them on a down instance
11484 _StartInstanceDisks(self.lu, self.instance, True)
11487 # Should we replace the secondary node?
11488 if self.new_node is not None:
11489 fn = self._ExecDrbd8Secondary
11491 fn = self._ExecDrbd8DiskOnly
11493 result = fn(feedback_fn)
11495 # Deactivate the instance disks if we're replacing them on a
11498 _SafeShutdownInstanceDisks(self.lu, self.instance)
11500 assert not self.lu.owned_locks(locking.LEVEL_NODE)
11503 # Verify owned locks
11504 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
11505 nodes = frozenset(self.node_secondary_ip)
11506 assert ((self.early_release and not owned_nodes) or
11507 (not self.early_release and not (set(owned_nodes) - nodes))), \
11508 ("Not owning the correct locks, early_release=%s, owned=%r,"
11509 " nodes=%r" % (self.early_release, owned_nodes, nodes))
11513 def _CheckVolumeGroup(self, nodes):
11514 self.lu.LogInfo("Checking volume groups")
11516 vgname = self.cfg.GetVGName()
11518 # Make sure volume group exists on all involved nodes
11519 results = self.rpc.call_vg_list(nodes)
11521 raise errors.OpExecError("Can't list volume groups on the nodes")
11524 res = results[node]
11525 res.Raise("Error checking node %s" % node)
11526 if vgname not in res.payload:
11527 raise errors.OpExecError("Volume group '%s' not found on node %s" %
11530 def _CheckDisksExistence(self, nodes):
11531 # Check disk existence
11532 for idx, dev in enumerate(self.instance.disks):
11533 if idx not in self.disks:
11537 self.lu.LogInfo("Checking disk/%d on %s", idx, node)
11538 self.cfg.SetDiskID(dev, node)
11540 result = _BlockdevFind(self, node, dev, self.instance)
11542 msg = result.fail_msg
11543 if msg or not result.payload:
11545 msg = "disk not found"
11546 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
11549 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
11550 for idx, dev in enumerate(self.instance.disks):
11551 if idx not in self.disks:
11554 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
11557 if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
11558 on_primary, ldisk=ldisk):
11559 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
11560 " replace disks for instance %s" %
11561 (node_name, self.instance.name))
11563 def _CreateNewStorage(self, node_name):
11564 """Create new storage on the primary or secondary node.
11566 This is only used for same-node replaces, not for changing the
11567 secondary node, hence we don't want to modify the existing disk.
11572 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
11573 for idx, dev in enumerate(disks):
11574 if idx not in self.disks:
11577 self.lu.LogInfo("Adding storage on %s for disk/%d", node_name, idx)
11579 self.cfg.SetDiskID(dev, node_name)
11581 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
11582 names = _GenerateUniqueNames(self.lu, lv_names)
11584 (data_disk, meta_disk) = dev.children
11585 vg_data = data_disk.logical_id[0]
11586 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
11587 logical_id=(vg_data, names[0]),
11588 params=data_disk.params)
11589 vg_meta = meta_disk.logical_id[0]
11590 lv_meta = objects.Disk(dev_type=constants.LD_LV,
11591 size=constants.DRBD_META_SIZE,
11592 logical_id=(vg_meta, names[1]),
11593 params=meta_disk.params)
11595 new_lvs = [lv_data, lv_meta]
11596 old_lvs = [child.Copy() for child in dev.children]
11597 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
11599 # we pass force_create=True to force the LVM creation
11600 for new_lv in new_lvs:
11601 _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
11602 _GetInstanceInfoText(self.instance), False)
11606 def _CheckDevices(self, node_name, iv_names):
11607 for name, (dev, _, _) in iv_names.iteritems():
11608 self.cfg.SetDiskID(dev, node_name)
11610 result = _BlockdevFind(self, node_name, dev, self.instance)
11612 msg = result.fail_msg
11613 if msg or not result.payload:
11615 msg = "disk not found"
11616 raise errors.OpExecError("Can't find DRBD device %s: %s" %
11619 if result.payload.is_degraded:
11620 raise errors.OpExecError("DRBD device %s is degraded!" % name)
11622 def _RemoveOldStorage(self, node_name, iv_names):
11623 for name, (_, old_lvs, _) in iv_names.iteritems():
11624 self.lu.LogInfo("Remove logical volumes for %s", name)
11627 self.cfg.SetDiskID(lv, node_name)
11629 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
11631 self.lu.LogWarning("Can't remove old LV: %s", msg,
11632 hint="remove unused LVs manually")
11634 def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
11635 """Replace a disk on the primary or secondary for DRBD 8.
11637 The algorithm for replace is quite complicated:
11639 1. for each disk to be replaced:
11641 1. create new LVs on the target node with unique names
11642 1. detach old LVs from the drbd device
11643 1. rename old LVs to name_replaced.<time_t>
11644 1. rename new LVs to old LVs
11645 1. attach the new LVs (with the old names now) to the drbd device
11647 1. wait for sync across all devices
11649 1. for each modified disk:
11651 1. remove old LVs (which have the name name_replaced.<time_t>)
11653 Failures are not very well handled.
11658 # Step: check device activation
11659 self.lu.LogStep(1, steps_total, "Check device existence")
11660 self._CheckDisksExistence([self.other_node, self.target_node])
11661 self._CheckVolumeGroup([self.target_node, self.other_node])
11663 # Step: check other node consistency
11664 self.lu.LogStep(2, steps_total, "Check peer consistency")
11665 self._CheckDisksConsistency(self.other_node,
11666 self.other_node == self.instance.primary_node,
11669 # Step: create new storage
11670 self.lu.LogStep(3, steps_total, "Allocate new storage")
11671 iv_names = self._CreateNewStorage(self.target_node)
11673 # Step: for each lv, detach+rename*2+attach
11674 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
11675 for dev, old_lvs, new_lvs in iv_names.itervalues():
11676 self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
11678 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
11680 result.Raise("Can't detach drbd from local storage on node"
11681 " %s for device %s" % (self.target_node, dev.iv_name))
11683 #cfg.Update(instance)
11685 # ok, we created the new LVs, so now we know we have the needed
11686 # storage; as such, we proceed on the target node to rename
11687 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
11688 # using the assumption that logical_id == physical_id (which in
11689 # turn is the unique_id on that node)
11691 # FIXME(iustin): use a better name for the replaced LVs
11692 temp_suffix = int(time.time())
11693 ren_fn = lambda d, suff: (d.physical_id[0],
11694 d.physical_id[1] + "_replaced-%s" % suff)
11696 # Build the rename list based on what LVs exist on the node
11697 rename_old_to_new = []
11698 for to_ren in old_lvs:
11699 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
11700 if not result.fail_msg and result.payload:
11702 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
11704 self.lu.LogInfo("Renaming the old LVs on the target node")
11705 result = self.rpc.call_blockdev_rename(self.target_node,
11707 result.Raise("Can't rename old LVs on node %s" % self.target_node)
11709 # Now we rename the new LVs to the old LVs
11710 self.lu.LogInfo("Renaming the new LVs on the target node")
11711 rename_new_to_old = [(new, old.physical_id)
11712 for old, new in zip(old_lvs, new_lvs)]
11713 result = self.rpc.call_blockdev_rename(self.target_node,
11715 result.Raise("Can't rename new LVs on node %s" % self.target_node)
11717 # Intermediate steps of in memory modifications
11718 for old, new in zip(old_lvs, new_lvs):
11719 new.logical_id = old.logical_id
11720 self.cfg.SetDiskID(new, self.target_node)
11722 # We need to modify old_lvs so that removal later removes the
11723 # right LVs, not the newly added ones; note that old_lvs is a
11725 for disk in old_lvs:
11726 disk.logical_id = ren_fn(disk, temp_suffix)
11727 self.cfg.SetDiskID(disk, self.target_node)
11729 # Now that the new lvs have the old name, we can add them to the device
11730 self.lu.LogInfo("Adding new mirror component on %s", self.target_node)
11731 result = self.rpc.call_blockdev_addchildren(self.target_node,
11732 (dev, self.instance), new_lvs)
11733 msg = result.fail_msg
11735 for new_lv in new_lvs:
11736 msg2 = self.rpc.call_blockdev_remove(self.target_node,
11739 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
11740 hint=("cleanup manually the unused logical"
11742 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
11744 cstep = itertools.count(5)
11746 if self.early_release:
11747 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11748 self._RemoveOldStorage(self.target_node, iv_names)
11749 # TODO: Check if releasing locks early still makes sense
11750 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11752 # Release all resource locks except those used by the instance
11753 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11754 keep=self.node_secondary_ip.keys())
11756 # Release all node locks while waiting for sync
11757 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11759 # TODO: Can the instance lock be downgraded here? Take the optional disk
11760 # shutdown in the caller into consideration.
11763 # This can fail as the old devices are degraded and _WaitForSync
11764 # does a combined result over all disks, so we don't check its return value
11765 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11766 _WaitForSync(self.lu, self.instance)
11768 # Check all devices manually
11769 self._CheckDevices(self.instance.primary_node, iv_names)
11771 # Step: remove old storage
11772 if not self.early_release:
11773 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11774 self._RemoveOldStorage(self.target_node, iv_names)
11776 def _ExecDrbd8Secondary(self, feedback_fn):
11777 """Replace the secondary node for DRBD 8.
11779 The algorithm for replace is quite complicated:
11780 - for all disks of the instance:
11781 - create new LVs on the new node with same names
11782 - shutdown the drbd device on the old secondary
11783 - disconnect the drbd network on the primary
11784 - create the drbd device on the new secondary
11785 - network attach the drbd on the primary, using an artifice:
11786 the drbd code for Attach() will connect to the network if it
11787 finds a device which is connected to the good local disks but
11788 not network enabled
11789 - wait for sync across all devices
11790 - remove all disks from the old secondary
11792 Failures are not very well handled.
11797 pnode = self.instance.primary_node
11799 # Step: check device activation
11800 self.lu.LogStep(1, steps_total, "Check device existence")
11801 self._CheckDisksExistence([self.instance.primary_node])
11802 self._CheckVolumeGroup([self.instance.primary_node])
11804 # Step: check other node consistency
11805 self.lu.LogStep(2, steps_total, "Check peer consistency")
11806 self._CheckDisksConsistency(self.instance.primary_node, True, True)
11808 # Step: create new storage
11809 self.lu.LogStep(3, steps_total, "Allocate new storage")
11810 disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
11811 for idx, dev in enumerate(disks):
11812 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
11813 (self.new_node, idx))
11814 # we pass force_create=True to force LVM creation
11815 for new_lv in dev.children:
11816 _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
11817 True, _GetInstanceInfoText(self.instance), False)
11819 # Step 4: drbd minors and drbd setup changes
11820 # after this, we must manually remove the drbd minors on both the
11821 # error and the success paths
11822 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
11823 minors = self.cfg.AllocateDRBDMinor([self.new_node
11824 for dev in self.instance.disks],
11825 self.instance.name)
11826 logging.debug("Allocated minors %r", minors)
11829 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
11830 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
11831 (self.new_node, idx))
11832 # create new devices on new_node; note that we create two IDs:
11833 # one without port, so the drbd will be activated without
11834 # networking information on the new node at this stage, and one
11835 # with network, for the latter activation in step 4
11836 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
11837 if self.instance.primary_node == o_node1:
11840 assert self.instance.primary_node == o_node2, "Three-node instance?"
11843 new_alone_id = (self.instance.primary_node, self.new_node, None,
11844 p_minor, new_minor, o_secret)
11845 new_net_id = (self.instance.primary_node, self.new_node, o_port,
11846 p_minor, new_minor, o_secret)
11848 iv_names[idx] = (dev, dev.children, new_net_id)
11849 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
11851 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
11852 logical_id=new_alone_id,
11853 children=dev.children,
11856 (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
11859 _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
11861 _GetInstanceInfoText(self.instance), False)
11862 except errors.GenericError:
11863 self.cfg.ReleaseDRBDMinors(self.instance.name)
11866 # We have new devices, shutdown the drbd on the old secondary
11867 for idx, dev in enumerate(self.instance.disks):
11868 self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
11869 self.cfg.SetDiskID(dev, self.target_node)
11870 msg = self.rpc.call_blockdev_shutdown(self.target_node,
11871 (dev, self.instance)).fail_msg
11873 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
11874 "node: %s" % (idx, msg),
11875 hint=("Please cleanup this device manually as"
11876 " soon as possible"))
11878 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
11879 result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
11880 self.instance.disks)[pnode]
11882 msg = result.fail_msg
11884 # detaches didn't succeed (unlikely)
11885 self.cfg.ReleaseDRBDMinors(self.instance.name)
11886 raise errors.OpExecError("Can't detach the disks from the network on"
11887 " old node: %s" % (msg,))
11889 # if we managed to detach at least one, we update all the disks of
11890 # the instance to point to the new secondary
11891 self.lu.LogInfo("Updating instance configuration")
11892 for dev, _, new_logical_id in iv_names.itervalues():
11893 dev.logical_id = new_logical_id
11894 self.cfg.SetDiskID(dev, self.instance.primary_node)
11896 self.cfg.Update(self.instance, feedback_fn)
11898 # Release all node locks (the configuration has been updated)
11899 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11901 # and now perform the drbd attach
11902 self.lu.LogInfo("Attaching primary drbds to new secondary"
11903 " (standalone => connected)")
11904 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
11906 self.node_secondary_ip,
11907 (self.instance.disks, self.instance),
11908 self.instance.name,
11910 for to_node, to_result in result.items():
11911 msg = to_result.fail_msg
11913 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
11915 hint=("please do a gnt-instance info to see the"
11916 " status of disks"))
11918 cstep = itertools.count(5)
11920 if self.early_release:
11921 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11922 self._RemoveOldStorage(self.target_node, iv_names)
11923 # TODO: Check if releasing locks early still makes sense
11924 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11926 # Release all resource locks except those used by the instance
11927 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11928 keep=self.node_secondary_ip.keys())
11930 # TODO: Can the instance lock be downgraded here? Take the optional disk
11931 # shutdown in the caller into consideration.
11934 # This can fail as the old devices are degraded and _WaitForSync
11935 # does a combined result over all disks, so we don't check its return value
11936 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11937 _WaitForSync(self.lu, self.instance)
11939 # Check all devices manually
11940 self._CheckDevices(self.instance.primary_node, iv_names)
11942 # Step: remove old storage
11943 if not self.early_release:
11944 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11945 self._RemoveOldStorage(self.target_node, iv_names)
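# Illustrative sketch only (not used by Ganeti): how _ExecDrbd8Secondary above
# derives the two new DRBD logical IDs when the secondary node is replaced.
# The first ID carries no port, so the device can be assembled on the new node
# without networking; the second keeps the original port for the later
# re-attach on the primary. All argument names are hypothetical.
def _ExampleNewDrbdLogicalIds(old_logical_id, primary_node, new_node,
                              new_minor):
  (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = old_logical_id
  # keep the minor that belongs to the primary node
  if primary_node == o_node1:
    p_minor = o_minor1
  else:
    assert primary_node == o_node2, "Three-node instance?"
    p_minor = o_minor2
  new_alone_id = (primary_node, new_node, None, p_minor, new_minor, o_secret)
  new_net_id = (primary_node, new_node, o_port, p_minor, new_minor, o_secret)
  return (new_alone_id, new_net_id)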
11948 class LURepairNodeStorage(NoHooksLU):
11949 """Repairs the volume group on a node.
11954 def CheckArguments(self):
11955 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11957 storage_type = self.op.storage_type
11959 if (constants.SO_FIX_CONSISTENCY not in
11960 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
11961 raise errors.OpPrereqError("Storage units of type '%s' can not be"
11962 " repaired" % storage_type,
11963 errors.ECODE_INVAL)
11965 def ExpandNames(self):
11966 self.needed_locks = {
11967 locking.LEVEL_NODE: [self.op.node_name],
11970 def _CheckFaultyDisks(self, instance, node_name):
11971 """Ensure faulty disks abort the opcode or at least warn."""
11973 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
11975 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
11976 " node '%s'" % (instance.name, node_name),
11977 errors.ECODE_STATE)
11978 except errors.OpPrereqError, err:
11979 if self.op.ignore_consistency:
11980 self.LogWarning(str(err.args[0]))
11984 def CheckPrereq(self):
11985 """Check prerequisites.
11988 # Check whether any instance on this node has faulty disks
11989 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
11990 if inst.admin_state != constants.ADMINST_UP:
11992 check_nodes = set(inst.all_nodes)
11993 check_nodes.discard(self.op.node_name)
11994 for inst_node_name in check_nodes:
11995 self._CheckFaultyDisks(inst, inst_node_name)
11997 def Exec(self, feedback_fn):
11998 feedback_fn("Repairing storage unit '%s' on %s ..." %
11999 (self.op.name, self.op.node_name))
12001 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
12002 result = self.rpc.call_storage_execute(self.op.node_name,
12003 self.op.storage_type, st_args,
12005 constants.SO_FIX_CONSISTENCY)
12006 result.Raise("Failed to repair storage unit '%s' on %s" %
12007 (self.op.name, self.op.node_name))
12010 class LUNodeEvacuate(NoHooksLU):
12011 """Evacuates instances off a list of nodes.
12016 _MODE2IALLOCATOR = {
12017 constants.NODE_EVAC_PRI: constants.IALLOCATOR_NEVAC_PRI,
12018 constants.NODE_EVAC_SEC: constants.IALLOCATOR_NEVAC_SEC,
12019 constants.NODE_EVAC_ALL: constants.IALLOCATOR_NEVAC_ALL,
12021 assert frozenset(_MODE2IALLOCATOR.keys()) == constants.NODE_EVAC_MODES
12022 assert (frozenset(_MODE2IALLOCATOR.values()) ==
12023 constants.IALLOCATOR_NEVAC_MODES)
12025 def CheckArguments(self):
12026 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
12028 def ExpandNames(self):
12029 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
12031 if self.op.remote_node is not None:
12032 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
12033 assert self.op.remote_node
12035 if self.op.remote_node == self.op.node_name:
12036 raise errors.OpPrereqError("Can not use evacuated node as a new"
12037 " secondary node", errors.ECODE_INVAL)
12039 if self.op.mode != constants.NODE_EVAC_SEC:
12040 raise errors.OpPrereqError("Without the use of an iallocator only"
12041 " secondary instances can be evacuated",
12042 errors.ECODE_INVAL)
12045 self.share_locks = _ShareAll()
12046 self.needed_locks = {
12047 locking.LEVEL_INSTANCE: [],
12048 locking.LEVEL_NODEGROUP: [],
12049 locking.LEVEL_NODE: [],
12052 # Determine nodes (via group) optimistically, needs verification once locks
12053 # have been acquired
12054 self.lock_nodes = self._DetermineNodes()
12056 def _DetermineNodes(self):
12057 """Gets the list of nodes to operate on.
12060 if self.op.remote_node is None:
12061 # Iallocator will choose any node(s) in the same group
12062 group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
12064 group_nodes = frozenset([self.op.remote_node])
12066 # Determine nodes to be locked
12067 return set([self.op.node_name]) | group_nodes
12069 def _DetermineInstances(self):
12070 """Builds list of instances to operate on.
12073 assert self.op.mode in constants.NODE_EVAC_MODES
12075 if self.op.mode == constants.NODE_EVAC_PRI:
12076 # Primary instances only
12077 inst_fn = _GetNodePrimaryInstances
12078 assert self.op.remote_node is None, \
12079 "Evacuating primary instances requires iallocator"
12080 elif self.op.mode == constants.NODE_EVAC_SEC:
12081 # Secondary instances only
12082 inst_fn = _GetNodeSecondaryInstances
12085 assert self.op.mode == constants.NODE_EVAC_ALL
12086 inst_fn = _GetNodeInstances
12087 # TODO: In 2.6, change the iallocator interface to take an evacuation mode
12089 raise errors.OpPrereqError("Due to an issue with the iallocator"
12090 " interface it is not possible to evacuate"
12091 " all instances at once; specify explicitly"
12092 " whether to evacuate primary or secondary"
12094 errors.ECODE_INVAL)
12096 return inst_fn(self.cfg, self.op.node_name)
12098 def DeclareLocks(self, level):
12099 if level == locking.LEVEL_INSTANCE:
12100 # Lock instances optimistically, needs verification once node and group
12101 # locks have been acquired
12102 self.needed_locks[locking.LEVEL_INSTANCE] = \
12103 set(i.name for i in self._DetermineInstances())
12105 elif level == locking.LEVEL_NODEGROUP:
12106 # Lock node groups for all potential target nodes optimistically, needs
12107 # verification once nodes have been acquired
12108 self.needed_locks[locking.LEVEL_NODEGROUP] = \
12109 self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
12111 elif level == locking.LEVEL_NODE:
12112 self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
12114 def CheckPrereq(self):
12116 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
12117 owned_nodes = self.owned_locks(locking.LEVEL_NODE)
12118 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
12120 need_nodes = self._DetermineNodes()
12122 if not owned_nodes.issuperset(need_nodes):
12123 raise errors.OpPrereqError("Nodes in same group as '%s' changed since"
12124 " locks were acquired, current nodes are"
12125 " are '%s', used to be '%s'; retry the"
12127 (self.op.node_name,
12128 utils.CommaJoin(need_nodes),
12129 utils.CommaJoin(owned_nodes)),
12130 errors.ECODE_STATE)
12132 wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
12133 if owned_groups != wanted_groups:
12134 raise errors.OpExecError("Node groups changed since locks were acquired,"
12135 " current groups are '%s', used to be '%s';"
12136 " retry the operation" %
12137 (utils.CommaJoin(wanted_groups),
12138 utils.CommaJoin(owned_groups)))
12140 # Determine affected instances
12141 self.instances = self._DetermineInstances()
12142 self.instance_names = [i.name for i in self.instances]
12144 if set(self.instance_names) != owned_instances:
12145 raise errors.OpExecError("Instances on node '%s' changed since locks"
12146 " were acquired, current instances are '%s',"
12147 " used to be '%s'; retry the operation" %
12148 (self.op.node_name,
12149 utils.CommaJoin(self.instance_names),
12150 utils.CommaJoin(owned_instances)))
12152 if self.instance_names:
12153 self.LogInfo("Evacuating instances from node '%s': %s",
12155 utils.CommaJoin(utils.NiceSort(self.instance_names)))
12157 self.LogInfo("No instances to evacuate from node '%s'",
12160 if self.op.remote_node is not None:
12161 for i in self.instances:
12162 if i.primary_node == self.op.remote_node:
12163 raise errors.OpPrereqError("Node %s is the primary node of"
12164 " instance %s, cannot use it as"
12166 (self.op.remote_node, i.name),
12167 errors.ECODE_INVAL)
12169 def Exec(self, feedback_fn):
12170 assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
12172 if not self.instance_names:
12173 # No instances to evacuate
12176 elif self.op.iallocator is not None:
12177 # TODO: Implement relocation to other group
12178 evac_mode = self._MODE2IALLOCATOR[self.op.mode]
12179 req = iallocator.IAReqNodeEvac(evac_mode=evac_mode,
12180 instances=list(self.instance_names))
12181 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
12183 ial.Run(self.op.iallocator)
12185 if not ial.success:
12186 raise errors.OpPrereqError("Can't compute node evacuation using"
12187 " iallocator '%s': %s" %
12188 (self.op.iallocator, ial.info),
12189 errors.ECODE_NORES)
12191 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
12193 elif self.op.remote_node is not None:
12194 assert self.op.mode == constants.NODE_EVAC_SEC
12196 [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
12197 remote_node=self.op.remote_node,
12199 mode=constants.REPLACE_DISK_CHG,
12200 early_release=self.op.early_release)]
12201 for instance_name in self.instance_names]
12204 raise errors.ProgrammerError("No iallocator or remote node")
12206 return ResultWithJobs(jobs)
12209 def _SetOpEarlyRelease(early_release, op):
12210 """Sets C{early_release} flag on opcodes if available.
12214 op.early_release = early_release
12215 except AttributeError:
12216 assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
12221 def _NodeEvacDest(use_nodes, group, nodes):
12222 """Returns group or nodes depending on caller's choice.
12226 return utils.CommaJoin(nodes)
12231 def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
12232 """Unpacks the result of change-group and node-evacuate iallocator requests.
12234 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
12235 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
12237 @type lu: L{LogicalUnit}
12238 @param lu: Logical unit instance
12239 @type alloc_result: tuple/list
12240 @param alloc_result: Result from iallocator
12241 @type early_release: bool
12242 @param early_release: Whether to release locks early if possible
12243 @type use_nodes: bool
12244 @param use_nodes: Whether to display node names instead of groups
12247 (moved, failed, jobs) = alloc_result
12250 failreason = utils.CommaJoin("%s (%s)" % (name, reason)
12251 for (name, reason) in failed)
12252 lu.LogWarning("Unable to evacuate instances %s", failreason)
12253 raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
12256 lu.LogInfo("Instances to be moved: %s",
12257 utils.CommaJoin("%s (to %s)" %
12258 (name, _NodeEvacDest(use_nodes, group, nodes))
12259 for (name, group, nodes) in moved))
12261 return [map(compat.partial(_SetOpEarlyRelease, early_release),
12262 map(opcodes.OpCode.LoadOpCode, ops))
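# Data-shape note (hedged, for orientation only): the C{alloc_result} unpacked
# above is a triple C{(moved, failed, jobs)} where, with hypothetical values,
#   moved  = [("inst1", "target-group-or-nodes", ["nodeA", "nodeB"])]
#   failed = [("inst2", "insufficient memory on candidate nodes")]
#   jobs   = [[<serialized opcode>, ...], ...]
# Each inner list of C{jobs} is one job; its serialized opcodes are re-created
# with L{opcodes.OpCode.LoadOpCode} and, where the opcode supports it, get the
# C{early_release} flag set via L{_SetOpEarlyRelease} before submission.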
12266 def _DiskSizeInBytesToMebibytes(lu, size):
12267 """Converts a disk size in bytes to mebibytes.
12269 Warns and rounds up if the size isn't an even multiple of 1 MiB.
12272 (mib, remainder) = divmod(size, 1024 * 1024)
12275 lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
12276 " to not overwrite existing data (%s bytes will not be"
12277 " wiped)", (1024 * 1024) - remainder)
12283 class LUInstanceGrowDisk(LogicalUnit):
12284 """Grow a disk of an instance.
12287 HPATH = "disk-grow"
12288 HTYPE = constants.HTYPE_INSTANCE
12291 def ExpandNames(self):
12292 self._ExpandAndLockInstance()
12293 self.needed_locks[locking.LEVEL_NODE] = []
12294 self.needed_locks[locking.LEVEL_NODE_RES] = []
12295 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
12296 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
12298 def DeclareLocks(self, level):
12299 if level == locking.LEVEL_NODE:
12300 self._LockInstancesNodes()
12301 elif level == locking.LEVEL_NODE_RES:
12303 self.needed_locks[locking.LEVEL_NODE_RES] = \
12304 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
12306 def BuildHooksEnv(self):
12307 """Build hooks env.
12309 This runs on the master, the primary and all the secondaries.
12313 "DISK": self.op.disk,
12314 "AMOUNT": self.op.amount,
12315 "ABSOLUTE": self.op.absolute,
12317 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
12320 def BuildHooksNodes(self):
12321 """Build hooks nodes.
12324 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
12327 def CheckPrereq(self):
12328 """Check prerequisites.
12330 This checks that the instance is in the cluster.
12333 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
12334 assert instance is not None, \
12335 "Cannot retrieve locked instance %s" % self.op.instance_name
12336 nodenames = list(instance.all_nodes)
12337 for node in nodenames:
12338 _CheckNodeOnline(self, node)
12340 self.instance = instance
12342 if instance.disk_template not in constants.DTS_GROWABLE:
12343 raise errors.OpPrereqError("Instance's disk layout does not support"
12344 " growing", errors.ECODE_INVAL)
12346 self.disk = instance.FindDisk(self.op.disk)
12348 if self.op.absolute:
12349 self.target = self.op.amount
12350 self.delta = self.target - self.disk.size
12352 raise errors.OpPrereqError("Requested size (%s) is smaller than "
12353 "current disk size (%s)" %
12354 (utils.FormatUnit(self.target, "h"),
12355 utils.FormatUnit(self.disk.size, "h")),
12356 errors.ECODE_STATE)
12358 self.delta = self.op.amount
12359 self.target = self.disk.size + self.delta
12361 raise errors.OpPrereqError("Requested increment (%s) is negative" %
12362 utils.FormatUnit(self.delta, "h"),
12363 errors.ECODE_INVAL)
12365 if instance.disk_template not in (constants.DT_FILE,
12366 constants.DT_SHARED_FILE,
12369 # TODO: check the free disk space for file, when that feature will be
12371 _CheckNodesFreeDiskPerVG(self, nodenames,
12372 self.disk.ComputeGrowth(self.delta))
12374 def Exec(self, feedback_fn):
12375 """Execute disk grow.
12378 instance = self.instance
12381 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
12382 assert (self.owned_locks(locking.LEVEL_NODE) ==
12383 self.owned_locks(locking.LEVEL_NODE_RES))
12385 wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
12387 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
12389 raise errors.OpExecError("Cannot activate block device to grow")
12391 feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
12392 (self.op.disk, instance.name,
12393 utils.FormatUnit(self.delta, "h"),
12394 utils.FormatUnit(self.target, "h")))
12396 # First run all grow ops in dry-run mode
12397 for node in instance.all_nodes:
12398 self.cfg.SetDiskID(disk, node)
12399 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
12401 result.Raise("Dry-run grow request failed to node %s" % node)
12404 # Get disk size from primary node for wiping
12405 result = self.rpc.call_blockdev_getsize(instance.primary_node, [disk])
12406 result.Raise("Failed to retrieve disk size from node '%s'" %
12407 instance.primary_node)
12409 (disk_size_in_bytes, ) = result.payload
12411 if disk_size_in_bytes is None:
12412 raise errors.OpExecError("Failed to retrieve disk size from primary"
12413 " node '%s'" % instance.primary_node)
12415 old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)
12417 assert old_disk_size >= disk.size, \
12418 ("Retrieved disk size too small (got %s, should be at least %s)" %
12419 (old_disk_size, disk.size))
12421 old_disk_size = None
12423 # We know that (as far as we can test) operations across different
12424 # nodes will succeed, time to run it for real on the backing storage
12425 for node in instance.all_nodes:
12426 self.cfg.SetDiskID(disk, node)
12427 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
12429 result.Raise("Grow request failed to node %s" % node)
12431 # And now execute it for logical storage, on the primary node
12432 node = instance.primary_node
12433 self.cfg.SetDiskID(disk, node)
12434 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
12436 result.Raise("Grow request failed to node %s" % node)
12438 disk.RecordGrow(self.delta)
12439 self.cfg.Update(instance, feedback_fn)
12441 # Changes have been recorded, release node lock
12442 _ReleaseLocks(self, locking.LEVEL_NODE)
12444 # Downgrade lock while waiting for sync
12445 self.glm.downgrade(locking.LEVEL_INSTANCE)
12447 assert wipe_disks ^ (old_disk_size is None)
12450 assert instance.disks[self.op.disk] == disk
12452 # Wipe newly added disk space
12453 _WipeDisks(self, instance,
12454 disks=[(self.op.disk, disk, old_disk_size)])
12456 if self.op.wait_for_sync:
12457 disk_abort = not _WaitForSync(self, instance, disks=[disk])
12459 self.LogWarning("Disk syncing has not returned a good status; check"
12461 if instance.admin_state != constants.ADMINST_UP:
12462 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
12463 elif instance.admin_state != constants.ADMINST_UP:
12464 self.LogWarning("Not shutting down the disk even if the instance is"
12465 " not supposed to be running because no wait for"
12466 " sync mode was requested")
12468 assert self.owned_locks(locking.LEVEL_NODE_RES)
12469 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
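# Illustrative sketch only (not part of the LU above): the relation between
# the requested amount, the resulting target size and the delta that is
# actually grown, mirroring CheckPrereq of LUInstanceGrowDisk. The argument
# names are hypothetical; sizes are in mebibytes.
def _ExampleGrowTargetAndDelta(current_size, amount, absolute):
  if absolute:
    target = amount
    delta = target - current_size
  else:
    delta = amount
    target = current_size + delta
  # e.g. current_size=10240, amount=2048, absolute=False -> (12288, 2048)
  #      current_size=10240, amount=20480, absolute=True -> (20480, 10240)
  return (target, delta)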
12472 class LUInstanceQueryData(NoHooksLU):
12473 """Query runtime instance data.
12478 def ExpandNames(self):
12479 self.needed_locks = {}
12481 # Use locking if requested or when non-static information is wanted
12482 if not (self.op.static or self.op.use_locking):
12483 self.LogWarning("Non-static data requested, locks need to be acquired")
12484 self.op.use_locking = True
12486 if self.op.instances or not self.op.use_locking:
12487 # Expand instance names right here
12488 self.wanted_names = _GetWantedInstances(self, self.op.instances)
12490 # Will use acquired locks
12491 self.wanted_names = None
12493 if self.op.use_locking:
12494 self.share_locks = _ShareAll()
12496 if self.wanted_names is None:
12497 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
12499 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
12501 self.needed_locks[locking.LEVEL_NODEGROUP] = []
12502 self.needed_locks[locking.LEVEL_NODE] = []
12503 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
12505 def DeclareLocks(self, level):
12506 if self.op.use_locking:
12507 if level == locking.LEVEL_NODEGROUP:
12508 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
12510 # Lock all groups used by instances optimistically; this requires going
12511 # via the node before it's locked, requiring verification later on
12512 self.needed_locks[locking.LEVEL_NODEGROUP] = \
12513 frozenset(group_uuid
12514 for instance_name in owned_instances
12516 self.cfg.GetInstanceNodeGroups(instance_name))
12518 elif level == locking.LEVEL_NODE:
12519 self._LockInstancesNodes()
12521 def CheckPrereq(self):
12522 """Check prerequisites.
12524 This only checks the optional instance list against the existing names.
12527 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
12528 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
12529 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
12531 if self.wanted_names is None:
12532 assert self.op.use_locking, "Locking was not used"
12533 self.wanted_names = owned_instances
12535 instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
12537 if self.op.use_locking:
12538 _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
12541 assert not (owned_instances or owned_groups or owned_nodes)
12543 self.wanted_instances = instances.values()
12545 def _ComputeBlockdevStatus(self, node, instance, dev):
12546 """Returns the status of a block device
12549 if self.op.static or not node:
12552 self.cfg.SetDiskID(dev, node)
12554 result = self.rpc.call_blockdev_find(node, dev)
12558 result.Raise("Can't compute disk status for %s" % instance.name)
12560 status = result.payload
12564 return (status.dev_path, status.major, status.minor,
12565 status.sync_percent, status.estimated_time,
12566 status.is_degraded, status.ldisk_status)
12568 def _ComputeDiskStatus(self, instance, snode, dev):
12569 """Compute block device status.
12572 (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
12574 return self._ComputeDiskStatusInner(instance, snode, anno_dev)
12576 def _ComputeDiskStatusInner(self, instance, snode, dev):
12577 """Compute block device status.
12579 @attention: The device has to be annotated already.
12582 if dev.dev_type in constants.LDS_DRBD:
12583 # we change the snode then (otherwise we use the one passed in)
12584 if dev.logical_id[0] == instance.primary_node:
12585 snode = dev.logical_id[1]
12587 snode = dev.logical_id[0]
12589 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
12591 dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
12594 dev_children = map(compat.partial(self._ComputeDiskStatusInner,
12601 "iv_name": dev.iv_name,
12602 "dev_type": dev.dev_type,
12603 "logical_id": dev.logical_id,
12604 "physical_id": dev.physical_id,
12605 "pstatus": dev_pstatus,
12606 "sstatus": dev_sstatus,
12607 "children": dev_children,
12612 def Exec(self, feedback_fn):
12613 """Gather and return data"""
12616 cluster = self.cfg.GetClusterInfo()
12618 node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
12619 nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
12621 groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
12622 for node in nodes.values()))
12624 group2name_fn = lambda uuid: groups[uuid].name
12626 for instance in self.wanted_instances:
12627 pnode = nodes[instance.primary_node]
12629 if self.op.static or pnode.offline:
12630 remote_state = None
12632 self.LogWarning("Primary node %s is marked offline, returning static"
12633 " information only for instance %s" %
12634 (pnode.name, instance.name))
12636 remote_info = self.rpc.call_instance_info(instance.primary_node,
12638 instance.hypervisor)
12639 remote_info.Raise("Error checking node %s" % instance.primary_node)
12640 remote_info = remote_info.payload
12641 if remote_info and "state" in remote_info:
12642 remote_state = "up"
12644 if instance.admin_state == constants.ADMINST_UP:
12645 remote_state = "down"
12647 remote_state = instance.admin_state
12649 disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
12652 snodes_group_uuids = [nodes[snode_name].group
12653 for snode_name in instance.secondary_nodes]
12655 result[instance.name] = {
12656 "name": instance.name,
12657 "config_state": instance.admin_state,
12658 "run_state": remote_state,
12659 "pnode": instance.primary_node,
12660 "pnode_group_uuid": pnode.group,
12661 "pnode_group_name": group2name_fn(pnode.group),
12662 "snodes": instance.secondary_nodes,
12663 "snodes_group_uuids": snodes_group_uuids,
12664 "snodes_group_names": map(group2name_fn, snodes_group_uuids),
12666 # this happens to be the same format used for hooks
12667 "nics": _NICListToTuple(self, instance.nics),
12668 "disk_template": instance.disk_template,
12670 "hypervisor": instance.hypervisor,
12671 "network_port": instance.network_port,
12672 "hv_instance": instance.hvparams,
12673 "hv_actual": cluster.FillHV(instance, skip_globals=True),
12674 "be_instance": instance.beparams,
12675 "be_actual": cluster.FillBE(instance),
12676 "os_instance": instance.osparams,
12677 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
12678 "serial_no": instance.serial_no,
12679 "mtime": instance.mtime,
12680 "ctime": instance.ctime,
12681 "uuid": instance.uuid,
12687 def PrepareContainerMods(mods, private_fn):
12688 """Prepares a list of container modifications by adding a private data field.
12690 @type mods: list of tuples; (operation, index, parameters)
12691 @param mods: List of modifications
12692 @type private_fn: callable or None
12693 @param private_fn: Callable for constructing a private data field for a
12698 if private_fn is None:
12703 return [(op, idx, params, fn()) for (op, idx, params) in mods]
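# Usage sketch (hypothetical values): PrepareContainerMods only extends each
# (op, index, params) tuple with a private slot, either None or a fresh object
# per modification, e.g.
#   PrepareContainerMods([(constants.DDM_ADD, -1, {"size": 1024})], None)
#     -> [(constants.DDM_ADD, -1, {"size": 1024}, None)]
#   PrepareContainerMods([(constants.DDM_MODIFY, 0, {"mode": "bridged"})],
#                        _InstNicModPrivate)
#     -> [(constants.DDM_MODIFY, 0, {...}, <fresh _InstNicModPrivate>)]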
12706 #: Type description for changes as returned by L{ApplyContainerMods}'s
12708 _TApplyContModsCbChanges = \
12709 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
12710 ht.TNonEmptyString,
12715 def ApplyContainerMods(kind, container, chgdesc, mods,
12716 create_fn, modify_fn, remove_fn):
12717 """Applies descriptions in C{mods} to C{container}.
12720 @param kind: One-word item description
12721 @type container: list
12722 @param container: Container to modify
12723 @type chgdesc: None or list
12724 @param chgdesc: List of applied changes
12726 @param mods: Modifications as returned by L{PrepareContainerMods}
12727 @type create_fn: callable
12728 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
12729 receives absolute item index, parameters and private data object as added
12730 by L{PrepareContainerMods}, returns tuple containing new item and changes
12732 @type modify_fn: callable
12733 @param modify_fn: Callback for modifying an existing item
12734 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
12735 and private data object as added by L{PrepareContainerMods}, returns
12737 @type remove_fn: callable
12738 @param remove_fn: Callback on removing item; receives absolute item index,
12739 item and private data object as added by L{PrepareContainerMods}
12742 for (op, idx, params, private) in mods:
12745 absidx = len(container) - 1
12747 raise IndexError("Not accepting negative indices other than -1")
12748 elif idx > len(container):
12749 raise IndexError("Got %s index %s, but there are only %s" %
12750 (kind, idx, len(container)))
12756 if op == constants.DDM_ADD:
12757 # Calculate where item will be added
12759 addidx = len(container)
12763 if create_fn is None:
12766 (item, changes) = create_fn(addidx, params, private)
12769 container.append(item)
12772 assert idx <= len(container)
12773 # list.insert does so before the specified index
12774 container.insert(idx, item)
12776 # Retrieve existing item
12778 item = container[absidx]
12780 raise IndexError("Invalid %s index %s" % (kind, idx))
12782 if op == constants.DDM_REMOVE:
12785 if remove_fn is not None:
12786 remove_fn(absidx, item, private)
12788 changes = [("%s/%s" % (kind, absidx), "remove")]
12790 assert container[absidx] == item
12791 del container[absidx]
12792 elif op == constants.DDM_MODIFY:
12793 if modify_fn is not None:
12794 changes = modify_fn(absidx, item, params, private)
12796 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12798 assert _TApplyContModsCbChanges(changes)
12800 if not (chgdesc is None or changes is None):
12801 chgdesc.extend(changes)
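# Minimal usage sketch (an assumption for illustration only, not taken from the
# original code): applying a single "add" to a plain list, with a trivial
# create callback and no change tracking, would look like
#   items = ["a", "b"]
#   ApplyContainerMods("demo", items, None,
#                      PrepareContainerMods([(constants.DDM_ADD, -1, {})], None),
#                      lambda idx, params, private: ("c", []), None, None)
#   # items == ["a", "b", "c"]
# Real callers (e.g. LUInstanceSetParams) pass callbacks that build or modify
# objects.Disk/objects.NIC instances and collect change descriptions in chgdesc.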
12804 def _UpdateIvNames(base_index, disks):
12805 """Updates the C{iv_name} attribute of disks.
12807 @type disks: list of L{objects.Disk}
12810 for (idx, disk) in enumerate(disks):
12811 disk.iv_name = "disk/%s" % (base_index + idx, )
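# Example (illustrative): after removing disk/1 from a three-disk instance,
# calling _UpdateIvNames(0, instance.disks) renumbers the remaining disks so
# their iv_name values are "disk/0" and "disk/1" again.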
12814 class _InstNicModPrivate:
12815 """Data structure for network interface modifications.
12817 Used by L{LUInstanceSetParams}.
12820 def __init__(self):
12825 class LUInstanceSetParams(LogicalUnit):
12826 """Modifies an instances's parameters.
12829 HPATH = "instance-modify"
12830 HTYPE = constants.HTYPE_INSTANCE
12834 def _UpgradeDiskNicMods(kind, mods, verify_fn):
12835 assert ht.TList(mods)
12836 assert not mods or len(mods[0]) in (2, 3)
12838 if mods and len(mods[0]) == 2:
12842 for op, params in mods:
12843 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
12844 result.append((op, -1, params))
12848 raise errors.OpPrereqError("Only one %s add or remove operation is"
12849 " supported at a time" % kind,
12850 errors.ECODE_INVAL)
12852 result.append((constants.DDM_MODIFY, op, params))
12854 assert verify_fn(result)
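# Example of the upgrade performed above (illustrative): the legacy two-element
# form [(constants.DDM_ADD, {...})] becomes [(constants.DDM_ADD, -1, {...})],
# while a numeric first element such as [(2, {...})] is interpreted as
# "modify the item at index 2", i.e. [(constants.DDM_MODIFY, 2, {...})].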
12861 def _CheckMods(kind, mods, key_types, item_fn):
12862 """Ensures requested disk/NIC modifications are valid.
12865 for (op, _, params) in mods:
12866 assert ht.TDict(params)
12868 utils.ForceDictType(params, key_types)
12870 if op == constants.DDM_REMOVE:
12872 raise errors.OpPrereqError("No settings should be passed when"
12873 " removing a %s" % kind,
12874 errors.ECODE_INVAL)
12875 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
12876 item_fn(op, params)
12878 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12881 def _VerifyDiskModification(op, params):
12882 """Verifies a disk modification.
12885 if op == constants.DDM_ADD:
12886 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
12887 if mode not in constants.DISK_ACCESS_SET:
12888 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
12889 errors.ECODE_INVAL)
12891 size = params.get(constants.IDISK_SIZE, None)
12893 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
12894 constants.IDISK_SIZE, errors.ECODE_INVAL)
12898 except (TypeError, ValueError), err:
12899 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
12900 errors.ECODE_INVAL)
12902 params[constants.IDISK_SIZE] = size
12904 elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
12905 raise errors.OpPrereqError("Disk size change not possible, use"
12906 " grow-disk", errors.ECODE_INVAL)
12909 def _VerifyNicModification(op, params):
12910 """Verifies a network interface modification.
12913 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
12914 ip = params.get(constants.INIC_IP, None)
12915 req_net = params.get(constants.INIC_NETWORK, None)
12916 link = params.get(constants.NIC_LINK, None)
12917 mode = params.get(constants.NIC_MODE, None)
12918 if req_net is not None:
12919 if req_net.lower() == constants.VALUE_NONE:
12920 params[constants.INIC_NETWORK] = None
12922 elif link is not None or mode is not None:
12923 raise errors.OpPrereqError("If network is given"
12924 " mode or link should not",
12925 errors.ECODE_INVAL)
12927 if op == constants.DDM_ADD:
12928 macaddr = params.get(constants.INIC_MAC, None)
12929 if macaddr is None:
12930 params[constants.INIC_MAC] = constants.VALUE_AUTO
12933 if ip.lower() == constants.VALUE_NONE:
12934 params[constants.INIC_IP] = None
12936 if ip.lower() == constants.NIC_IP_POOL:
12937 if op == constants.DDM_ADD and req_net is None:
12938 raise errors.OpPrereqError("If ip=pool, parameter network"
12940 errors.ECODE_INVAL)
12942 if not netutils.IPAddress.IsValid(ip):
12943 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
12944 errors.ECODE_INVAL)
12946 if constants.INIC_MAC in params:
12947 macaddr = params[constants.INIC_MAC]
12948 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12949 macaddr = utils.NormalizeAndValidateMac(macaddr)
12951 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
12952 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
12953 " modifying an existing NIC",
12954 errors.ECODE_INVAL)
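# Illustrative parameter shapes (example values only):
#   add a NIC with an IP from a named network's pool:
#     (constants.DDM_ADD, -1, {constants.INIC_NETWORK: "example-net",
#                              constants.INIC_IP: constants.NIC_IP_POOL})
#   clear the IP of NIC 0:
#     (constants.DDM_MODIFY, 0, {constants.INIC_IP: constants.VALUE_NONE})
# "example-net" is a made-up network name. A MAC of 'auto' is only accepted on
# add; a modification must keep the existing MAC or give an explicit valid one.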
12956 def CheckArguments(self):
12957 if not (self.op.nics or self.op.disks or self.op.disk_template or
12958 self.op.hvparams or self.op.beparams or self.op.os_name or
12959 self.op.offline is not None or self.op.runtime_mem):
12960 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
12962 if self.op.hvparams:
12963 _CheckGlobalHvParams(self.op.hvparams)
12965 self.op.disks = self._UpgradeDiskNicMods(
12966 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
12967 self.op.nics = self._UpgradeDiskNicMods(
12968 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
12970 # Check disk modifications
12971 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
12972 self._VerifyDiskModification)
12974 if self.op.disks and self.op.disk_template is not None:
12975 raise errors.OpPrereqError("Disk template conversion and other disk"
12976 " changes not supported at the same time",
12977 errors.ECODE_INVAL)
12979 if (self.op.disk_template and
12980 self.op.disk_template in constants.DTS_INT_MIRROR and
12981 self.op.remote_node is None):
12982 raise errors.OpPrereqError("Changing the disk template to a mirrored"
12983 " one requires specifying a secondary node",
12984 errors.ECODE_INVAL)
12986 # Check NIC modifications
12987 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
12988 self._VerifyNicModification)
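  # For reference, a sketch of an opcode this LU typically receives
  # (hypothetical names, shown only to illustrate the shapes validated above):
  #   opcodes.OpInstanceSetParams(
  #       instance_name="inst1.example.com",
  #       disks=[(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})],
  #       nics=[(constants.DDM_MODIFY, 0, {constants.NIC_LINK: "br0"})])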
12990 def ExpandNames(self):
12991 self._ExpandAndLockInstance()
12992 self.needed_locks[locking.LEVEL_NODEGROUP] = []
12993 # Can't even acquire node locks in shared mode as upcoming changes in
12994 # Ganeti 2.6 will start to modify the node object on disk conversion
12995 self.needed_locks[locking.LEVEL_NODE] = []
12996 self.needed_locks[locking.LEVEL_NODE_RES] = []
12997 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
12998 # Lock the node group to look up the ipolicy
12999 self.share_locks[locking.LEVEL_NODEGROUP] = 1
13001 def DeclareLocks(self, level):
13002 if level == locking.LEVEL_NODEGROUP:
13003 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
13004 # Acquire locks for the instance's nodegroups optimistically. Needs
13005 # to be verified in CheckPrereq
13006 self.needed_locks[locking.LEVEL_NODEGROUP] = \
13007 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
13008 elif level == locking.LEVEL_NODE:
13009 self._LockInstancesNodes()
13010 if self.op.disk_template and self.op.remote_node:
13011 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
13012 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
13013 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
13015 self.needed_locks[locking.LEVEL_NODE_RES] = \
13016 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
13018 def BuildHooksEnv(self):
13019 """Build hooks env.
13021 This runs on the master, primary and secondaries.
13025 if constants.BE_MINMEM in self.be_new:
13026 args["minmem"] = self.be_new[constants.BE_MINMEM]
13027 if constants.BE_MAXMEM in self.be_new:
13028 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
13029 if constants.BE_VCPUS in self.be_new:
13030 args["vcpus"] = self.be_new[constants.BE_VCPUS]
13031 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
13032 # information at all.
13034 if self._new_nics is not None:
13037 for nic in self._new_nics:
13038 n = copy.deepcopy(nic)
13039 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
13040 n.nicparams = nicparams
13041 nics.append(_NICToTuple(self, n))
13043 args["nics"] = nics
13045 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
13046 if self.op.disk_template:
13047 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
13048 if self.op.runtime_mem:
13049 env["RUNTIME_MEMORY"] = self.op.runtime_mem
13053 def BuildHooksNodes(self):
13054 """Build hooks nodes.
13057 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
13060 def _PrepareNicModification(self, params, private, old_ip, old_net,
13061 old_params, cluster, pnode):
13063 update_params_dict = dict([(key, params[key])
13064 for key in constants.NICS_PARAMETERS
13067 req_link = update_params_dict.get(constants.NIC_LINK, None)
13068 req_mode = update_params_dict.get(constants.NIC_MODE, None)
13070 new_net = params.get(constants.INIC_NETWORK, old_net)
13071 if new_net is not None:
13072 netparams = self.cfg.GetGroupNetParams(new_net, pnode)
13073 if netparams is None:
13074 raise errors.OpPrereqError("No netparams found for the network"
13075 " %s, probably not connected" % new_net,
13076 errors.ECODE_INVAL)
13077 new_params = dict(netparams)
13079 new_params = _GetUpdatedParams(old_params, update_params_dict)
13081 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
13083 new_filled_params = cluster.SimpleFillNIC(new_params)
13084 objects.NIC.CheckParameterSyntax(new_filled_params)
13086 new_mode = new_filled_params[constants.NIC_MODE]
13087 if new_mode == constants.NIC_MODE_BRIDGED:
13088 bridge = new_filled_params[constants.NIC_LINK]
13089 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
13091 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
13093 self.warn.append(msg)
13095 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
13097 elif new_mode == constants.NIC_MODE_ROUTED:
13098 ip = params.get(constants.INIC_IP, old_ip)
13100 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
13101 " on a routed NIC", errors.ECODE_INVAL)
13103 elif new_mode == constants.NIC_MODE_OVS:
13104 # TODO: check OVS link
13105 self.LogInfo("OVS links are currently not checked for correctness")
13107 if constants.INIC_MAC in params:
13108 mac = params[constants.INIC_MAC]
13110 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
13111 errors.ECODE_INVAL)
13112 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
13113 # otherwise generate the MAC address
13114 params[constants.INIC_MAC] = \
13115 self.cfg.GenerateMAC(new_net, self.proc.GetECId())
13117 # or validate/reserve the current one
13119 self.cfg.ReserveMAC(mac, self.proc.GetECId())
13120 except errors.ReservationError:
13121 raise errors.OpPrereqError("MAC address '%s' already in use"
13122 " in cluster" % mac,
13123 errors.ECODE_NOTUNIQUE)
13124 elif new_net != old_net:
13126 def get_net_prefix(net):
13128 uuid = self.cfg.LookupNetwork(net)
13130 nobj = self.cfg.GetNetwork(uuid)
13131 return nobj.mac_prefix
13134 new_prefix = get_net_prefix(new_net)
13135 old_prefix = get_net_prefix(old_net)
13136 if old_prefix != new_prefix:
13137 params[constants.INIC_MAC] = \
13138 self.cfg.GenerateMAC(new_net, self.proc.GetECId())
13140 # if there is a change in the NIC's network configuration
13141 new_ip = params.get(constants.INIC_IP, old_ip)
13142 if (new_ip, new_net) != (old_ip, old_net):
13145 if new_ip.lower() == constants.NIC_IP_POOL:
13147 new_ip = self.cfg.GenerateIp(new_net, self.proc.GetECId())
13148 except errors.ReservationError:
13149 raise errors.OpPrereqError("Unable to get a free IP"
13150 " from the address pool",
13151 errors.ECODE_STATE)
13152 self.LogInfo("Chose IP %s from pool %s", new_ip, new_net)
13153 params[constants.INIC_IP] = new_ip
13154 elif new_ip != old_ip or new_net != old_net:
13156 self.LogInfo("Reserving IP %s in pool %s", new_ip, new_net)
13157 self.cfg.ReserveIp(new_net, new_ip, self.proc.GetECId())
13158 except errors.ReservationError:
13159 raise errors.OpPrereqError("IP %s not available in network %s" %
13161 errors.ECODE_NOTUNIQUE)
13162 elif new_ip.lower() == constants.NIC_IP_POOL:
13163 raise errors.OpPrereqError("ip=pool, but no network found",
13164 errors.ECODE_INVAL)
13167 elif self.op.conflicts_check:
13168 _CheckForConflictingIp(self, new_ip, pnode)
13173 self.cfg.ReleaseIp(old_net, old_ip, self.proc.GetECId())
13174 except errors.AddressPoolError:
13175 logging.warning("Release IP %s not contained in network %s",
13178 # there are no changes in (net, ip) tuple
13179 elif (old_net is not None and
13180 (req_link is not None or req_mode is not None)):
13181 raise errors.OpPrereqError("Not allowed to change link or mode of"
13182 " a NIC that is connected to a network",
13183 errors.ECODE_INVAL)
13185 private.params = new_params
13186 private.filled = new_filled_params
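  # At this point private.params holds the NIC parameters as requested (without
  # cluster defaults) and private.filled the fully defaulted version; the
  # latter is what _CreateNewNic/_ApplyNicMods later attach to the NIC object.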
13188 def CheckPrereq(self):
13189 """Check prerequisites.
13191 This only checks the instance list against the existing names.
13194 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
13195 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
13197 cluster = self.cluster = self.cfg.GetClusterInfo()
13198 assert self.instance is not None, \
13199 "Cannot retrieve locked instance %s" % self.op.instance_name
13201 pnode = instance.primary_node
13202 assert pnode in self.owned_locks(locking.LEVEL_NODE)
13203 nodelist = list(instance.all_nodes)
13204 pnode_info = self.cfg.GetNodeInfo(pnode)
13205 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
13207 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
13208 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
13209 group_info = self.cfg.GetNodeGroup(pnode_info.group)
13211 # dictionary with instance information after the modification
13214 # Prepare disk/NIC modifications
13215 self.diskmod = PrepareContainerMods(self.op.disks, None)
13216 self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
13219 if self.op.os_name and not self.op.force:
13220 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
13221 self.op.force_variant)
13222 instance_os = self.op.os_name
13224 instance_os = instance.os
13226 assert not (self.op.disk_template and self.op.disks), \
13227 "Can't modify disk template and apply disk changes at the same time"
13229 if self.op.disk_template:
13230 if instance.disk_template == self.op.disk_template:
13231 raise errors.OpPrereqError("Instance already has disk template %s" %
13232 instance.disk_template, errors.ECODE_INVAL)
13234 if (instance.disk_template,
13235 self.op.disk_template) not in self._DISK_CONVERSIONS:
13236 raise errors.OpPrereqError("Unsupported disk template conversion from"
13237 " %s to %s" % (instance.disk_template,
13238 self.op.disk_template),
13239 errors.ECODE_INVAL)
13240 _CheckInstanceState(self, instance, INSTANCE_DOWN,
13241 msg="cannot change disk template")
13242 if self.op.disk_template in constants.DTS_INT_MIRROR:
13243 if self.op.remote_node == pnode:
13244 raise errors.OpPrereqError("Given new secondary node %s is the same"
13245 " as the primary node of the instance" %
13246 self.op.remote_node, errors.ECODE_STATE)
13247 _CheckNodeOnline(self, self.op.remote_node)
13248 _CheckNodeNotDrained(self, self.op.remote_node)
13249 # FIXME: here we assume that the old instance type is DT_PLAIN
13250 assert instance.disk_template == constants.DT_PLAIN
13251 disks = [{constants.IDISK_SIZE: d.size,
13252 constants.IDISK_VG: d.logical_id[0]}
13253 for d in instance.disks]
13254 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
13255 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
13257 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
13258 snode_group = self.cfg.GetNodeGroup(snode_info.group)
13259 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
13261 _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
13262 ignore=self.op.ignore_ipolicy)
13263 if pnode_info.group != snode_info.group:
13264 self.LogWarning("The primary and secondary nodes are in two"
13265 " different node groups; the disk parameters"
13266 " from the first disk's node group will be"
13269 # hvparams processing
13270 if self.op.hvparams:
13271 hv_type = instance.hypervisor
13272 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
13273 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
13274 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
13277 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
13278 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
13279 self.hv_proposed = self.hv_new = hv_new # the new actual values
13280 self.hv_inst = i_hvdict # the new dict (without defaults)
13282 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
13284 self.hv_new = self.hv_inst = {}
13286 # beparams processing
13287 if self.op.beparams:
13288 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
13290 objects.UpgradeBeParams(i_bedict)
13291 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
13292 be_new = cluster.SimpleFillBE(i_bedict)
13293 self.be_proposed = self.be_new = be_new # the new actual values
13294 self.be_inst = i_bedict # the new dict (without defaults)
13296 self.be_new = self.be_inst = {}
13297 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
13298 be_old = cluster.FillBE(instance)
13300 # CPU param validation -- checking every time a parameter is
13301 # changed to cover all cases where either CPU mask or vcpus have been changed
13303 if (constants.BE_VCPUS in self.be_proposed and
13304 constants.HV_CPU_MASK in self.hv_proposed):
13306 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
13307 # Verify mask is consistent with number of vCPUs. Can skip this
13308 # test if only 1 entry in the CPU mask, which means same mask
13309 # is applied to all vCPUs.
13310 if (len(cpu_list) > 1 and
13311 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
13312 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
13314 (self.be_proposed[constants.BE_VCPUS],
13315 self.hv_proposed[constants.HV_CPU_MASK]),
13316 errors.ECODE_INVAL)
13318 # Only perform this test if a new CPU mask is given
13319 if constants.HV_CPU_MASK in self.hv_new:
13320 # Calculate the largest CPU number requested
13321 max_requested_cpu = max(map(max, cpu_list))
13322 # Check that all of the instance's nodes have enough physical CPUs to
13323 # satisfy the requested CPU mask
13324 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
13325 max_requested_cpu + 1, instance.hypervisor)
13327 # osparams processing
13328 if self.op.osparams:
13329 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
13330 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
13331 self.os_inst = i_osdict # the new dict (without defaults)
13337 #TODO(dynmem): do the appropriate check involving MINMEM
13338 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
13339 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
13340 mem_check_list = [pnode]
13341 if be_new[constants.BE_AUTO_BALANCE]:
13342 # either we changed auto_balance to yes or it was from before
13343 mem_check_list.extend(instance.secondary_nodes)
13344 instance_info = self.rpc.call_instance_info(pnode, instance.name,
13345 instance.hypervisor)
13346 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
13347 [instance.hypervisor])
13348 pninfo = nodeinfo[pnode]
13349 msg = pninfo.fail_msg
13351 # Assume the primary node is unreachable and go ahead
13352 self.warn.append("Can't get info from primary node %s: %s" %
13355 (_, _, (pnhvinfo, )) = pninfo.payload
13356 if not isinstance(pnhvinfo.get("memory_free", None), int):
13357 self.warn.append("Node data from primary node %s doesn't contain"
13358 " free memory information" % pnode)
13359 elif instance_info.fail_msg:
13360 self.warn.append("Can't get instance runtime information: %s" %
13361 instance_info.fail_msg)
13363 if instance_info.payload:
13364 current_mem = int(instance_info.payload["memory"])
13366 # Assume instance not running
13367 # (there is a slight race condition here, but it's not very
13368 # probable, and we have no other way to check)
13369 # TODO: Describe race condition
13371 #TODO(dynmem): do the appropriate check involving MINMEM
13372 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
13373 pnhvinfo["memory_free"])
13375 raise errors.OpPrereqError("This change will prevent the instance"
13376 " from starting, due to %d MB of memory"
13377 " missing on its primary node" %
13378 miss_mem, errors.ECODE_NORES)
13380 if be_new[constants.BE_AUTO_BALANCE]:
13381 for node, nres in nodeinfo.items():
13382 if node not in instance.secondary_nodes:
13384 nres.Raise("Can't get info from secondary node %s" % node,
13385 prereq=True, ecode=errors.ECODE_STATE)
13386 (_, _, (nhvinfo, )) = nres.payload
13387 if not isinstance(nhvinfo.get("memory_free", None), int):
13388 raise errors.OpPrereqError("Secondary node %s didn't return free"
13389 " memory information" % node,
13390 errors.ECODE_STATE)
13391 #TODO(dynmem): do the appropriate check involving MINMEM
13392 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
13393 raise errors.OpPrereqError("This change will prevent the instance"
13394 " from failover to its secondary node"
13395 " %s, due to not enough memory" % node,
13396 errors.ECODE_STATE)
13398 if self.op.runtime_mem:
13399 remote_info = self.rpc.call_instance_info(instance.primary_node,
13401 instance.hypervisor)
13402 remote_info.Raise("Error checking node %s" % instance.primary_node)
13403 if not remote_info.payload: # not running already
13404 raise errors.OpPrereqError("Instance %s is not running" %
13405 instance.name, errors.ECODE_STATE)
13407 current_memory = remote_info.payload["memory"]
13408 if (not self.op.force and
13409 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
13410 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
13411 raise errors.OpPrereqError("Instance %s must have memory between %d"
13412 " and %d MB of memory unless --force is"
13415 self.be_proposed[constants.BE_MINMEM],
13416 self.be_proposed[constants.BE_MAXMEM]),
13417 errors.ECODE_INVAL)
13419 delta = self.op.runtime_mem - current_memory
13421 _CheckNodeFreeMemory(self, instance.primary_node,
13422 "ballooning memory for instance %s" %
13423 instance.name, delta, instance.hypervisor)
13425 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
13426 raise errors.OpPrereqError("Disk operations not supported for"
13427 " diskless instances", errors.ECODE_INVAL)
13429 def _PrepareNicCreate(_, params, private):
13430 self._PrepareNicModification(params, private, None, None,
13431 {}, cluster, pnode)
13432 return (None, None)
13434 def _PrepareNicMod(_, nic, params, private):
13435 self._PrepareNicModification(params, private, nic.ip, nic.network,
13436 nic.nicparams, cluster, pnode)
13439 def _PrepareNicRemove(_, params, __):
13441 net = params.network
13442 if net is not None and ip is not None:
13443 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
13445 # Verify NIC changes (operating on copy)
13446 nics = instance.nics[:]
13447 ApplyContainerMods("NIC", nics, None, self.nicmod,
13448 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
13449 if len(nics) > constants.MAX_NICS:
13450 raise errors.OpPrereqError("Instance has too many network interfaces"
13451 " (%d), cannot add more" % constants.MAX_NICS,
13452 errors.ECODE_STATE)
13454 # Verify disk changes (operating on a copy)
13455 disks = instance.disks[:]
13456 ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
13457 if len(disks) > constants.MAX_DISKS:
13458 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
13459 " more" % constants.MAX_DISKS,
13460 errors.ECODE_STATE)
13461 disk_sizes = [disk.size for disk in instance.disks]
13462 disk_sizes.extend(params["size"] for (op, idx, params, private) in
13463 self.diskmod if op == constants.DDM_ADD)
13464 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
13465 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
13467 if self.op.offline is not None and self.op.offline:
13468 _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
13469 msg="can't change to offline")
13471 # Pre-compute NIC changes (necessary to use result in hooks)
13472 self._nic_chgdesc = []
13474 # Operate on copies as this is still in prereq
13475 nics = [nic.Copy() for nic in instance.nics]
13476 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
13477 self._CreateNewNic, self._ApplyNicMods, None)
13478 self._new_nics = nics
13479 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
13481 self._new_nics = None
13482 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
13484 if not self.op.ignore_ipolicy:
13485 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
13488 # Fill ispec with backend parameters
13489 ispec[constants.ISPEC_SPINDLE_USE] = \
13490 self.be_new.get(constants.BE_SPINDLE_USE, None)
13491 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
13494 # Copy ispec to verify parameters with min/max values separately
13495 ispec_max = ispec.copy()
13496 ispec_max[constants.ISPEC_MEM_SIZE] = \
13497 self.be_new.get(constants.BE_MAXMEM, None)
13498 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max)
13499 ispec_min = ispec.copy()
13500 ispec_min[constants.ISPEC_MEM_SIZE] = \
13501 self.be_new.get(constants.BE_MINMEM, None)
13502 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min)
13504 if (res_max or res_min):
13505 # FIXME: Improve error message by including information about whether
13506 # the upper or lower limit of the parameter fails the ipolicy.
13507 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
13508 (group_info, group_info.name,
13509 utils.CommaJoin(set(res_max + res_min))))
13510 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
13512 def _ConvertPlainToDrbd(self, feedback_fn):
13513 """Converts an instance from plain to drbd.
13516 feedback_fn("Converting template to drbd")
13517 instance = self.instance
13518 pnode = instance.primary_node
13519 snode = self.op.remote_node
13521 assert instance.disk_template == constants.DT_PLAIN
13523 # create a fake disk info for _GenerateDiskTemplate
13524 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
13525 constants.IDISK_VG: d.logical_id[0]}
13526 for d in instance.disks]
13527 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
13528 instance.name, pnode, [snode],
13529 disk_info, None, None, 0, feedback_fn,
13531 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
13533 info = _GetInstanceInfoText(instance)
13534 feedback_fn("Creating additional volumes...")
13535 # first, create the missing data and meta devices
13536 for disk in anno_disks:
13537 # unfortunately this is... not too nice
13538 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
13540 for child in disk.children:
13541 _CreateSingleBlockDev(self, snode, instance, child, info, True)
13542 # at this stage, all new LVs have been created, we can rename the
13544 feedback_fn("Renaming original volumes...")
13545 rename_list = [(o, n.children[0].logical_id)
13546 for (o, n) in zip(instance.disks, new_disks)]
13547 result = self.rpc.call_blockdev_rename(pnode, rename_list)
13548 result.Raise("Failed to rename original LVs")
13550 feedback_fn("Initializing DRBD devices...")
13551 # all child devices are in place, we can now create the DRBD devices
13552 for disk in anno_disks:
13553 for node in [pnode, snode]:
13554 f_create = node == pnode
13555 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
13557 # at this point, the instance has been modified
13558 instance.disk_template = constants.DT_DRBD8
13559 instance.disks = new_disks
13560 self.cfg.Update(instance, feedback_fn)
13562 # Release node locks while waiting for sync
13563 _ReleaseLocks(self, locking.LEVEL_NODE)
13565 # disks are created, waiting for sync
13566 disk_abort = not _WaitForSync(self, instance,
13567 oneshot=not self.op.wait_for_sync)
13569 raise errors.OpExecError("There are some degraded disks for"
13570 " this instance, please cleanup manually")
13572 # Node resource locks will be released by caller
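  # Summary of the plain->drbd conversion above: a DRBD disk tree is generated,
  # the missing meta and secondary-node volumes are created, the original LVs
  # on the primary are renamed so they become the DRBD data devices, the DRBD
  # devices are assembled on both nodes, and _WaitForSync then waits for (or,
  # if wait_for_sync was not requested, merely checks) the initial resync.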
13574 def _ConvertDrbdToPlain(self, feedback_fn):
13575 """Converts an instance from drbd to plain.
13578 instance = self.instance
13580 assert len(instance.secondary_nodes) == 1
13581 assert instance.disk_template == constants.DT_DRBD8
13583 pnode = instance.primary_node
13584 snode = instance.secondary_nodes[0]
13585 feedback_fn("Converting template to plain")
13587 old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
13588 new_disks = [d.children[0] for d in instance.disks]
13590 # copy over size and mode
13591 for parent, child in zip(old_disks, new_disks):
13592 child.size = parent.size
13593 child.mode = parent.mode
13595 # this is a DRBD disk, return its port to the pool
13596 # NOTE: this must be done right before the call to cfg.Update!
13597 for disk in old_disks:
13598 tcp_port = disk.logical_id[2]
13599 self.cfg.AddTcpUdpPort(tcp_port)
13601 # update instance structure
13602 instance.disks = new_disks
13603 instance.disk_template = constants.DT_PLAIN
13604 self.cfg.Update(instance, feedback_fn)
13606 # Release locks in case removing disks takes a while
13607 _ReleaseLocks(self, locking.LEVEL_NODE)
13609 feedback_fn("Removing volumes on the secondary node...")
13610 for disk in old_disks:
13611 self.cfg.SetDiskID(disk, snode)
13612 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
13614 self.LogWarning("Could not remove block device %s on node %s,"
13615 " continuing anyway: %s", disk.iv_name, snode, msg)
13617 feedback_fn("Removing unneeded volumes on the primary node...")
13618 for idx, disk in enumerate(old_disks):
13619 meta = disk.children[1]
13620 self.cfg.SetDiskID(meta, pnode)
13621 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
13623 self.LogWarning("Could not remove metadata for disk %d on node %s,"
13624 " continuing anyway: %s", idx, pnode, msg)
13626 def _CreateNewDisk(self, idx, params, _):
13627 """Creates a new disk.
13630 instance = self.instance
13633 if instance.disk_template in constants.DTS_FILEBASED:
13634 (file_driver, file_path) = instance.disks[0].logical_id
13635 file_path = os.path.dirname(file_path)
13637 file_driver = file_path = None
13640 _GenerateDiskTemplate(self, instance.disk_template, instance.name,
13641 instance.primary_node, instance.secondary_nodes,
13642 [params], file_path, file_driver, idx,
13643 self.Log, self.diskparams)[0]
13645 info = _GetInstanceInfoText(instance)
13647 logging.info("Creating volume %s for instance %s",
13648 disk.iv_name, instance.name)
13649 # Note: this needs to be kept in sync with _CreateDisks
13651 for node in instance.all_nodes:
13652 f_create = (node == instance.primary_node)
13654 _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
13655 except errors.OpExecError, err:
13656 self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
13657 disk.iv_name, disk, node, err)
13660 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
13664 def _ModifyDisk(idx, disk, params, _):
13665 """Modifies a disk.
13668 disk.mode = params[constants.IDISK_MODE]
13671 ("disk.mode/%d" % idx, disk.mode),
13674 def _RemoveDisk(self, idx, root, _):
13678 (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
13679 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
13680 self.cfg.SetDiskID(disk, node)
13681 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
13683 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
13684 " continuing anyway", idx, node, msg)
13686 # if this is a DRBD disk, return its port to the pool
13687 if root.dev_type in constants.LDS_DRBD:
13688 self.cfg.AddTcpUdpPort(root.logical_id[2])
13691 def _CreateNewNic(idx, params, private):
13692 """Creates data structure for a new network interface.
13695 mac = params[constants.INIC_MAC]
13696 ip = params.get(constants.INIC_IP, None)
13697 net = params.get(constants.INIC_NETWORK, None)
13698 #TODO: not private.filled?? can a nic have no nicparams??
13699 nicparams = private.filled
13701 return (objects.NIC(mac=mac, ip=ip, network=net, nicparams=nicparams), [
13703 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
13704 (mac, ip, private.filled[constants.NIC_MODE],
13705 private.filled[constants.NIC_LINK],
13710 def _ApplyNicMods(idx, nic, params, private):
13711 """Modifies a network interface.
13716 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NETWORK]:
13718 changes.append(("nic.%s/%d" % (key, idx), params[key]))
13719 setattr(nic, key, params[key])
13722 nic.nicparams = private.filled
13724 for (key, val) in nic.nicparams.items():
13725 changes.append(("nic.%s/%d" % (key, idx), val))
13729 def Exec(self, feedback_fn):
13730 """Modifies an instance.
13732 All parameters take effect only at the next restart of the instance.
13735 # Process here the warnings from CheckPrereq, as we don't have a
13736 # feedback_fn there.
13737 # TODO: Replace with self.LogWarning
13738 for warn in self.warn:
13739 feedback_fn("WARNING: %s" % warn)
13741 assert ((self.op.disk_template is None) ^
13742 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
13743 "Not owning any node resource locks"
13746 instance = self.instance
13749 if self.op.runtime_mem:
13750 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
13752 self.op.runtime_mem)
13753 rpcres.Raise("Cannot modify instance runtime memory")
13754 result.append(("runtime_memory", self.op.runtime_mem))
13756 # Apply disk changes
13757 ApplyContainerMods("disk", instance.disks, result, self.diskmod,
13758 self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
13759 _UpdateIvNames(0, instance.disks)
13761 if self.op.disk_template:
13763 check_nodes = set(instance.all_nodes)
13764 if self.op.remote_node:
13765 check_nodes.add(self.op.remote_node)
13766 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
13767 owned = self.owned_locks(level)
13768 assert not (check_nodes - owned), \
13769 ("Not owning the correct locks, owning %r, expected at least %r" %
13770 (owned, check_nodes))
13772 r_shut = _ShutdownInstanceDisks(self, instance)
13774 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
13775 " proceed with disk template conversion")
13776 mode = (instance.disk_template, self.op.disk_template)
13778 self._DISK_CONVERSIONS[mode](self, feedback_fn)
13780 self.cfg.ReleaseDRBDMinors(instance.name)
13782 result.append(("disk_template", self.op.disk_template))
13784 assert instance.disk_template == self.op.disk_template, \
13785 ("Expected disk template '%s', found '%s'" %
13786 (self.op.disk_template, instance.disk_template))
13788 # Release node and resource locks if there are any (they might already have
13789 # been released during disk conversion)
13790 _ReleaseLocks(self, locking.LEVEL_NODE)
13791 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
13793 # Apply NIC changes
13794 if self._new_nics is not None:
13795 instance.nics = self._new_nics
13796 result.extend(self._nic_chgdesc)
13799 if self.op.hvparams:
13800 instance.hvparams = self.hv_inst
13801 for key, val in self.op.hvparams.iteritems():
13802 result.append(("hv/%s" % key, val))
13805 if self.op.beparams:
13806 instance.beparams = self.be_inst
13807 for key, val in self.op.beparams.iteritems():
13808 result.append(("be/%s" % key, val))
13811 if self.op.os_name:
13812 instance.os = self.op.os_name
13815 if self.op.osparams:
13816 instance.osparams = self.os_inst
13817 for key, val in self.op.osparams.iteritems():
13818 result.append(("os/%s" % key, val))
13820 if self.op.offline is None:
13823 elif self.op.offline:
13824 # Mark instance as offline
13825 self.cfg.MarkInstanceOffline(instance.name)
13826 result.append(("admin_state", constants.ADMINST_OFFLINE))
13828 # Mark instance as online, but stopped
13829 self.cfg.MarkInstanceDown(instance.name)
13830 result.append(("admin_state", constants.ADMINST_DOWN))
13832 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
13834 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
13835 self.owned_locks(locking.LEVEL_NODE)), \
13836 "All node locks should have been released by now"
13840 _DISK_CONVERSIONS = {
13841 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
13842 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
13846 class LUInstanceChangeGroup(LogicalUnit):
13847 HPATH = "instance-change-group"
13848 HTYPE = constants.HTYPE_INSTANCE
13851 def ExpandNames(self):
13852 self.share_locks = _ShareAll()
13854 self.needed_locks = {
13855 locking.LEVEL_NODEGROUP: [],
13856 locking.LEVEL_NODE: [],
13857 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
13860 self._ExpandAndLockInstance()
13862 if self.op.target_groups:
13863 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
13864 self.op.target_groups)
13866 self.req_target_uuids = None
13868 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
13870 def DeclareLocks(self, level):
13871 if level == locking.LEVEL_NODEGROUP:
13872 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
13874 if self.req_target_uuids:
13875 lock_groups = set(self.req_target_uuids)
13877 # Lock all groups used by instance optimistically; this requires going
13878 # via the node before it's locked, requiring verification later on
13879 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
13880 lock_groups.update(instance_groups)
13882 # No target groups, need to lock all of them
13883 lock_groups = locking.ALL_SET
13885 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
13887 elif level == locking.LEVEL_NODE:
13888 if self.req_target_uuids:
13889 # Lock all nodes used by instances
13890 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
13891 self._LockInstancesNodes()
13893 # Lock all nodes in all potential target groups
13894 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
13895 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
13896 member_nodes = [node_name
13897 for group in lock_groups
13898 for node_name in self.cfg.GetNodeGroup(group).members]
13899 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
13901 # Lock all nodes as all groups are potential targets
13902 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13904 def CheckPrereq(self):
13905 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13906 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
13907 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
13909 assert (self.req_target_uuids is None or
13910 owned_groups.issuperset(self.req_target_uuids))
13911 assert owned_instances == set([self.op.instance_name])
13913 # Get instance information
13914 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
13916 # Check if node groups for locked instance are still correct
13917 assert owned_nodes.issuperset(self.instance.all_nodes), \
13918 ("Instance %s's nodes changed while we kept the lock" %
13919 self.op.instance_name)
13921 inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
13924 if self.req_target_uuids:
13925 # User requested specific target groups
13926 self.target_uuids = frozenset(self.req_target_uuids)
13928 # All groups except those used by the instance are potential targets
13929 self.target_uuids = owned_groups - inst_groups
13931 conflicting_groups = self.target_uuids & inst_groups
13932 if conflicting_groups:
13933 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
13934 " used by the instance '%s'" %
13935 (utils.CommaJoin(conflicting_groups),
13936 self.op.instance_name),
13937 errors.ECODE_INVAL)
13939 if not self.target_uuids:
13940 raise errors.OpPrereqError("There are no possible target groups",
13941 errors.ECODE_INVAL)
13943 def BuildHooksEnv(self):
13944 """Build hooks env.
13947 assert self.target_uuids
13950 "TARGET_GROUPS": " ".join(self.target_uuids),
13953 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13957 def BuildHooksNodes(self):
13958 """Build hooks nodes.
13961 mn = self.cfg.GetMasterNode()
13962 return ([mn], [mn])
13964 def Exec(self, feedback_fn):
13965 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
13967 assert instances == [self.op.instance_name], "Instance not locked"
13969 req = iallocator.IAReqGroupChange(instances=instances,
13970 target_groups=list(self.target_uuids))
13971 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
13973 ial.Run(self.op.iallocator)
13975 if not ial.success:
13976 raise errors.OpPrereqError("Can't compute solution for changing group of"
13977 " instance '%s' using iallocator '%s': %s" %
13978 (self.op.instance_name, self.op.iallocator,
13979 ial.info), errors.ECODE_NORES)
13981 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
13983 self.LogInfo("Iallocator returned %s job(s) for changing group of"
13984 " instance '%s'", len(jobs), self.op.instance_name)
13986 return ResultWithJobs(jobs)
13989 class LUBackupQuery(NoHooksLU):
13990 """Query the exports list
13995 def CheckArguments(self):
13996 self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
13997 ["node", "export"], self.op.use_locking)
13999 def ExpandNames(self):
14000 self.expq.ExpandNames(self)
14002 def DeclareLocks(self, level):
14003 self.expq.DeclareLocks(self, level)
14005 def Exec(self, feedback_fn):
14008 for (node, expname) in self.expq.OldStyleQuery(self):
14009 if expname is None:
14010 result[node] = False
14012 result.setdefault(node, []).append(expname)
14017 class _ExportQuery(_QueryBase):
14018 FIELDS = query.EXPORT_FIELDS
14020 #: The node name is not a unique key for this query
14021 SORT_FIELD = "node"
14023 def ExpandNames(self, lu):
14024 lu.needed_locks = {}
14026 # The following variables interact with _QueryBase._GetNames
14028 self.wanted = _GetWantedNodes(lu, self.names)
14030 self.wanted = locking.ALL_SET
14032 self.do_locking = self.use_locking
14034 if self.do_locking:
14035 lu.share_locks = _ShareAll()
14036 lu.needed_locks = {
14037 locking.LEVEL_NODE: self.wanted,
14041 lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
14043 def DeclareLocks(self, lu, level):
14046 def _GetQueryData(self, lu):
14047 """Computes the list of nodes and their attributes.
14050 # Locking is not used
14052 assert not (compat.any(lu.glm.is_owned(level)
14053 for level in locking.LEVELS
14054 if level != locking.LEVEL_CLUSTER) or
14055 self.do_locking or self.use_locking)
14057 nodes = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)
14061 for (node, nres) in lu.rpc.call_export_list(nodes).items():
14063 result.append((node, None))
14065 result.extend((node, expname) for expname in nres.payload)
14070 class LUBackupPrepare(NoHooksLU):
14071 """Prepares an instance for an export and returns useful information.
14076 def ExpandNames(self):
14077 self._ExpandAndLockInstance()
14079 def CheckPrereq(self):
14080 """Check prerequisites.
14083 instance_name = self.op.instance_name
14085 self.instance = self.cfg.GetInstanceInfo(instance_name)
14086 assert self.instance is not None, \
14087 "Cannot retrieve locked instance %s" % self.op.instance_name
14088 _CheckNodeOnline(self, self.instance.primary_node)
14090 self._cds = _GetClusterDomainSecret()
14092 def Exec(self, feedback_fn):
14093 """Prepares an instance for an export.
14096 instance = self.instance
14098 if self.op.mode == constants.EXPORT_MODE_REMOTE:
14099 salt = utils.GenerateSecret(8)
14101 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
14102 result = self.rpc.call_x509_cert_create(instance.primary_node,
14103 constants.RIE_CERT_VALIDITY)
14104 result.Raise("Can't create X509 key and certificate on %s" % result.node)
14106 (name, cert_pem) = result.payload
14108 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
14112 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
14113 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
14115 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
14121 class LUBackupExport(LogicalUnit):
14122 """Export an instance to an image in the cluster.
14125 HPATH = "instance-export"
14126 HTYPE = constants.HTYPE_INSTANCE
14129 def CheckArguments(self):
14130 """Check the arguments.
14133 self.x509_key_name = self.op.x509_key_name
14134 self.dest_x509_ca_pem = self.op.destination_x509_ca
14136 if self.op.mode == constants.EXPORT_MODE_REMOTE:
14137 if not self.x509_key_name:
14138 raise errors.OpPrereqError("Missing X509 key name for encryption",
14139 errors.ECODE_INVAL)
14141 if not self.dest_x509_ca_pem:
14142 raise errors.OpPrereqError("Missing destination X509 CA",
14143 errors.ECODE_INVAL)
14145 def ExpandNames(self):
14146 self._ExpandAndLockInstance()
14148 # Lock all nodes for local exports
14149 if self.op.mode == constants.EXPORT_MODE_LOCAL:
14150 # FIXME: lock only instance primary and destination node
14152 # Sad but true, for now we have to lock all nodes, as we don't know where
14153 # the previous export might be, and in this LU we search for it and
14154 # remove it from its current node. In the future we could fix this by:
14155 # - making a tasklet to search (share-lock all), then create the
14156 # new one, then one to remove, after
14157 # - removing the removal operation altogether
14158 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
14160 # Allocations should be stopped while this LU runs with node locks, but
14161 # it doesn't have to be exclusive
14162 self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
14163 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
14165 def DeclareLocks(self, level):
14166 """Last minute lock declaration."""
14167 # All nodes are locked anyway, so nothing to do here.
14169 def BuildHooksEnv(self):
14170 """Build hooks env.
14172 This will run on the master, primary node and target node.
14176 "EXPORT_MODE": self.op.mode,
14177 "EXPORT_NODE": self.op.target_node,
14178 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
14179 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
14180 # TODO: Generic function for boolean env variables
14181 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
14184 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
14188 def BuildHooksNodes(self):
14189 """Build hooks nodes.
14192 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
14194 if self.op.mode == constants.EXPORT_MODE_LOCAL:
14195 nl.append(self.op.target_node)
14199 def CheckPrereq(self):
14200 """Check prerequisites.
14202 This checks that the instance and node names are valid.
14205 instance_name = self.op.instance_name
14207 self.instance = self.cfg.GetInstanceInfo(instance_name)
14208 assert self.instance is not None, \
14209 "Cannot retrieve locked instance %s" % self.op.instance_name
14210 _CheckNodeOnline(self, self.instance.primary_node)
14212 if (self.op.remove_instance and
14213 self.instance.admin_state == constants.ADMINST_UP and
14214 not self.op.shutdown):
14215 raise errors.OpPrereqError("Can not remove instance without shutting it"
14216 " down before", errors.ECODE_STATE)
14218 if self.op.mode == constants.EXPORT_MODE_LOCAL:
14219 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
14220 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
14221 assert self.dst_node is not None
14223 _CheckNodeOnline(self, self.dst_node.name)
14224 _CheckNodeNotDrained(self, self.dst_node.name)
14227 self.dest_disk_info = None
14228 self.dest_x509_ca = None
14230 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
14231 self.dst_node = None
14233 if len(self.op.target_node) != len(self.instance.disks):
14234 raise errors.OpPrereqError(("Received destination information for %s"
14235 " disks, but instance %s has %s disks") %
14236 (len(self.op.target_node), instance_name,
14237 len(self.instance.disks)),
14238 errors.ECODE_INVAL)
14240 cds = _GetClusterDomainSecret()
14242 # Check X509 key name
14244 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
14245 except (TypeError, ValueError), err:
14246 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err,
14247 errors.ECODE_INVAL)
14249 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
14250 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
14251 errors.ECODE_INVAL)
14253 # Load and verify CA
14255 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
14256 except OpenSSL.crypto.Error, err:
14257 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
14258 (err, ), errors.ECODE_INVAL)
14260 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
14261 if errcode is not None:
14262 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
14263 (msg, ), errors.ECODE_INVAL)
14265 self.dest_x509_ca = cert
14267 # Verify target information
14269 for idx, disk_data in enumerate(self.op.target_node):
14271 (host, port, magic) = \
14272 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
14273 except errors.GenericError, err:
14274 raise errors.OpPrereqError("Target info for disk %s: %s" %
14275 (idx, err), errors.ECODE_INVAL)
14277 disk_info.append((host, port, magic))
14279 assert len(disk_info) == len(self.op.target_node)
14280 self.dest_disk_info = disk_info
14283 raise errors.ProgrammerError("Unhandled export mode %r" %
14286 # instance disk type verification
14287 # TODO: Implement export support for file-based disks
14288 for disk in self.instance.disks:
14289 if disk.dev_type == constants.LD_FILE:
14290 raise errors.OpPrereqError("Export not supported for instances with"
14291 " file-based disks", errors.ECODE_INVAL)
14293 def _CleanupExports(self, feedback_fn):
14294 """Removes exports of current instance from all other nodes.
14296 If an instance in a cluster with nodes A..D was exported to node C, its
14297 exports will be removed from the nodes A, B and D.
14300 assert self.op.mode != constants.EXPORT_MODE_REMOTE
14302 nodelist = self.cfg.GetNodeList()
14303 nodelist.remove(self.dst_node.name)
14305 # on one-node clusters nodelist will be empty after the removal
14306 # if we proceed the backup would be removed because OpBackupQuery
14307 # substitutes an empty list with the full cluster node list.
14308 iname = self.instance.name
14310 feedback_fn("Removing old exports for instance %s" % iname)
14311 exportlist = self.rpc.call_export_list(nodelist)
14312 for node in exportlist:
14313 if exportlist[node].fail_msg:
14315 if iname in exportlist[node].payload:
14316 msg = self.rpc.call_export_remove(node, iname).fail_msg
14318 self.LogWarning("Could not remove older export for instance %s"
14319 " on node %s: %s", iname, node, msg)
14321 def Exec(self, feedback_fn):
14322 """Export an instance to an image in the cluster.
14325 assert self.op.mode in constants.EXPORT_MODES
14327 instance = self.instance
14328 src_node = instance.primary_node
14330 if self.op.shutdown:
14331 # shutdown the instance, but not the disks
14332 feedback_fn("Shutting down instance %s" % instance.name)
14333 result = self.rpc.call_instance_shutdown(src_node, instance,
14334 self.op.shutdown_timeout)
14335 # TODO: Maybe ignore failures if ignore_remove_failures is set
14336 result.Raise("Could not shutdown instance %s on"
14337 " node %s" % (instance.name, src_node))
14339 # set the disks ID correctly since call_instance_start needs the
14340 # correct drbd minor to create the symlinks
14341 for disk in instance.disks:
14342 self.cfg.SetDiskID(disk, src_node)
14344 activate_disks = (instance.admin_state != constants.ADMINST_UP)
14347 # Activate the instance disks if we're exporting a stopped instance
14348 feedback_fn("Activating disks for %s" % instance.name)
14349 _StartInstanceDisks(self, instance, None)
14352 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
14355 helper.CreateSnapshots()
14357 if (self.op.shutdown and
14358 instance.admin_state == constants.ADMINST_UP and
14359 not self.op.remove_instance):
14360 assert not activate_disks
14361 feedback_fn("Starting instance %s" % instance.name)
14362 result = self.rpc.call_instance_start(src_node,
14363 (instance, None, None), False)
14364 msg = result.fail_msg
14366 feedback_fn("Failed to start instance: %s" % msg)
14367 _ShutdownInstanceDisks(self, instance)
14368 raise errors.OpExecError("Could not start instance: %s" % msg)
14370 if self.op.mode == constants.EXPORT_MODE_LOCAL:
14371 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
14372 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
14373 connect_timeout = constants.RIE_CONNECT_TIMEOUT
14374 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
14376 (key_name, _, _) = self.x509_key_name
14379 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
14382 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
14383 key_name, dest_ca_pem,
14388 # Check for backwards compatibility
14389 assert len(dresults) == len(instance.disks)
14390 assert compat.all(isinstance(i, bool) for i in dresults), \
14391 "Not all results are boolean: %r" % dresults
14395 feedback_fn("Deactivating disks for %s" % instance.name)
14396 _ShutdownInstanceDisks(self, instance)
14398 if not (compat.all(dresults) and fin_resu):
14401 failures.append("export finalization")
14402 if not compat.all(dresults):
14403 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
14405 failures.append("disk export: disk(s) %s" % fdsk)
14407 raise errors.OpExecError("Export failed, errors in %s" %
14408 utils.CommaJoin(failures))
14410 # At this point, the export was successful, we can cleanup/finish
14412 # Remove instance if requested
14413 if self.op.remove_instance:
14414 feedback_fn("Removing instance %s" % instance.name)
14415 _RemoveInstance(self, feedback_fn, instance,
14416 self.op.ignore_remove_failures)
14418 if self.op.mode == constants.EXPORT_MODE_LOCAL:
14419 self._CleanupExports(feedback_fn)
14421 return fin_resu, dresults
14424 class LUBackupRemove(NoHooksLU):
14425 """Remove exports related to the named instance.
14430 def ExpandNames(self):
14431 self.needed_locks = {
14432 # We need all nodes to be locked in order for RemoveExport to work, but
14433 # we don't need to lock the instance itself, as nothing will happen to it
14434 # (and we can remove exports also for a removed instance)
14435 locking.LEVEL_NODE: locking.ALL_SET,
14437 # Removing backups is quick, so blocking allocations is justified
14438 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
14441 # Allocations should be stopped while this LU runs with node locks, but it
14442 # doesn't have to be exclusive
14443 self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
14445 def Exec(self, feedback_fn):
14446 """Remove any export.
14449 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
14450 # If the instance was not found we'll try with the name that was passed in.
14451 # This will only work if it was an FQDN, though.
14453 if not instance_name:
14455 instance_name = self.op.instance_name
14457 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
14458 exportlist = self.rpc.call_export_list(locked_nodes)
14460 for node in exportlist:
14461 msg = exportlist[node].fail_msg
14463 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
14465 if instance_name in exportlist[node].payload:
14467 result = self.rpc.call_export_remove(node, instance_name)
14468 msg = result.fail_msg
14470 logging.error("Could not remove export for instance %s"
14471 " on node %s: %s", instance_name, node, msg)
14473 if fqdn_warn and not found:
14474 feedback_fn("Export not found. If trying to remove an export belonging"
14475 " to a deleted instance please use its Fully Qualified"
14479 class LUGroupAdd(LogicalUnit):
14480 """Logical unit for creating node groups.
14483 HPATH = "group-add"
14484 HTYPE = constants.HTYPE_GROUP
14487 def ExpandNames(self):
14488 # We need the new group's UUID here so that we can create and acquire the
14489 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
14490 # that it should not check whether the UUID exists in the configuration.
14491 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
14492 self.needed_locks = {}
14493 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
14495 def CheckPrereq(self):
14496 """Check prerequisites.
14498 This checks that the given group name is not an existing node group
14503 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14504 except errors.OpPrereqError:
14507 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
14508 " node group (UUID: %s)" %
14509 (self.op.group_name, existing_uuid),
14510 errors.ECODE_EXISTS)
14512 if self.op.ndparams:
14513 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
14515 if self.op.hv_state:
14516 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
14518 self.new_hv_state = None
14520 if self.op.disk_state:
14521 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
14523 self.new_disk_state = None
14525 if self.op.diskparams:
14526 for templ in constants.DISK_TEMPLATES:
14527 if templ in self.op.diskparams:
14528 utils.ForceDictType(self.op.diskparams[templ],
14529 constants.DISK_DT_TYPES)
14530 self.new_diskparams = self.op.diskparams
14532 utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
14533 except errors.OpPrereqError, err:
14534 raise errors.OpPrereqError("While verifying diskparams options: %s" % err,
14535 errors.ECODE_INVAL)
14537 self.new_diskparams = {}
14539 if self.op.ipolicy:
14540 cluster = self.cfg.GetClusterInfo()
14541 full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
14543 objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
14544 except errors.ConfigurationError, err:
14545 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
14546 errors.ECODE_INVAL)
14548 def BuildHooksEnv(self):
14549 """Build hooks env.
14553 "GROUP_NAME": self.op.group_name,
14556 def BuildHooksNodes(self):
14557 """Build hooks nodes.
14560 mn = self.cfg.GetMasterNode()
14561 return ([mn], [mn])
14563 def Exec(self, feedback_fn):
14564 """Add the node group to the cluster.
14567 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
14568 uuid=self.group_uuid,
14569 alloc_policy=self.op.alloc_policy,
14570 ndparams=self.op.ndparams,
14571 diskparams=self.new_diskparams,
14572 ipolicy=self.op.ipolicy,
14573 hv_state_static=self.new_hv_state,
14574 disk_state_static=self.new_disk_state)
14576 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
14577 del self.remove_locks[locking.LEVEL_NODEGROUP]
14580 class LUGroupAssignNodes(NoHooksLU):
14581 """Logical unit for assigning nodes to groups.
14586 def ExpandNames(self):
14587 # These raise errors.OpPrereqError on their own:
14588 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14589 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
14591 # We want to lock all the affected nodes and groups. We have readily
14592 # available the list of nodes, and the *destination* group. To gather the
14593 # list of "source" groups, we need to fetch node information later on.
14594 self.needed_locks = {
14595 locking.LEVEL_NODEGROUP: set([self.group_uuid]),
14596 locking.LEVEL_NODE: self.op.nodes,
14599 def DeclareLocks(self, level):
14600 if level == locking.LEVEL_NODEGROUP:
14601 assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
14603 # Try to get all affected nodes' groups without having the group or node
14604 # lock yet. Needs verification later in the code flow.
14605 groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
14607 self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
14609 def CheckPrereq(self):
14610 """Check prerequisites.
14613 assert self.needed_locks[locking.LEVEL_NODEGROUP]
14614 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
14615 frozenset(self.op.nodes))
14617 expected_locks = (set([self.group_uuid]) |
14618 self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
14619 actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
14620 if actual_locks != expected_locks:
14621 raise errors.OpExecError("Nodes changed groups since locks were acquired,"
14622 " current groups are '%s', used to be '%s'" %
14623 (utils.CommaJoin(expected_locks),
14624 utils.CommaJoin(actual_locks)))
14626 self.node_data = self.cfg.GetAllNodesInfo()
14627 self.group = self.cfg.GetNodeGroup(self.group_uuid)
14628 instance_data = self.cfg.GetAllInstancesInfo()
14630 if self.group is None:
14631 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
14632 (self.op.group_name, self.group_uuid))
14634 (new_splits, previous_splits) = \
14635 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
14636 for node in self.op.nodes],
14637 self.node_data, instance_data)
14640 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
14642 if not self.op.force:
14643 raise errors.OpExecError("The following instances get split by this"
14644 " change and --force was not given: %s" %
14647 self.LogWarning("This operation will split the following instances: %s",
14650 if previous_splits:
14651 self.LogWarning("In addition, these already-split instances continue"
14652 " to be split across groups: %s",
14653 utils.CommaJoin(utils.NiceSort(previous_splits)))
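# Illustrative note, not part of the original code: the checks above are one
# instance of the optimistic-locking pattern used by several group LUs in this
# module (see also LUGroupSetParams and LUGroupEvacuate below).  It compresses
# to three steps:
#   1. DeclareLocks guesses the affected groups from the configuration without
#      yet holding node or group locks (cfg.GetNodeGroupsFromNodes(nodes));
#   2. the guessed group locks are acquired;
#   3. CheckPrereq recomputes the same set under the locks and raises
#      OpExecError if it no longer matches what was acquired, i.e. nodes were
#      moved to other groups concurrently.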
14655 def Exec(self, feedback_fn):
14656 """Assign nodes to a new group.
14659 mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
14661 self.cfg.AssignGroupNodes(mods)
14664 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
14665 """Check for split instances after a node assignment.
14667 This method considers a series of node assignments as an atomic operation,
14668 and returns information about split instances after applying the set of
14671 In particular, it returns information about newly split instances, and
14672 instances that were already split, and remain so after the change.
14674 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
14677 @type changes: list of (node_name, new_group_uuid) pairs.
14678 @param changes: list of node assignments to consider.
14679 @param node_data: a dict with data for all nodes
14680 @param instance_data: a dict with all instances to consider
14681 @rtype: a two-tuple
14682 @return: a list of instances that were previously okay and end up split as a
14683 consequence of this change, and a list of instances that were previously
14684 split and that this change does not fix.
14687 changed_nodes = dict((node, group) for node, group in changes
14688 if node_data[node].group != group)
14690 all_split_instances = set()
14691 previously_split_instances = set()
14693 def InstanceNodes(instance):
14694 return [instance.primary_node] + list(instance.secondary_nodes)
14696 for inst in instance_data.values():
14697 if inst.disk_template not in constants.DTS_INT_MIRROR:
14700 instance_nodes = InstanceNodes(inst)
14702 if len(set(node_data[node].group for node in instance_nodes)) > 1:
14703 previously_split_instances.add(inst.name)
14705 if len(set(changed_nodes.get(node, node_data[node].group)
14706 for node in instance_nodes)) > 1:
14707 all_split_instances.add(inst.name)
14709 return (list(all_split_instances - previously_split_instances),
14710 list(previously_split_instances & all_split_instances))
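# Illustrative sketch, not part of the original module: the split computation
# above on plain dicts instead of config objects, to make the two returned
# lists easier to see.  All names and data below are made up.
def _SplitInstancesSketch(changes, node_group, instance_nodes):
  """Toy version of CheckAssignmentForSplitInstances.

  @param changes: list of (node_name, new_group) pairs
  @param node_group: dict mapping node name to its current group
  @param instance_nodes: dict mapping instance name to its list of nodes

  """
  moved = dict((node, group) for (node, group) in changes
               if node_group[node] != group)
  new_split = set()
  old_split = set()
  for (name, nodes) in instance_nodes.items():
    was_split = len(set(node_group[node] for node in nodes)) > 1
    is_split = len(set(moved.get(node, node_group[node])
                       for node in nodes)) > 1
    if was_split and is_split:
      old_split.add(name)
    elif is_split:
      new_split.add(name)
  return (sorted(new_split), sorted(old_split))

# For example, with both nodes of a mirrored instance in group "g1", moving
# one node to "g2" newly splits it:
#   _SplitInstancesSketch([("n1", "g2")], {"n1": "g1", "n2": "g1"},
#                         {"inst1": ["n1", "n2"]}) == (["inst1"], [])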
14713 class _GroupQuery(_QueryBase):
14714 FIELDS = query.GROUP_FIELDS
14716 def ExpandNames(self, lu):
14717 lu.needed_locks = {}
14719 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
14720 self._cluster = lu.cfg.GetClusterInfo()
14721 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
14724 self.wanted = [name_to_uuid[name]
14725 for name in utils.NiceSort(name_to_uuid.keys())]
14727 # Accept either group names or UUIDs.
14730 all_uuid = frozenset(self._all_groups.keys())
14732 for name in self.names:
14733 if name in all_uuid:
14734 self.wanted.append(name)
14735 elif name in name_to_uuid:
14736 self.wanted.append(name_to_uuid[name])
14738 missing.append(name)
14741 raise errors.OpPrereqError("Some groups do not exist: %s" %
14742 utils.CommaJoin(missing),
14743 errors.ECODE_NOENT)
14745 def DeclareLocks(self, lu, level):
14748 def _GetQueryData(self, lu):
14749 """Computes the list of node groups and their attributes.
14752 do_nodes = query.GQ_NODE in self.requested_data
14753 do_instances = query.GQ_INST in self.requested_data
14755 group_to_nodes = None
14756 group_to_instances = None
14758 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
14759 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
14760 # latter GetAllInstancesInfo() is not enough, for we have to go through
14761 # instance->node. Hence, we will need to process nodes even if we only need
14762 # instance information.
14763 if do_nodes or do_instances:
14764 all_nodes = lu.cfg.GetAllNodesInfo()
14765 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
14768 for node in all_nodes.values():
14769 if node.group in group_to_nodes:
14770 group_to_nodes[node.group].append(node.name)
14771 node_to_group[node.name] = node.group
14774 all_instances = lu.cfg.GetAllInstancesInfo()
14775 group_to_instances = dict((uuid, []) for uuid in self.wanted)
14777 for instance in all_instances.values():
14778 node = instance.primary_node
14779 if node in node_to_group:
14780 group_to_instances[node_to_group[node]].append(instance.name)
14783 # Do not pass on node information if it was not requested.
14784 group_to_nodes = None
14786 return query.GroupQueryData(self._cluster,
14787 [self._all_groups[uuid]
14788 for uuid in self.wanted],
14789 group_to_nodes, group_to_instances,
14790 query.GQ_DISKPARAMS in self.requested_data)
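# Illustrative sketch, not part of the original module: the membership maps
# built in _GetQueryData above, on plain dicts.  Instances attach to a group
# through their primary node, which is why node data must be processed even
# when only instance information was requested.  All parameter names are
# hypothetical.
def _GroupMembershipSketch(wanted_groups, node_groups, instance_pnodes):
  """Returns (group_to_nodes, group_to_instances) for the wanted group UUIDs.

  @param node_groups: dict mapping node name to its group UUID
  @param instance_pnodes: dict mapping instance name to its primary node

  """
  group_to_nodes = dict((uuid, []) for uuid in wanted_groups)
  node_to_group = {}
  for (node, group) in node_groups.items():
    if group in group_to_nodes:
      group_to_nodes[group].append(node)
      node_to_group[node] = group
  group_to_instances = dict((uuid, []) for uuid in wanted_groups)
  for (inst, pnode) in instance_pnodes.items():
    if pnode in node_to_group:
      group_to_instances[node_to_group[pnode]].append(inst)
  return (group_to_nodes, group_to_instances)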
14793 class LUGroupQuery(NoHooksLU):
14794 """Logical unit for querying node groups.
14799 def CheckArguments(self):
14800 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
14801 self.op.output_fields, False)
14803 def ExpandNames(self):
14804 self.gq.ExpandNames(self)
14806 def DeclareLocks(self, level):
14807 self.gq.DeclareLocks(self, level)
14809 def Exec(self, feedback_fn):
14810 return self.gq.OldStyleQuery(self)
14813 class LUGroupSetParams(LogicalUnit):
14814 """Modifies the parameters of a node group.
14817 HPATH = "group-modify"
14818 HTYPE = constants.HTYPE_GROUP
14821 def CheckArguments(self):
14824 self.op.diskparams,
14825 self.op.alloc_policy,
14827 self.op.disk_state,
14831 if all_changes.count(None) == len(all_changes):
14832 raise errors.OpPrereqError("Please pass at least one modification",
14833 errors.ECODE_INVAL)
14835 def ExpandNames(self):
14836 # This raises errors.OpPrereqError on its own:
14837 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14839 self.needed_locks = {
14840 locking.LEVEL_INSTANCE: [],
14841 locking.LEVEL_NODEGROUP: [self.group_uuid],
14844 self.share_locks[locking.LEVEL_INSTANCE] = 1
14846 def DeclareLocks(self, level):
14847 if level == locking.LEVEL_INSTANCE:
14848 assert not self.needed_locks[locking.LEVEL_INSTANCE]
14850 # Lock instances optimistically, needs verification once group lock has
14852 self.needed_locks[locking.LEVEL_INSTANCE] = \
14853 self.cfg.GetNodeGroupInstances(self.group_uuid)
14856 def _UpdateAndVerifyDiskParams(old, new):
14857 """Updates and verifies disk parameters.
14860 new_params = _GetUpdatedParams(old, new)
14861 utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
14864 def CheckPrereq(self):
14865 """Check prerequisites.
14868 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
14870 # Check if locked instances are still correct
14871 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
14873 self.group = self.cfg.GetNodeGroup(self.group_uuid)
14874 cluster = self.cfg.GetClusterInfo()
14876 if self.group is None:
14877 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
14878 (self.op.group_name, self.group_uuid))
14880 if self.op.ndparams:
14881 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
14882 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
14883 self.new_ndparams = new_ndparams
14885 if self.op.diskparams:
14886 diskparams = self.group.diskparams
14887 uavdp = self._UpdateAndVerifyDiskParams
14888 # For each disktemplate subdict update and verify the values
14889 new_diskparams = dict((dt,
14890 uavdp(diskparams.get(dt, {}),
14891 self.op.diskparams[dt]))
14892 for dt in constants.DISK_TEMPLATES
14893 if dt in self.op.diskparams)
14894 # As we have all subdicts of diskparams ready, let's merge the actual
14895 # dict with all updated subdicts
14896 self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
14898 utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
14899 except errors.OpPrereqError, err:
14900 raise errors.OpPrereqError("While verifying diskparams options: %s" % err,
14901 errors.ECODE_INVAL)
14903 if self.op.hv_state:
14904 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
14905 self.group.hv_state_static)
14907 if self.op.disk_state:
14908 self.new_disk_state = \
14909 _MergeAndVerifyDiskState(self.op.disk_state,
14910 self.group.disk_state_static)
14912 if self.op.ipolicy:
14913 self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
14917 new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
14918 inst_filter = lambda inst: inst.name in owned_instances
14919 instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
14920 gmi = ganeti.masterd.instance
14922 _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
14924 new_ipolicy, instances)
14927 self.LogWarning("After the ipolicy change the following instances"
14928 " violate them: %s",
14929 utils.CommaJoin(violations))
14931 def BuildHooksEnv(self):
14932 """Build hooks env.
14936 "GROUP_NAME": self.op.group_name,
14937 "NEW_ALLOC_POLICY": self.op.alloc_policy,
14940 def BuildHooksNodes(self):
14941 """Build hooks nodes.
14944 mn = self.cfg.GetMasterNode()
14945 return ([mn], [mn])
14947 def Exec(self, feedback_fn):
14948 """Modifies the node group.
14953 if self.op.ndparams:
14954 self.group.ndparams = self.new_ndparams
14955 result.append(("ndparams", str(self.group.ndparams)))
14957 if self.op.diskparams:
14958 self.group.diskparams = self.new_diskparams
14959 result.append(("diskparams", str(self.group.diskparams)))
14961 if self.op.alloc_policy:
14962 self.group.alloc_policy = self.op.alloc_policy
14964 if self.op.hv_state:
14965 self.group.hv_state_static = self.new_hv_state
14967 if self.op.disk_state:
14968 self.group.disk_state_static = self.new_disk_state
14970 if self.op.ipolicy:
14971 self.group.ipolicy = self.new_ipolicy
14973 self.cfg.Update(self.group, feedback_fn)
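# Illustrative sketch, not part of the original module: the per-template disk
# parameter merge done in LUGroupSetParams.CheckPrereq above, approximated
# with plain dict updates instead of _GetUpdatedParams/objects.FillDict (so
# the "default"/"none" magic values are not handled here).
def _MergeDiskParamsSketch(old_diskparams, new_diskparams):
  """Merges submitted per-template dicts over the group's existing ones."""
  merged = dict((templ, dict(params))
                for (templ, params) in old_diskparams.items())
  for (templ, params) in new_diskparams.items():
    merged.setdefault(templ, {}).update(params)
  return merged

# e.g. _MergeDiskParamsSketch({"drbd": {"metavg": "xenvg"}},
#                             {"drbd": {"resync-rate": 2048}})
# returns {"drbd": {"metavg": "xenvg", "resync-rate": 2048}}; untouched
# templates keep their previous parameters.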
14977 class LUGroupRemove(LogicalUnit):
14978 HPATH = "group-remove"
14979 HTYPE = constants.HTYPE_GROUP
14982 def ExpandNames(self):
14983 # This raises errors.OpPrereqError on its own:
14984 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14985 self.needed_locks = {
14986 locking.LEVEL_NODEGROUP: [self.group_uuid],
14989 def CheckPrereq(self):
14990 """Check prerequisites.
14992 This checks that the given group name exists as a node group, that it is
14993 empty (i.e., contains no nodes), and that it is not the last group of the cluster.
14997 # Verify that the group is empty.
14998 group_nodes = [node.name
14999 for node in self.cfg.GetAllNodesInfo().values()
15000 if node.group == self.group_uuid]
15003 raise errors.OpPrereqError("Group '%s' not empty, has the following"
15005 (self.op.group_name,
15006 utils.CommaJoin(utils.NiceSort(group_nodes))),
15007 errors.ECODE_STATE)
15009 # Verify the cluster would not be left group-less.
15010 if len(self.cfg.GetNodeGroupList()) == 1:
15011 raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
15012 " removed" % self.op.group_name,
15013 errors.ECODE_STATE)
15015 def BuildHooksEnv(self):
15016 """Build hooks env.
15020 "GROUP_NAME": self.op.group_name,
15023 def BuildHooksNodes(self):
15024 """Build hooks nodes.
15027 mn = self.cfg.GetMasterNode()
15028 return ([mn], [mn])
15030 def Exec(self, feedback_fn):
15031 """Remove the node group.
15035 self.cfg.RemoveNodeGroup(self.group_uuid)
15036 except errors.ConfigurationError:
15037 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
15038 (self.op.group_name, self.group_uuid))
15040 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
15043 class LUGroupRename(LogicalUnit):
15044 HPATH = "group-rename"
15045 HTYPE = constants.HTYPE_GROUP
15048 def ExpandNames(self):
15049 # This raises errors.OpPrereqError on its own:
15050 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
15052 self.needed_locks = {
15053 locking.LEVEL_NODEGROUP: [self.group_uuid],
15056 def CheckPrereq(self):
15057 """Check prerequisites.
15059 Ensures requested new name is not yet used.
15063 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
15064 except errors.OpPrereqError:
15067 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
15068 " node group (UUID: %s)" %
15069 (self.op.new_name, new_name_uuid),
15070 errors.ECODE_EXISTS)
15072 def BuildHooksEnv(self):
15073 """Build hooks env.
15077 "OLD_NAME": self.op.group_name,
15078 "NEW_NAME": self.op.new_name,
15081 def BuildHooksNodes(self):
15082 """Build hooks nodes.
15085 mn = self.cfg.GetMasterNode()
15087 all_nodes = self.cfg.GetAllNodesInfo()
15088 all_nodes.pop(mn, None)
15091 run_nodes.extend(node.name for node in all_nodes.values()
15092 if node.group == self.group_uuid)
15094 return (run_nodes, run_nodes)
15096 def Exec(self, feedback_fn):
15097 """Rename the node group.
15100 group = self.cfg.GetNodeGroup(self.group_uuid)
15103 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
15104 (self.op.group_name, self.group_uuid))
15106 group.name = self.op.new_name
15107 self.cfg.Update(group, feedback_fn)
15109 return self.op.new_name
15112 class LUGroupEvacuate(LogicalUnit):
15113 HPATH = "group-evacuate"
15114 HTYPE = constants.HTYPE_GROUP
15117 def ExpandNames(self):
15118 # This raises errors.OpPrereqError on its own:
15119 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
15121 if self.op.target_groups:
15122 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
15123 self.op.target_groups)
15125 self.req_target_uuids = []
15127 if self.group_uuid in self.req_target_uuids:
15128 raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
15129 " as a target group (targets are %s)" %
15131 utils.CommaJoin(self.req_target_uuids)),
15132 errors.ECODE_INVAL)
15134 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
15136 self.share_locks = _ShareAll()
15137 self.needed_locks = {
15138 locking.LEVEL_INSTANCE: [],
15139 locking.LEVEL_NODEGROUP: [],
15140 locking.LEVEL_NODE: [],
15143 def DeclareLocks(self, level):
15144 if level == locking.LEVEL_INSTANCE:
15145 assert not self.needed_locks[locking.LEVEL_INSTANCE]
15147 # Lock instances optimistically, needs verification once node and group
15148 # locks have been acquired
15149 self.needed_locks[locking.LEVEL_INSTANCE] = \
15150 self.cfg.GetNodeGroupInstances(self.group_uuid)
15152 elif level == locking.LEVEL_NODEGROUP:
15153 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
15155 if self.req_target_uuids:
15156 lock_groups = set([self.group_uuid] + self.req_target_uuids)
15158 # Lock all groups used by instances optimistically; this requires going
15159 # via the node before it's locked, requiring verification later on
15160 lock_groups.update(group_uuid
15161 for instance_name in
15162 self.owned_locks(locking.LEVEL_INSTANCE)
15164 self.cfg.GetInstanceNodeGroups(instance_name))
15166 # No target groups, need to lock all of them
15167 lock_groups = locking.ALL_SET
15169 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
15171 elif level == locking.LEVEL_NODE:
15172 # This will only lock the nodes in the group to be evacuated which
15173 # contain actual instances
15174 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
15175 self._LockInstancesNodes()
15177 # Lock all nodes in group to be evacuated and target groups
15178 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
15179 assert self.group_uuid in owned_groups
15180 member_nodes = [node_name
15181 for group in owned_groups
15182 for node_name in self.cfg.GetNodeGroup(group).members]
15183 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
15185 def CheckPrereq(self):
15186 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
15187 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
15188 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
15190 assert owned_groups.issuperset(self.req_target_uuids)
15191 assert self.group_uuid in owned_groups
15193 # Check if locked instances are still correct
15194 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
15196 # Get instance information
15197 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
15199 # Check if node groups for locked instances are still correct
15200 _CheckInstancesNodeGroups(self.cfg, self.instances,
15201 owned_groups, owned_nodes, self.group_uuid)
15203 if self.req_target_uuids:
15204 # User requested specific target groups
15205 self.target_uuids = self.req_target_uuids
15207 # All groups except the one to be evacuated are potential targets
15208 self.target_uuids = [group_uuid for group_uuid in owned_groups
15209 if group_uuid != self.group_uuid]
15211 if not self.target_uuids:
15212 raise errors.OpPrereqError("There are no possible target groups",
15213 errors.ECODE_INVAL)
15215 def BuildHooksEnv(self):
15216 """Build hooks env.
15220 "GROUP_NAME": self.op.group_name,
15221 "TARGET_GROUPS": " ".join(self.target_uuids),
15224 def BuildHooksNodes(self):
15225 """Build hooks nodes.
15228 mn = self.cfg.GetMasterNode()
15230 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
15232 run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
15234 return (run_nodes, run_nodes)
15236 def Exec(self, feedback_fn):
15237 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
15239 assert self.group_uuid not in self.target_uuids
15241 req = iallocator.IAReqGroupChange(instances=instances,
15242 target_groups=self.target_uuids)
15243 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
15245 ial.Run(self.op.iallocator)
15247 if not ial.success:
15248 raise errors.OpPrereqError("Can't compute group evacuation using"
15249 " iallocator '%s': %s" %
15250 (self.op.iallocator, ial.info),
15251 errors.ECODE_NORES)
15253 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
15255 self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
15256 len(jobs), self.op.group_name)
15258 return ResultWithJobs(jobs)
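# Illustrative sketch, not part of the original module: the core of the
# evacuation flow above in isolation.  "cfg", "rpc" and "allocator_name" are
# hypothetical stand-ins for self.cfg, self.rpc and self.op.iallocator; the
# caller would still pass the result through _LoadNodeEvacResult and wrap the
# resulting job sets in ResultWithJobs as done above.
def _GroupEvacResultSketch(cfg, rpc, allocator_name, instances, target_uuids):
  req = iallocator.IAReqGroupChange(instances=instances,
                                    target_groups=target_uuids)
  ial = iallocator.IAllocator(cfg, rpc, req)
  ial.Run(allocator_name)
  if not ial.success:
    raise errors.OpPrereqError("Can't compute group evacuation using"
                               " iallocator '%s': %s" %
                               (allocator_name, ial.info),
                               errors.ECODE_NORES)
  return ial.result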
15261 class TagsLU(NoHooksLU): # pylint: disable=W0223
15262 """Generic tags LU.
15264 This is an abstract class which is the parent of all the other tags LUs.
15267 def ExpandNames(self):
15268 self.group_uuid = None
15269 self.needed_locks = {}
15271 if self.op.kind == constants.TAG_NODE:
15272 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
15273 lock_level = locking.LEVEL_NODE
15274 lock_name = self.op.name
15275 elif self.op.kind == constants.TAG_INSTANCE:
15276 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
15277 lock_level = locking.LEVEL_INSTANCE
15278 lock_name = self.op.name
15279 elif self.op.kind == constants.TAG_NODEGROUP:
15280 self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
15281 lock_level = locking.LEVEL_NODEGROUP
15282 lock_name = self.group_uuid
15283 elif self.op.kind == constants.TAG_NETWORK:
15284 self.network_uuid = self.cfg.LookupNetwork(self.op.name)
15285 lock_level = locking.LEVEL_NETWORK
15286 lock_name = self.network_uuid
15291 if lock_level and getattr(self.op, "use_locking", True):
15292 self.needed_locks[lock_level] = lock_name
15294 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
15295 # not possible to acquire the BGL based on opcode parameters)
15297 def CheckPrereq(self):
15298 """Check prerequisites.
15301 if self.op.kind == constants.TAG_CLUSTER:
15302 self.target = self.cfg.GetClusterInfo()
15303 elif self.op.kind == constants.TAG_NODE:
15304 self.target = self.cfg.GetNodeInfo(self.op.name)
15305 elif self.op.kind == constants.TAG_INSTANCE:
15306 self.target = self.cfg.GetInstanceInfo(self.op.name)
15307 elif self.op.kind == constants.TAG_NODEGROUP:
15308 self.target = self.cfg.GetNodeGroup(self.group_uuid)
15309 elif self.op.kind == constants.TAG_NETWORK:
15310 self.target = self.cfg.GetNetwork(self.network_uuid)
15312 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
15313 str(self.op.kind), errors.ECODE_INVAL)
15316 class LUTagsGet(TagsLU):
15317 """Returns the tags of a given object.
15322 def ExpandNames(self):
15323 TagsLU.ExpandNames(self)
15325 # Share locks as this is only a read operation
15326 self.share_locks = _ShareAll()
15328 def Exec(self, feedback_fn):
15329 """Returns the tag list.
15332 return list(self.target.GetTags())
15335 class LUTagsSearch(NoHooksLU):
15336 """Searches the tags for a given pattern.
15341 def ExpandNames(self):
15342 self.needed_locks = {}
15344 def CheckPrereq(self):
15345 """Check prerequisites.
15347 This checks the pattern passed for validity by compiling it.
15351 self.re = re.compile(self.op.pattern)
15352 except re.error, err:
15353 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
15354 (self.op.pattern, err), errors.ECODE_INVAL)
15356 def Exec(self, feedback_fn):
15357 """Returns the tag list.
15361 tgts = [("/cluster", cfg.GetClusterInfo())]
15362 ilist = cfg.GetAllInstancesInfo().values()
15363 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
15364 nlist = cfg.GetAllNodesInfo().values()
15365 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
15366 tgts.extend(("/nodegroup/%s" % n.name, n)
15367 for n in cfg.GetAllNodeGroupsInfo().values())
15369 for path, target in tgts:
15370 for tag in target.GetTags():
15371 if self.re.search(tag):
15372 results.append((path, tag))
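# Illustrative sketch, not part of the original module: the search above,
# reduced to plain data.  Given (path, tags) pairs, it collects every
# (path, tag) whose tag matches the compiled pattern.
def _SearchTagsSketch(pattern, tagged_paths):
  """@param tagged_paths: list of (path, iterable of tags) pairs"""
  rx = re.compile(pattern)
  return [(path, tag)
          for (path, tags) in tagged_paths
          for tag in tags
          if rx.search(tag)]

# e.g. _SearchTagsSketch("^prod", [("/cluster", ["prod", "staging"])])
# returns [("/cluster", "prod")].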
15376 class LUTagsSet(TagsLU):
15377 """Sets a tag on a given object.
15382 def CheckPrereq(self):
15383 """Check prerequisites.
15385 This checks the type and length of the tag name and value.
15388 TagsLU.CheckPrereq(self)
15389 for tag in self.op.tags:
15390 objects.TaggableObject.ValidateTag(tag)
15392 def Exec(self, feedback_fn):
15397 for tag in self.op.tags:
15398 self.target.AddTag(tag)
15399 except errors.TagError, err:
15400 raise errors.OpExecError("Error while setting tag: %s" % str(err))
15401 self.cfg.Update(self.target, feedback_fn)
15404 class LUTagsDel(TagsLU):
15405 """Delete a list of tags from a given object.
15410 def CheckPrereq(self):
15411 """Check prerequisites.
15413 This checks that we have the given tag.
15416 TagsLU.CheckPrereq(self)
15417 for tag in self.op.tags:
15418 objects.TaggableObject.ValidateTag(tag)
15419 del_tags = frozenset(self.op.tags)
15420 cur_tags = self.target.GetTags()
15422 diff_tags = del_tags - cur_tags
15424 diff_names = ("'%s'" % i for i in sorted(diff_tags))
15425 raise errors.OpPrereqError("Tag(s) %s not found" %
15426 (utils.CommaJoin(diff_names), ),
15427 errors.ECODE_NOENT)
15429 def Exec(self, feedback_fn):
15430 """Remove the tag from the object.
15433 for tag in self.op.tags:
15434 self.target.RemoveTag(tag)
15435 self.cfg.Update(self.target, feedback_fn)
15438 class LUTestDelay(NoHooksLU):
15439 """Sleep for a specified amount of time.
15441 This LU sleeps on the master and/or nodes for a specified amount of
15447 def ExpandNames(self):
15448 """Expand names and set required locks.
15450 This expands the node list, if any.
15453 self.needed_locks = {}
15454 if self.op.on_nodes:
15455 # _GetWantedNodes can be used here, but is not always appropriate to use
15456 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
15457 # more information.
15458 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
15459 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
15461 def _TestDelay(self):
15462 """Do the actual sleep.
15465 if self.op.on_master:
15466 if not utils.TestDelay(self.op.duration):
15467 raise errors.OpExecError("Error during master delay test")
15468 if self.op.on_nodes:
15469 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
15470 for node, node_result in result.items():
15471 node_result.Raise("Failure during rpc call to node %s" % node)
15473 def Exec(self, feedback_fn):
15474 """Execute the test delay opcode, with the wanted repetitions.
15477 if self.op.repeat == 0:
15480 top_value = self.op.repeat - 1
15481 for i in range(self.op.repeat):
15482 self.LogInfo("Test delay iteration %d/%d", i, top_value)
15486 class LURestrictedCommand(NoHooksLU):
15487 """Logical unit for executing restricted commands.
15492 def ExpandNames(self):
15494 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
15496 self.needed_locks = {
15497 locking.LEVEL_NODE: self.op.nodes,
15499 self.share_locks = {
15500 locking.LEVEL_NODE: not self.op.use_locking,
15503 def CheckPrereq(self):
15504 """Check prerequisites.
15508 def Exec(self, feedback_fn):
15509 """Execute restricted command and return output.
15512 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
15514 # Check if correct locks are held
15515 assert set(self.op.nodes).issubset(owned_nodes)
15517 rpcres = self.rpc.call_restricted_command(self.op.nodes, self.op.command)
15521 for node_name in self.op.nodes:
15522 nres = rpcres[node_name]
15524 msg = ("Command '%s' on node '%s' failed: %s" %
15525 (self.op.command, node_name, nres.fail_msg))
15526 result.append((False, msg))
15528 result.append((True, nres.payload))
15533 class LUTestJqueue(NoHooksLU):
15534 """Utility LU to test some aspects of the job queue.
15539 # Must be lower than default timeout for WaitForJobChange to see whether it
15540 # notices changed jobs
15541 _CLIENT_CONNECT_TIMEOUT = 20.0
15542 _CLIENT_CONFIRM_TIMEOUT = 60.0
15545 def _NotifyUsingSocket(cls, cb, errcls):
15546 """Opens a Unix socket and waits for another program to connect.
15549 @param cb: Callback to send socket name to client
15550 @type errcls: class
15551 @param errcls: Exception class to use for errors
15554 # Using a temporary directory as there's no easy way to create temporary
15555 # sockets without writing a custom loop around tempfile.mktemp and
15557 tmpdir = tempfile.mkdtemp()
15559 tmpsock = utils.PathJoin(tmpdir, "sock")
15561 logging.debug("Creating temporary socket at %s", tmpsock)
15562 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
15567 # Send details to client
15570 # Wait for client to connect before continuing
15571 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
15573 (conn, _) = sock.accept()
15574 except socket.error, err:
15575 raise errcls("Client didn't connect in time (%s)" % err)
15579 # Remove as soon as client is connected
15580 shutil.rmtree(tmpdir)
15582 # Wait for client to close
15585 # pylint: disable=E1101
15586 # Instance of '_socketobject' has no ... member
15587 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
15589 except socket.error, err:
15590 raise errcls("Client failed to confirm notification (%s)" % err)
15594 def _SendNotification(self, test, arg, sockname):
15595 """Sends a notification to the client.
15598 @param test: Test name
15599 @param arg: Test argument (depends on test)
15600 @type sockname: string
15601 @param sockname: Socket path
15604 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
15606 def _Notify(self, prereq, test, arg):
15607 """Notifies the client of a test.
15610 @param prereq: Whether this is a prereq-phase test
15612 @param test: Test name
15613 @param arg: Test argument (depends on test)
15617 errcls = errors.OpPrereqError
15619 errcls = errors.OpExecError
15621 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
15625 def CheckArguments(self):
15626 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
15627 self.expandnames_calls = 0
15629 def ExpandNames(self):
15630 checkargs_calls = getattr(self, "checkargs_calls", 0)
15631 if checkargs_calls < 1:
15632 raise errors.ProgrammerError("CheckArguments was not called")
15634 self.expandnames_calls += 1
15636 if self.op.notify_waitlock:
15637 self._Notify(True, constants.JQT_EXPANDNAMES, None)
15639 self.LogInfo("Expanding names")
15641 # Get lock on master node (just to get a lock, not for a particular reason)
15642 self.needed_locks = {
15643 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
15646 def Exec(self, feedback_fn):
15647 if self.expandnames_calls < 1:
15648 raise errors.ProgrammerError("ExpandNames was not called")
15650 if self.op.notify_exec:
15651 self._Notify(False, constants.JQT_EXEC, None)
15653 self.LogInfo("Executing")
15655 if self.op.log_messages:
15656 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
15657 for idx, msg in enumerate(self.op.log_messages):
15658 self.LogInfo("Sending log message %s", idx + 1)
15659 feedback_fn(constants.JQT_MSGPREFIX + msg)
15660 # Report how many test messages have been sent
15661 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
15664 raise errors.OpExecError("Opcode failure was requested")
15669 class LUTestAllocator(NoHooksLU):
15670 """Run allocator tests.
15672 This LU runs the allocator tests
15675 def CheckPrereq(self):
15676 """Check prerequisites.
15678 This checks the opcode parameters depending on the test's direction and mode.
15681 if self.op.mode in (constants.IALLOCATOR_MODE_ALLOC,
15682 constants.IALLOCATOR_MODE_MULTI_ALLOC):
15683 for attr in ["memory", "disks", "disk_template",
15684 "os", "tags", "nics", "vcpus"]:
15685 if not hasattr(self.op, attr):
15686 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
15687 attr, errors.ECODE_INVAL)
15688 iname = self.cfg.ExpandInstanceName(self.op.name)
15689 if iname is not None:
15690 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
15691 iname, errors.ECODE_EXISTS)
15692 if not isinstance(self.op.nics, list):
15693 raise errors.OpPrereqError("Invalid parameter 'nics'",
15694 errors.ECODE_INVAL)
15695 if not isinstance(self.op.disks, list):
15696 raise errors.OpPrereqError("Invalid parameter 'disks'",
15697 errors.ECODE_INVAL)
15698 for row in self.op.disks:
15699 if (not isinstance(row, dict) or
15700 constants.IDISK_SIZE not in row or
15701 not isinstance(row[constants.IDISK_SIZE], int) or
15702 constants.IDISK_MODE not in row or
15703 row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
15704 raise errors.OpPrereqError("Invalid contents of the 'disks'"
15705 " parameter", errors.ECODE_INVAL)
15706 if self.op.hypervisor is None:
15707 self.op.hypervisor = self.cfg.GetHypervisorType()
15708 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
15709 fname = _ExpandInstanceName(self.cfg, self.op.name)
15710 self.op.name = fname
15711 self.relocate_from = \
15712 list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
15713 elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
15714 constants.IALLOCATOR_MODE_NODE_EVAC):
15715 if not self.op.instances:
15716 raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
15717 self.op.instances = _GetWantedInstances(self, self.op.instances)
15719 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
15720 self.op.mode, errors.ECODE_INVAL)
15722 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
15723 if self.op.iallocator is None:
15724 raise errors.OpPrereqError("Missing allocator name",
15725 errors.ECODE_INVAL)
15726 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
15727 raise errors.OpPrereqError("Wrong allocator test '%s'" %
15728 self.op.direction, errors.ECODE_INVAL)
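# Illustrative example, not part of the original code: a "disks" value that
# passes the checks above is a list of dicts, each carrying an integer size
# and an access mode from constants.DISK_ACCESS_SET, e.g.
#   [{constants.IDISK_SIZE: 1024, constants.IDISK_MODE: constants.DISK_RDWR}]
# (constants.DISK_RDWR is assumed here to be a member of DISK_ACCESS_SET).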
15730 def Exec(self, feedback_fn):
15731 """Run the allocator test.
15734 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
15735 req = iallocator.IAReqInstanceAlloc(name=self.op.name,
15736 memory=self.op.memory,
15737 disks=self.op.disks,
15738 disk_template=self.op.disk_template,
15742 vcpus=self.op.vcpus,
15743 spindle_use=self.op.spindle_use,
15744 hypervisor=self.op.hypervisor)
15745 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
15746 req = iallocator.IAReqRelocate(name=self.op.name,
15747 relocate_from=list(self.relocate_from))
15748 elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
15749 req = iallocator.IAReqGroupChange(instances=self.op.instances,
15750 target_groups=self.op.target_groups)
15751 elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
15752 req = iallocator.IAReqNodeEvac(instances=self.op.instances,
15753 evac_mode=self.op.evac_mode)
15754 elif self.op.mode == constants.IALLOCATOR_MODE_MULTI_ALLOC:
15755 disk_template = self.op.disk_template
15756 insts = [iallocator.IAReqInstanceAlloc(name="%s%s" % (self.op.name, idx),
15757 memory=self.op.memory,
15758 disks=self.op.disks,
15759 disk_template=disk_template,
15763 vcpus=self.op.vcpus,
15764 spindle_use=self.op.spindle_use,
15765 hypervisor=self.op.hypervisor)
15766 for idx in range(self.op.count)]
15767 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
15769 raise errors.ProgrammerError("Unhandled mode %s in"
15770 " LUTestAllocator.Exec", self.op.mode)
15772 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
15773 if self.op.direction == constants.IALLOCATOR_DIR_IN:
15774 result = ial.in_text
15776 ial.Run(self.op.iallocator, validate=False)
15777 result = ial.out_text
15781 class LUNetworkAdd(LogicalUnit):
15782 """Logical unit for creating networks.
15785 HPATH = "network-add"
15786 HTYPE = constants.HTYPE_NETWORK
15789 def BuildHooksNodes(self):
15790 """Build hooks nodes.
15793 mn = self.cfg.GetMasterNode()
15794 return ([mn], [mn])
15796 def CheckArguments(self):
15797 if self.op.mac_prefix:
15798 self.op.mac_prefix = \
15799 utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
15801 def ExpandNames(self):
15802 self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
15804 if self.op.conflicts_check:
15805 self.share_locks[locking.LEVEL_NODE] = 1
15806 self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
15807 self.needed_locks = {
15808 locking.LEVEL_NODE: locking.ALL_SET,
15809 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
15812 self.needed_locks = {}
15814 self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid
15816 def CheckPrereq(self):
15817 if self.op.network is None:
15818 raise errors.OpPrereqError("Network must be given",
15819 errors.ECODE_INVAL)
15821 uuid = self.cfg.LookupNetwork(self.op.network_name)
15824 raise errors.OpPrereqError("Network '%s' already defined" %
15825 self.op.network_name, errors.ECODE_EXISTS)
15827 # Check tag validity
15828 for tag in self.op.tags:
15829 objects.TaggableObject.ValidateTag(tag)
15831 def BuildHooksEnv(self):
15832 """Build hooks env.
15836 "name": self.op.network_name,
15837 "subnet": self.op.network,
15838 "gateway": self.op.gateway,
15839 "network6": self.op.network6,
15840 "gateway6": self.op.gateway6,
15841 "mac_prefix": self.op.mac_prefix,
15842 "network_type": self.op.network_type,
15843 "tags": self.op.tags,
15845 return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
15847 def Exec(self, feedback_fn):
15848 """Add the ip pool to the cluster.
15851 nobj = objects.Network(name=self.op.network_name,
15852 network=self.op.network,
15853 gateway=self.op.gateway,
15854 network6=self.op.network6,
15855 gateway6=self.op.gateway6,
15856 mac_prefix=self.op.mac_prefix,
15857 network_type=self.op.network_type,
15858 uuid=self.network_uuid,
15859 family=constants.IP4_VERSION)
15860 # Initialize the associated address pool
15862 pool = network.AddressPool.InitializeNetwork(nobj)
15863 except errors.AddressPoolError, e:
15864 raise errors.OpExecError("Cannot create IP pool for this network: %s" % e)
15866 # Check if we need to reserve the nodes and the cluster master IP
15867 # These may not be allocated to any instances in routed mode, as
15868 # they wouldn't function anyway.
15869 if self.op.conflicts_check:
15870 for node in self.cfg.GetAllNodesInfo().values():
15871 for ip in [node.primary_ip, node.secondary_ip]:
15873 if pool.Contains(ip):
15875 self.LogInfo("Reserved IP address of node '%s' (%s)",
15877 except errors.AddressPoolError:
15878 self.LogWarning("Cannot reserve IP address of node '%s' (%s)",
15881 master_ip = self.cfg.GetClusterInfo().master_ip
15883 if pool.Contains(master_ip):
15884 pool.Reserve(master_ip)
15885 self.LogInfo("Reserved cluster master IP address (%s)", master_ip)
15886 except errors.AddressPoolError:
15887 self.LogWarning("Cannot reserve cluster master IP address (%s)",
15890 if self.op.add_reserved_ips:
15891 for ip in self.op.add_reserved_ips:
15893 pool.Reserve(ip, external=True)
15894 except errors.AddressPoolError, e:
15895 raise errors.OpExecError("Cannot reserve IP %s. %s " % (ip, e))
15898 for tag in self.op.tags:
15901 self.cfg.AddNetwork(nobj, self.proc.GetECId(), check_uuid=False)
15902 del self.remove_locks[locking.LEVEL_NETWORK]
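# Illustrative sketch, not part of the original module: the reservation logic
# of LUNetworkAdd.Exec above in isolation.  Node, master and explicitly
# requested addresses are marked in the pool so they are never handed out to
# instances.  "net_obj" and the other parameters are hypothetical stand-ins
# for the objects built and queried above.
def _ReserveInfrastructureIpsSketch(net_obj, node_ips, master_ip,
                                    extra_reserved):
  pool = network.AddressPool.InitializeNetwork(net_obj)
  for ip in node_ips + [master_ip]:
    try:
      if pool.Contains(ip):
        pool.Reserve(ip)
    except errors.AddressPoolError:
      pass  # mirrors the LU, which only logs a warning in this case
  for ip in extra_reserved:
    pool.Reserve(ip, external=True)
  return pool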
15905 class LUNetworkRemove(LogicalUnit):
15906 HPATH = "network-remove"
15907 HTYPE = constants.HTYPE_NETWORK
15910 def ExpandNames(self):
15911 self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
15913 if not self.network_uuid:
15914 raise errors.OpPrereqError(("Network '%s' not found" %
15915 self.op.network_name), errors.ECODE_NOENT)
15917 self.share_locks[locking.LEVEL_NODEGROUP] = 1
15918 self.needed_locks = {
15919 locking.LEVEL_NETWORK: [self.network_uuid],
15920 locking.LEVEL_NODEGROUP: locking.ALL_SET,
15923 def CheckPrereq(self):
15924 """Check prerequisites.
15926 This checks that the given network name exists as a network and that it
15927 is not connected to any node group.
15931 # Verify that the network is not connected.
15932 node_groups = [group.name
15933 for group in self.cfg.GetAllNodeGroupsInfo().values()
15934 if self.network_uuid in group.networks]
15937 self.LogWarning("Network '%s' is connected to the following"
15938 " node groups: %s" %
15939 (self.op.network_name,
15940 utils.CommaJoin(utils.NiceSort(node_groups))))
15941 raise errors.OpPrereqError("Network still connected", errors.ECODE_STATE)
15943 def BuildHooksEnv(self):
15944 """Build hooks env.
15948 "NETWORK_NAME": self.op.network_name,
15951 def BuildHooksNodes(self):
15952 """Build hooks nodes.
15955 mn = self.cfg.GetMasterNode()
15956 return ([mn], [mn])
15958 def Exec(self, feedback_fn):
15959 """Remove the network.
15963 self.cfg.RemoveNetwork(self.network_uuid)
15964 except errors.ConfigurationError:
15965 raise errors.OpExecError("Network '%s' with UUID %s disappeared" %
15966 (self.op.network_name, self.network_uuid))
15969 class LUNetworkSetParams(LogicalUnit):
15970 """Modifies the parameters of a network.
15973 HPATH = "network-modify"
15974 HTYPE = constants.HTYPE_NETWORK
15977 def CheckArguments(self):
15978 if (self.op.gateway and
15979 (self.op.add_reserved_ips or self.op.remove_reserved_ips)):
15980 raise errors.OpPrereqError("Cannot modify gateway and reserved ips"
15981 " at once", errors.ECODE_INVAL)
15983 def ExpandNames(self):
15984 self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
15985 if self.network_uuid is None:
15986 raise errors.OpPrereqError(("Network '%s' not found" %
15987 self.op.network_name), errors.ECODE_NOENT)
15989 self.needed_locks = {
15990 locking.LEVEL_NETWORK: [self.network_uuid],
15993 def CheckPrereq(self):
15994 """Check prerequisites.
15997 self.network = self.cfg.GetNetwork(self.network_uuid)
15998 self.gateway = self.network.gateway
15999 self.network_type = self.network.network_type
16000 self.mac_prefix = self.network.mac_prefix
16001 self.network6 = self.network.network6
16002 self.gateway6 = self.network.gateway6
16003 self.tags = self.network.tags
16005 self.pool = network.AddressPool(self.network)
16007 if self.op.gateway:
16008 if self.op.gateway == constants.VALUE_NONE:
16009 self.gateway = None
16011 self.gateway = self.op.gateway
16012 if self.pool.IsReserved(self.gateway):
16013 raise errors.OpPrereqError("Gateway IP address '%s' is already"
16014 " reserved" % self.gateway,
16015 errors.ECODE_STATE)
16017 if self.op.network_type:
16018 if self.op.network_type == constants.VALUE_NONE:
16019 self.network_type = None
16021 self.network_type = self.op.network_type
16023 if self.op.mac_prefix:
16024 if self.op.mac_prefix == constants.VALUE_NONE:
16025 self.mac_prefix = None
16027 self.mac_prefix = \
16028 utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
16030 if self.op.gateway6:
16031 if self.op.gateway6 == constants.VALUE_NONE:
16032 self.gateway6 = None
16034 self.gateway6 = self.op.gateway6
16036 if self.op.network6:
16037 if self.op.network6 == constants.VALUE_NONE:
16038 self.network6 = None
16040 self.network6 = self.op.network6
16042 def BuildHooksEnv(self):
16043 """Build hooks env.
16047 "name": self.op.network_name,
16048 "subnet": self.network.network,
16049 "gateway": self.gateway,
16050 "network6": self.network6,
16051 "gateway6": self.gateway6,
16052 "mac_prefix": self.mac_prefix,
16053 "network_type": self.network_type,
16056 return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
16058 def BuildHooksNodes(self):
16059 """Build hooks nodes.
16062 mn = self.cfg.GetMasterNode()
16063 return ([mn], [mn])
16065 def Exec(self, feedback_fn):
16066 """Modifies the network.
16069 #TODO: reserve/release via temporary reservation manager
16070 # extend cfg.ReserveIp/ReleaseIp with the external flag
16071 if self.op.gateway:
16072 if self.gateway == self.network.gateway:
16073 self.LogWarning("Gateway is already %s", self.gateway)
16076 self.pool.Reserve(self.gateway, external=True)
16077 if self.network.gateway:
16078 self.pool.Release(self.network.gateway, external=True)
16079 self.network.gateway = self.gateway
16081 if self.op.add_reserved_ips:
16082 for ip in self.op.add_reserved_ips:
16084 if self.pool.IsReserved(ip):
16085 self.LogWarning("IP address %s is already reserved", ip)
16087 self.pool.Reserve(ip, external=True)
16088 except errors.AddressPoolError, err:
16089 self.LogWarning("Cannot reserve IP address %s: %s", ip, err)
16091 if self.op.remove_reserved_ips:
16092 for ip in self.op.remove_reserved_ips:
16093 if ip == self.network.gateway:
16094 self.LogWarning("Cannot unreserve Gateway's IP")
16097 if not self.pool.IsReserved(ip):
16098 self.LogWarning("IP address %s is already unreserved", ip)
16100 self.pool.Release(ip, external=True)
16101 except errors.AddressPoolError, err:
16102 self.LogWarning("Cannot release IP address %s: %s", ip, err)
16104 if self.op.mac_prefix:
16105 self.network.mac_prefix = self.mac_prefix
16107 if self.op.network6:
16108 self.network.network6 = self.network6
16110 if self.op.gateway6:
16111 self.network.gateway6 = self.gateway6
16113 if self.op.network_type:
16114 self.network.network_type = self.network_type
16116 self.pool.Validate()
16118 self.cfg.Update(self.network, feedback_fn)
16121 class _NetworkQuery(_QueryBase):
16122 FIELDS = query.NETWORK_FIELDS
16124 def ExpandNames(self, lu):
16125 lu.needed_locks = {}
16127 self._all_networks = lu.cfg.GetAllNetworksInfo()
16128 name_to_uuid = dict((n.name, n.uuid) for n in self._all_networks.values())
16131 self.wanted = [name_to_uuid[name]
16132 for name in utils.NiceSort(name_to_uuid.keys())]
16134 # Accept either network names or UUIDs.
16137 all_uuid = frozenset(self._all_networks.keys())
16139 for name in self.names:
16140 if name in all_uuid:
16141 self.wanted.append(name)
16142 elif name in name_to_uuid:
16143 self.wanted.append(name_to_uuid[name])
16145 missing.append(name)
16148 raise errors.OpPrereqError("Some networks do not exist: %s" % utils.CommaJoin(missing),
16149 errors.ECODE_NOENT)
16151 def DeclareLocks(self, lu, level):
16154 def _GetQueryData(self, lu):
16155 """Computes the list of networks and their attributes.
16158 do_instances = query.NETQ_INST in self.requested_data
16159 do_groups = do_instances or (query.NETQ_GROUP in self.requested_data)
16161 network_to_groups = None
16162 network_to_instances = None
16164 # For NETQ_GROUP, we need to map network->[groups]
16166 all_groups = lu.cfg.GetAllNodeGroupsInfo()
16167 network_to_groups = dict((uuid, []) for uuid in self.wanted)
16170 all_instances = lu.cfg.GetAllInstancesInfo()
16171 all_nodes = lu.cfg.GetAllNodesInfo()
16172 network_to_instances = dict((uuid, []) for uuid in self.wanted)
16174 for group in all_groups.values():
16176 group_nodes = [node.name for node in all_nodes.values() if
16177 node.group == group.uuid]
16178 group_instances = [instance for instance in all_instances.values()
16179 if instance.primary_node in group_nodes]
16181 for net_uuid in group.networks.keys():
16182 if net_uuid in network_to_groups:
16183 netparams = group.networks[net_uuid]
16184 mode = netparams[constants.NIC_MODE]
16185 link = netparams[constants.NIC_LINK]
16186 info = "%s(%s, %s)" % (group.name, mode, link)
16187 network_to_groups[net_uuid].append(info)
16190 for instance in group_instances:
16191 for nic in instance.nics:
16192 if nic.network == self._all_networks[net_uuid].name:
16193 network_to_instances[net_uuid].append(instance.name)
16196 if query.NETQ_STATS in self.requested_data:
16199 self._GetStats(network.AddressPool(self._all_networks[uuid])))
16200 for uuid in self.wanted)
16204 return query.NetworkQueryData([self._all_networks[uuid]
16205 for uuid in self.wanted],
16207 network_to_instances,
16211 def _GetStats(pool):
16212 """Returns statistics for a network address pool.
16216 "free_count": pool.GetFreeCount(),
16217 "reserved_count": pool.GetReservedCount(),
16218 "map": pool.GetMap(),
16219 "external_reservations":
16220 utils.CommaJoin(pool.GetExternalReservations()),
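# Illustrative example, not part of the original code, with made-up values:
# the stats computed above form a flat dict per network, roughly
#   {"free_count": 250, "reserved_count": 6,
#    "map": "XX....X...",   # one character per address in the pool
#    "external_reservations": "192.0.2.0, 192.0.2.1, 192.0.2.255"}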
16224 class LUNetworkQuery(NoHooksLU):
16225 """Logical unit for querying networks.
16230 def CheckArguments(self):
16231 self.nq = _NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
16232 self.op.output_fields, False)
16234 def ExpandNames(self):
16235 self.nq.ExpandNames(self)
16237 def Exec(self, feedback_fn):
16238 return self.nq.OldStyleQuery(self)
16241 class LUNetworkConnect(LogicalUnit):
16242 """Connect a network to a nodegroup
16245 HPATH = "network-connect"
16246 HTYPE = constants.HTYPE_NETWORK
16249 def ExpandNames(self):
16250 self.network_name = self.op.network_name
16251 self.group_name = self.op.group_name
16252 self.network_mode = self.op.network_mode
16253 self.network_link = self.op.network_link
16255 self.network_uuid = self.cfg.LookupNetwork(self.network_name)
16256 if self.network_uuid is None:
16257 raise errors.OpPrereqError("Network '%s' does not exist" %
16258 self.network_name, errors.ECODE_NOENT)
16260 self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
16261 if self.group_uuid is None:
16262 raise errors.OpPrereqError("Group '%s' does not exist" %
16263 self.group_name, errors.ECODE_NOENT)
16265 self.needed_locks = {
16266 locking.LEVEL_INSTANCE: [],
16267 locking.LEVEL_NODEGROUP: [self.group_uuid],
16269 self.share_locks[locking.LEVEL_INSTANCE] = 1
16271 if self.op.conflicts_check:
16272 self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
16273 self.share_locks[locking.LEVEL_NETWORK] = 1
16275 def DeclareLocks(self, level):
16276 if level == locking.LEVEL_INSTANCE:
16277 assert not self.needed_locks[locking.LEVEL_INSTANCE]
16279 # Lock instances optimistically, needs verification once group lock has
16281 if self.op.conflicts_check:
16282 self.needed_locks[locking.LEVEL_INSTANCE] = \
16283 self.cfg.GetNodeGroupInstances(self.group_uuid)
16285 def BuildHooksEnv(self):
16287 "GROUP_NAME": self.group_name,
16288 "GROUP_NETWORK_MODE": self.network_mode,
16289 "GROUP_NETWORK_LINK": self.network_link,
16293 def BuildHooksNodes(self):
16294 nodes = self.cfg.GetNodeGroup(self.group_uuid).members
16295 return (nodes, nodes)
16297 def CheckPrereq(self):
16298 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
16300 assert self.group_uuid in owned_groups
16303 constants.NIC_MODE: self.network_mode,
16304 constants.NIC_LINK: self.network_link,
16306 objects.NIC.CheckParameterSyntax(self.netparams)
16308 self.group = self.cfg.GetNodeGroup(self.group_uuid)
16309 #if self.network_mode == constants.NIC_MODE_BRIDGED:
16310 # _CheckNodeGroupBridgesExist(self, self.network_link, self.group_uuid)
16311 self.connected = False
16312 if self.network_uuid in self.group.networks:
16313 self.LogWarning("Network '%s' is already mapped to group '%s'" %
16314 (self.network_name, self.group.name))
16315 self.connected = True
16318 if self.op.conflicts_check:
16319 pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
16321 _NetworkConflictCheck(self, lambda nic: pool.Contains(nic.ip),
16324 def Exec(self, feedback_fn):
16328 self.group.networks[self.network_uuid] = self.netparams
16329 self.cfg.Update(self.group, feedback_fn)
16332 def _NetworkConflictCheck(lu, check_fn, action):
16333 """Checks for network interface conflicts with a network.
16335 @type lu: L{LogicalUnit}
16336 @type check_fn: callable receiving one parameter (L{objects.NIC}) and
16338 @param check_fn: Function checking for conflict
16339 @type action: string
16340 @param action: Part of error message (see code)
16341 @raise errors.OpPrereqError: If conflicting IP addresses are found.
16344 # Check if locked instances are still correct
16345 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
16346 _CheckNodeGroupInstances(lu.cfg, lu.group_uuid, owned_instances)
16350 for (_, instance) in lu.cfg.GetMultiInstanceInfo(owned_instances):
16351 instconflicts = [(idx, nic.ip)
16352 for (idx, nic) in enumerate(instance.nics)
16356 conflicts.append((instance.name, instconflicts))
16359 lu.LogWarning("IP addresses from network '%s', which is about to %s"
16360 " node group '%s', are in use: %s" %
16361 (lu.network_name, action, lu.group.name,
16362 utils.CommaJoin(("%s: %s" %
16363 (name, _FmtNetworkConflict(details)))
16364 for (name, details) in conflicts)))
16366 raise errors.OpPrereqError("Conflicting IP addresses found; "
16367 " remove/modify the corresponding network"
16368 " interfaces", errors.ECODE_STATE)
16371 def _FmtNetworkConflict(details):
16372 """Utility for L{_NetworkConflictCheck}.
16375 return utils.CommaJoin("nic%s/%s" % (idx, ipaddr)
16376 for (idx, ipaddr) in details)
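# Illustrative example, not part of the original code: for an instance whose
# NICs 0 and 2 conflict, "details" is [(0, "192.0.2.10"), (2, "192.0.2.13")]
# and the helper above renders "nic0/192.0.2.10, nic2/192.0.2.13";
# _NetworkConflictCheck joins one such fragment per affected instance into its
# warning message before raising OpPrereqError.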
16379 class LUNetworkDisconnect(LogicalUnit):
16380 """Disconnect a network to a nodegroup
16383 HPATH = "network-disconnect"
16384 HTYPE = constants.HTYPE_NETWORK
16387 def ExpandNames(self):
16388 self.network_name = self.op.network_name
16389 self.group_name = self.op.group_name
16391 self.network_uuid = self.cfg.LookupNetwork(self.network_name)
16392 if self.network_uuid is None:
16393 raise errors.OpPrereqError("Network '%s' does not exist" %
16394 self.network_name, errors.ECODE_NOENT)
16396 self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
16397 if self.group_uuid is None:
16398 raise errors.OpPrereqError("Group '%s' does not exist" %
16399 self.group_name, errors.ECODE_NOENT)
16401 self.needed_locks = {
16402 locking.LEVEL_INSTANCE: [],
16403 locking.LEVEL_NODEGROUP: [self.group_uuid],
16405 self.share_locks[locking.LEVEL_INSTANCE] = 1
16407 def DeclareLocks(self, level):
16408 if level == locking.LEVEL_INSTANCE:
16409 assert not self.needed_locks[locking.LEVEL_INSTANCE]
16411 # Lock instances optimistically, needs verification once group lock has
16413 if self.op.conflicts_check:
16414 self.needed_locks[locking.LEVEL_INSTANCE] = \
16415 self.cfg.GetNodeGroupInstances(self.group_uuid)
16417 def BuildHooksEnv(self):
16419 "GROUP_NAME": self.group_name,
16423 def BuildHooksNodes(self):
16424 nodes = self.cfg.GetNodeGroup(self.group_uuid).members
16425 return (nodes, nodes)
16427 def CheckPrereq(self):
16428 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
16430 assert self.group_uuid in owned_groups
16432 self.group = self.cfg.GetNodeGroup(self.group_uuid)
16433 self.connected = True
16434 if self.network_uuid not in self.group.networks:
16435 self.LogWarning("Network '%s' is not mapped to group '%s'",
16436 self.network_name, self.group.name)
16437 self.connected = False
16440 if self.op.conflicts_check:
16441 _NetworkConflictCheck(self, lambda nic: nic.network == self.network_name,
16444 def Exec(self, feedback_fn):
16445 if not self.connected:
16448 del self.group.networks[self.network_uuid]
16449 self.cfg.Update(self.group, feedback_fn)
16452 #: Query type implementations
16454 constants.QR_CLUSTER: _ClusterQuery,
16455 constants.QR_INSTANCE: _InstanceQuery,
16456 constants.QR_NODE: _NodeQuery,
16457 constants.QR_GROUP: _GroupQuery,
16458 constants.QR_NETWORK: _NetworkQuery,
16459 constants.QR_OS: _OsQuery,
16460 constants.QR_EXPORT: _ExportQuery,
16463 assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
16466 def _GetQueryImplementation(name):
16467 """Returns the implemtnation for a query type.
16469 @param name: Query type, must be one of L{constants.QR_VIA_OP}
16473 return _QUERY_IMPL[name]
16475 raise errors.OpPrereqError("Unknown query resource '%s'" % name,
16476 errors.ECODE_INVAL)
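# Illustrative sketch, not part of the original module: how a caller resolves
# and runs one of the query implementations registered in _QUERY_IMPL above.
# "lu", "names" and "fields" are hypothetical placeholders; this mirrors what
# LUGroupQuery does in CheckArguments/ExpandNames/Exec.
def _RunGroupQuerySketch(lu, names, fields):
  impl_cls = _GetQueryImplementation(constants.QR_GROUP)
  impl = impl_cls(qlang.MakeSimpleFilter("name", names), fields, False)
  impl.ExpandNames(lu)
  return impl.OldStyleQuery(lu)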
16479 def _CheckForConflictingIp(lu, ip, node):
16480 """In case of conflicting IP address raise error.
16483 @param ip: IP address
16485 @param node: node name
16488 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
16489 if conf_net is not None:
16490 raise errors.OpPrereqError(("Conflicting IP address found: '%s' != '%s'" %
16492 errors.ECODE_STATE)
16494 return (None, None)