4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
44 from ganeti import ssh
45 from ganeti import utils
46 from ganeti import errors
47 from ganeti import hypervisor
48 from ganeti import locking
49 from ganeti import constants
50 from ganeti import objects
51 from ganeti import serializer
52 from ganeti import ssconf
53 from ganeti import uidpool
54 from ganeti import compat
55 from ganeti import masterd
56 from ganeti import netutils
57 from ganeti import query
58 from ganeti import qlang
59 from ganeti import opcodes
61 import ganeti.masterd.instance # pylint: disable-msg=W0611
64 def _SupportsOob(cfg, node):
65 """Tells if node supports OOB.
67 @type cfg: L{config.ConfigWriter}
68 @param cfg: The cluster configuration
69 @type node: L{objects.Node}
71 @return: The OOB script if supported or an empty string otherwise
74 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
78 class LogicalUnit(object):
79 """Logical Unit base class.
81 Subclasses must follow these rules:
82 - implement ExpandNames
83 - implement CheckPrereq (except when tasklets are used)
84 - implement Exec (except when tasklets are used)
85 - implement BuildHooksEnv
86 - redefine HPATH and HTYPE
87 - optionally redefine their run requirements:
88 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
90 Note that all commands require root permissions.
92 @ivar dry_run_result: the value (if any) that will be returned to the caller
93 in dry-run mode (signalled by opcode dry_run parameter)
100 def __init__(self, processor, op, context, rpc):
101 """Constructor for LogicalUnit.
103 This needs to be overridden in derived classes in order to check op validity.
107 self.proc = processor
109 self.cfg = context.cfg
110 self.context = context
112 # Dicts used to declare locking needs to mcpu
113 self.needed_locks = None
114 self.acquired_locks = {}
115 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
117 self.remove_locks = {}
118 # Used to force good behavior when calling helper functions
119 self.recalculate_locks = {}
122 self.Log = processor.Log # pylint: disable-msg=C0103
123 self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
124 self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
125 self.LogStep = processor.LogStep # pylint: disable-msg=C0103
126 # support for dry-run
127 self.dry_run_result = None
128 # support for generic debug attribute
129 if (not hasattr(self.op, "debug_level") or
130 not isinstance(self.op.debug_level, int)):
131 self.op.debug_level = 0
136 # Validate opcode parameters and set defaults
137 self.op.Validate(True)
139 self.CheckArguments()
142 """Returns the SshRunner object
146 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
149 ssh = property(fget=__GetSSH)
151 def CheckArguments(self):
152 """Check syntactic validity for the opcode arguments.
154 This method is for doing a simple syntactic check and ensuring the
155 validity of opcode parameters, without any cluster-related
156 checks. While the same can be accomplished in ExpandNames and/or
157 CheckPrereq, doing these separately is better because:
159 - ExpandNames is left as a purely lock-related function
160 - CheckPrereq is run after we have acquired locks (and possible
163 The function is allowed to change the self.op attribute so that
164 later methods no longer need to worry about missing parameters.
169 def ExpandNames(self):
170 """Expand names for this LU.
172 This method is called before starting to execute the opcode, and it should
173 update all the parameters of the opcode to their canonical form (e.g. a
174 short node name must be fully expanded after this method has successfully
175 completed). This way locking, hooks, logging, etc. can work correctly.
177 LUs which implement this method must also populate the self.needed_locks
178 member, as a dict with lock levels as keys, and a list of needed lock names
181 - use an empty dict if you don't need any lock
182 - if you don't need any lock at a particular level omit that level
183 - don't put anything for the BGL level
184 - if you want all locks at a level use locking.ALL_SET as a value
186 If you need to share locks (rather than acquire them exclusively) at one
187 level you can modify self.share_locks, setting a true value (usually 1) for
188 that level. By default locks are not shared.
190 This function can also define a list of tasklets, which then will be
191 executed in order instead of the usual LU-level CheckPrereq and Exec
192 functions, if those are not defined by the LU.
196 # Acquire all nodes and one instance
197 self.needed_locks = {
198 locking.LEVEL_NODE: locking.ALL_SET,
199 locking.LEVEL_INSTANCE: ['instance1.example.com'],
201 # Acquire just two nodes
202 self.needed_locks = {
203 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
206 self.needed_locks = {} # No, you can't leave it to the default value None
209 # The implementation of this method is mandatory only if the new LU is
210 # concurrent, so that old LUs don't need to be changed all at the same time.
213 self.needed_locks = {} # Exclusive LUs don't need locks.
215 raise NotImplementedError
217 def DeclareLocks(self, level):
218 """Declare LU locking needs for a level
220 While most LUs can just declare their locking needs at ExpandNames time,
221 sometimes there's the need to calculate some locks after having acquired
222 the ones before. This function is called just before acquiring locks at a
223 particular level, but after acquiring the ones at lower levels, and permits
224 such calculations. It can be used to modify self.needed_locks, and by
225 default it does nothing.
227 This function is only called if you have something already set in
228 self.needed_locks for the level.
230 @param level: Locking level which is going to be locked
231 @type level: member of ganeti.locking.LEVELS
235 def CheckPrereq(self):
236 """Check prerequisites for this LU.
238 This method should check that the prerequisites for the execution
239 of this LU are fulfilled. It can do internode communication, but
240 it should be idempotent - no cluster or system changes are allowed.
243 The method should raise errors.OpPrereqError in case something is
244 not fulfilled. Its return value is ignored.
246 This method should also update all the parameters of the opcode to
247 their canonical form if it hasn't been done by ExpandNames before.
250 if self.tasklets is not None:
251 for (idx, tl) in enumerate(self.tasklets):
252 logging.debug("Checking prerequisites for tasklet %s/%s",
253 idx + 1, len(self.tasklets))
258 def Exec(self, feedback_fn):
261 This method should implement the actual work. It should raise
262 errors.OpExecError for failures that are somewhat dealt with in code, or expected.
266 if self.tasklets is not None:
267 for (idx, tl) in enumerate(self.tasklets):
268 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
271 raise NotImplementedError
273 def BuildHooksEnv(self):
274 """Build hooks environment for this LU.
276 This method should return a three-element tuple consisting of: a dict
277 containing the environment that will be used for running the
278 specific hook for this LU, a list of node names on which the hook
279 should run before the execution, and a list of node names on which
280 the hook should run after the execution.
282 The keys of the dict must not be prefixed with 'GANETI_', as this prefix
283 is added by the hooks runner. Also note that additional keys will be
284 added by the hooks runner. If the LU doesn't define any
285 environment, an empty dict (and not None) should be returned.
287 If no nodes are to be returned, an empty list (and not None) should be used.
289 Note that if the HPATH for a LU class is None, this function will not be called.
293 raise NotImplementedError
295 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
296 """Notify the LU about the results of its hooks.
298 This method is called every time a hooks phase is executed, and notifies
299 the Logical Unit about the hooks' result. The LU can then use it to alter
300 its result based on the hooks. By default the method does nothing and the
301 previous result is passed back unchanged but any LU can define it if it
302 wants to use the local cluster hook-scripts somehow.
304 @param phase: one of L{constants.HOOKS_PHASE_POST} or
305 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
306 @param hook_results: the results of the multi-node hooks rpc call
307 @param feedback_fn: function used to send feedback back to the caller
308 @param lu_result: the previous Exec result this LU had, or None
310 @return: the new Exec result, based on the previous result
314 # API must be kept, thus we ignore the "unused argument" and "could
315 # be a function" pylint warnings
316 # pylint: disable-msg=W0613,R0201
319 def _ExpandAndLockInstance(self):
320 """Helper function to expand and lock an instance.
322 Many LUs that work on an instance take its name in self.op.instance_name
323 and need to expand it and then declare the expanded name for locking. This
324 function does it, and then updates self.op.instance_name to the expanded
325 name. It also initializes needed_locks as a dict, if this hasn't been done before.
329 if self.needed_locks is None:
330 self.needed_locks = {}
332 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
333 "_ExpandAndLockInstance called with instance-level locks set"
334 self.op.instance_name = _ExpandInstanceName(self.cfg,
335 self.op.instance_name)
336 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
338 def _LockInstancesNodes(self, primary_only=False):
339 """Helper function to declare instances' nodes for locking.
341 This function should be called after locking one or more instances to lock
342 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
343 with all primary or secondary nodes for instances already locked and
344 present in self.needed_locks[locking.LEVEL_INSTANCE].
346 It should be called from DeclareLocks, and for safety only works if
347 self.recalculate_locks[locking.LEVEL_NODE] is set.
349 In the future it may grow parameters to just lock some instance's nodes, or
350 to just lock primaries or secondary nodes, if needed.
352 It should be called in DeclareLocks in a way similar to::
354 if level == locking.LEVEL_NODE:
355 self._LockInstancesNodes()
357 @type primary_only: boolean
358 @param primary_only: only lock primary nodes of locked instances
361 assert locking.LEVEL_NODE in self.recalculate_locks, \
362 "_LockInstancesNodes helper function called with no nodes to recalculate"
364 # TODO: check if we've really been called with the instance locks held
366 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
367 # future we might want to have different behaviors depending on the value
368 # of self.recalculate_locks[locking.LEVEL_NODE]
370 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
371 instance = self.context.cfg.GetInstanceInfo(instance_name)
372 wanted_nodes.append(instance.primary_node)
374 wanted_nodes.extend(instance.secondary_nodes)
376 if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
377 self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
378 elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
379 self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
381 del self.recalculate_locks[locking.LEVEL_NODE]
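# Illustrative sketch (not used anywhere in this module): a minimal LU
# following the subclassing rules documented above, combining
# _ExpandAndLockInstance with the recalculate_locks/_LockInstancesNodes
# pattern. The "instance_name" opcode slot is an assumption for the example;
# HPATH/HTYPE stay None, so no hooks run and BuildHooksEnv is never called.
class _ExampleInstanceNoopLU(LogicalUnit):
  HPATH = None
  HTYPE = None

  def ExpandNames(self):
    # lock the instance itself, and ask for its nodes to be recalculated
    # (and then locked) at the node level in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    # the instance name was already canonicalized by _ExpandAndLockInstance
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name, errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    feedback_fn("Instance %s uses nodes %s" %
                (self.instance.name,
                 utils.CommaJoin(self.instance.all_nodes)))
    return self.instance.name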
384 class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
385 """Simple LU which runs no hooks.
387 This LU is intended as a parent for other LogicalUnits which will
388 run no hooks, in order to reduce duplicate code.
394 def BuildHooksEnv(self):
395 """Empty BuildHooksEnv for NoHooksLu.
397 This just raises an error.
400 assert False, "BuildHooksEnv called for NoHooksLUs"
404 """Tasklet base class.
406 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
407 they can mix legacy code with tasklets. Locking needs to be done in the LU,
408 tasklets know nothing about locks.
410 Subclasses must follow these rules:
411 - Implement CheckPrereq
415 def __init__(self, lu):
422 def CheckPrereq(self):
423 """Check prerequisites for this tasklets.
425 This method should check whether the prerequisites for the execution of
426 this tasklet are fulfilled. It can do internode communication, but it
427 should be idempotent - no cluster or system changes are allowed.
429 The method should raise errors.OpPrereqError in case something is not
430 fulfilled. Its return value is ignored.
432 This method should also update all parameters to their canonical form if it
433 hasn't been done before.
438 def Exec(self, feedback_fn):
439 """Execute the tasklet.
441 This method should implement the actual work. It should raise
442 errors.OpExecError for failures that are somewhat dealt with in code, or
446 raise NotImplementedError
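# Illustrative sketch (not referenced elsewhere): a tasklet honouring the
# contract above. An LU would build it while the relevant locks are being
# declared and append it to self.tasklets; the instance_name argument is an
# assumed input, and we keep our own config reference so that only the base
# constructor signature is relied upon.
class _ExampleNoopTasklet(Tasklet):
  def __init__(self, lu, instance_name):
    Tasklet.__init__(self, lu)
    self._cfg = lu.cfg
    self.instance_name = instance_name
    self.instance = None

  def CheckPrereq(self):
    # canonicalize the name and fetch the configuration object; this makes
    # no cluster or system changes, as required above
    self.instance_name = _ExpandInstanceName(self._cfg, self.instance_name)
    self.instance = self._cfg.GetInstanceInfo(self.instance_name)

  def Exec(self, feedback_fn):
    feedback_fn("Nothing to do for instance %s" % self.instance.name)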
450 """Base for query utility classes.
453 #: Attribute holding field definitions
456 def __init__(self, names, fields, use_locking):
457 """Initializes this class.
461 self.use_locking = use_locking
463 self.query = query.Query(self.FIELDS, fields)
464 self.requested_data = self.query.RequestedData()
466 self.do_locking = None
469 def _GetNames(self, lu, all_names, lock_level):
470 """Helper function to determine names asked for in the query.
474 names = lu.acquired_locks[lock_level]
478 if self.wanted == locking.ALL_SET:
479 assert not self.names
480 # caller didn't specify names, so ordering is not important
481 return utils.NiceSort(names)
483 # caller specified names and we must keep the same order
485 assert not self.do_locking or lu.acquired_locks[lock_level]
487 missing = set(self.wanted).difference(names)
489 raise errors.OpExecError("Some items were removed before retrieving"
490 " their data: %s" % missing)
492 # Return expanded names
496 def FieldsQuery(cls, fields):
497 """Returns list of available fields.
499 @return: List of L{objects.QueryFieldDefinition}
502 return query.QueryFields(cls.FIELDS, fields)
504 def ExpandNames(self, lu):
505 """Expand names for this query.
507 See L{LogicalUnit.ExpandNames}.
510 raise NotImplementedError()
512 def DeclareLocks(self, lu, level):
513 """Declare locks for this query.
515 See L{LogicalUnit.DeclareLocks}.
518 raise NotImplementedError()
520 def _GetQueryData(self, lu):
521 """Collects all data for this query.
523 @return: Query data object
526 raise NotImplementedError()
528 def NewStyleQuery(self, lu):
529 """Collect data and execute query.
532 return query.GetQueryResponse(self.query, self._GetQueryData(lu))
534 def OldStyleQuery(self, lu):
535 """Collect data and execute query.
538 return self.query.OldStyleQuery(self._GetQueryData(lu))
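# Typical wiring of _QueryBase (comments-only sketch; the concrete FIELDS
# definitions and data containers come from the query module and are not
# reproduced here):
#
#   class _ExampleQuery(_QueryBase):
#     FIELDS = <field definitions built by the query module>
#
#     def ExpandNames(self, lu):
#       lu.needed_locks = {}          # or real locks when use_locking is set
#
#     def DeclareLocks(self, lu, level):
#       pass
#
#     def _GetQueryData(self, lu):
#       return <query data object understood by self.query>
#
# The owning LU creates the instance with (names, fields, use_locking) and
# calls its OldStyleQuery or NewStyleQuery method from Exec.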
541 def _GetWantedNodes(lu, nodes):
542 """Returns list of checked and expanded node names.
544 @type lu: L{LogicalUnit}
545 @param lu: the logical unit on whose behalf we execute
547 @param nodes: list of node names or None for all nodes
549 @return: the list of nodes, sorted
550 @raise errors.ProgrammerError: if the nodes parameter is wrong type
554 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
556 return utils.NiceSort(lu.cfg.GetNodeList())
559 def _GetWantedInstances(lu, instances):
560 """Returns list of checked and expanded instance names.
562 @type lu: L{LogicalUnit}
563 @param lu: the logical unit on whose behalf we execute
564 @type instances: list
565 @param instances: list of instance names or None for all instances
567 @return: the list of instances, sorted
568 @raise errors.OpPrereqError: if the instances parameter is wrong type
569 @raise errors.OpPrereqError: if any of the passed instances is not found
573 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
575 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
579 def _GetUpdatedParams(old_params, update_dict,
580 use_default=True, use_none=False):
581 """Return the new version of a parameter dictionary.
583 @type old_params: dict
584 @param old_params: old parameters
585 @type update_dict: dict
586 @param update_dict: dict containing new parameter values, or
587 constants.VALUE_DEFAULT to reset the parameter to its default
589 @type use_default: boolean
590 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
591 values as 'to be deleted' values
592 @type use_none: boolean
593 @param use_none: whether to recognise C{None} values as 'to be
596 @return: the new parameter dictionary
599 params_copy = copy.deepcopy(old_params)
600 for key, val in update_dict.iteritems():
601 if ((use_default and val == constants.VALUE_DEFAULT) or
602 (use_none and val is None)):
608 params_copy[key] = val
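# Worked example of the merge semantics implemented above (comments only):
#
#   _GetUpdatedParams({"mem": 512, "vcpus": 2},
#                     {"mem": constants.VALUE_DEFAULT, "vcpus": 4})
#   -> {"vcpus": 4}
#
# "mem" is dropped from the copy (so it reverts to the cluster/group default
# when the dictionaries are later re-filled), while "vcpus" is overwritten.
# With use_none=True, an explicit None value behaves like VALUE_DEFAULT.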
612 def _CheckOutputFields(static, dynamic, selected):
613 """Checks whether all selected fields are valid.
615 @type static: L{utils.FieldSet}
616 @param static: static fields set
617 @type dynamic: L{utils.FieldSet}
618 @param dynamic: dynamic fields set
625 delta = f.NonMatching(selected)
627 raise errors.OpPrereqError("Unknown output fields selected: %s"
628 % ",".join(delta), errors.ECODE_INVAL)
631 def _CheckGlobalHvParams(params):
632 """Validates that given hypervisor params are not global ones.
634 This will ensure that instances don't get customised versions of global parameters.
638 used_globals = constants.HVC_GLOBALS.intersection(params)
640 msg = ("The following hypervisor parameters are global and cannot"
641 " be customized at instance level, please modify them at"
642 " cluster level: %s" % utils.CommaJoin(used_globals))
643 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
646 def _CheckNodeOnline(lu, node, msg=None):
647 """Ensure that a given node is online.
649 @param lu: the LU on behalf of which we make the check
650 @param node: the node to check
651 @param msg: if passed, should be a message to replace the default one
652 @raise errors.OpPrereqError: if the node is offline
656 msg = "Can't use offline node"
657 if lu.cfg.GetNodeInfo(node).offline:
658 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
661 def _CheckNodeNotDrained(lu, node):
662 """Ensure that a given node is not drained.
664 @param lu: the LU on behalf of which we make the check
665 @param node: the node to check
666 @raise errors.OpPrereqError: if the node is drained
669 if lu.cfg.GetNodeInfo(node).drained:
670 raise errors.OpPrereqError("Can't use drained node %s" % node,
674 def _CheckNodeVmCapable(lu, node):
675 """Ensure that a given node is vm capable.
677 @param lu: the LU on behalf of which we make the check
678 @param node: the node to check
679 @raise errors.OpPrereqError: if the node is not vm capable
682 if not lu.cfg.GetNodeInfo(node).vm_capable:
683 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
687 def _CheckNodeHasOS(lu, node, os_name, force_variant):
688 """Ensure that a node supports a given OS.
690 @param lu: the LU on behalf of which we make the check
691 @param node: the node to check
692 @param os_name: the OS to query about
693 @param force_variant: whether to ignore variant errors
694 @raise errors.OpPrereqError: if the node is not supporting the OS
697 result = lu.rpc.call_os_get(node, os_name)
698 result.Raise("OS '%s' not in supported OS list for node %s" %
700 prereq=True, ecode=errors.ECODE_INVAL)
701 if not force_variant:
702 _CheckOSVariant(result.payload, os_name)
705 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
706 """Ensure that a node has the given secondary ip.
708 @type lu: L{LogicalUnit}
709 @param lu: the LU on behalf of which we make the check
711 @param node: the node to check
712 @type secondary_ip: string
713 @param secondary_ip: the ip to check
714 @type prereq: boolean
715 @param prereq: whether to throw a prerequisite or an execute error
716 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
717 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
720 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
721 result.Raise("Failure checking secondary ip on node %s" % node,
722 prereq=prereq, ecode=errors.ECODE_ENVIRON)
723 if not result.payload:
724 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
725 " please fix and re-run this command" % secondary_ip)
727 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
729 raise errors.OpExecError(msg)
732 def _GetClusterDomainSecret():
733 """Reads the cluster domain secret.
736 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
740 def _CheckInstanceDown(lu, instance, reason):
741 """Ensure that an instance is not running."""
742 if instance.admin_up:
743 raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
744 (instance.name, reason), errors.ECODE_STATE)
746 pnode = instance.primary_node
747 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
748 ins_l.Raise("Can't contact node %s for instance information" % pnode,
749 prereq=True, ecode=errors.ECODE_ENVIRON)
751 if instance.name in ins_l.payload:
752 raise errors.OpPrereqError("Instance %s is running, %s" %
753 (instance.name, reason), errors.ECODE_STATE)
756 def _ExpandItemName(fn, name, kind):
757 """Expand an item name.
759 @param fn: the function to use for expansion
760 @param name: requested item name
761 @param kind: text description ('Node' or 'Instance')
762 @return: the resolved (full) name
763 @raise errors.OpPrereqError: if the item is not found
767 if full_name is None:
768 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
773 def _ExpandNodeName(cfg, name):
774 """Wrapper over L{_ExpandItemName} for nodes."""
775 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
778 def _ExpandInstanceName(cfg, name):
779 """Wrapper over L{_ExpandItemName} for instance."""
780 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
783 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
784 memory, vcpus, nics, disk_template, disks,
785 bep, hvp, hypervisor_name):
786 """Builds instance related env variables for hooks
788 This builds the hook environment from individual variables.
791 @param name: the name of the instance
792 @type primary_node: string
793 @param primary_node: the name of the instance's primary node
794 @type secondary_nodes: list
795 @param secondary_nodes: list of secondary nodes as strings
796 @type os_type: string
797 @param os_type: the name of the instance's OS
798 @type status: boolean
799 @param status: the should_run status of the instance
801 @param memory: the memory size of the instance
803 @param vcpus: the count of VCPUs the instance has
805 @param nics: list of tuples (ip, mac, mode, link) representing
806 the NICs the instance has
807 @type disk_template: string
808 @param disk_template: the disk template of the instance
810 @param disks: the list of (size, mode) pairs
812 @param bep: the backend parameters for the instance
814 @param hvp: the hypervisor parameters for the instance
815 @type hypervisor_name: string
816 @param hypervisor_name: the hypervisor for the instance
818 @return: the hook environment for this instance
827 "INSTANCE_NAME": name,
828 "INSTANCE_PRIMARY": primary_node,
829 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
830 "INSTANCE_OS_TYPE": os_type,
831 "INSTANCE_STATUS": str_status,
832 "INSTANCE_MEMORY": memory,
833 "INSTANCE_VCPUS": vcpus,
834 "INSTANCE_DISK_TEMPLATE": disk_template,
835 "INSTANCE_HYPERVISOR": hypervisor_name,
839 nic_count = len(nics)
840 for idx, (ip, mac, mode, link) in enumerate(nics):
843 env["INSTANCE_NIC%d_IP" % idx] = ip
844 env["INSTANCE_NIC%d_MAC" % idx] = mac
845 env["INSTANCE_NIC%d_MODE" % idx] = mode
846 env["INSTANCE_NIC%d_LINK" % idx] = link
847 if mode == constants.NIC_MODE_BRIDGED:
848 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
852 env["INSTANCE_NIC_COUNT"] = nic_count
855 disk_count = len(disks)
856 for idx, (size, mode) in enumerate(disks):
857 env["INSTANCE_DISK%d_SIZE" % idx] = size
858 env["INSTANCE_DISK%d_MODE" % idx] = mode
862 env["INSTANCE_DISK_COUNT"] = disk_count
864 for source, kind in [(bep, "BE"), (hvp, "HV")]:
865 for key, value in source.items():
866 env["INSTANCE_%s_%s" % (kind, key)] = value
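# For illustration, a single-NIC (bridged), single-disk instance yields keys
# like the following (values shown schematically; the hooks runner later adds
# the "GANETI_" prefix to every key):
#
#   INSTANCE_NAME, INSTANCE_PRIMARY, INSTANCE_SECONDARIES, INSTANCE_OS_TYPE,
#   INSTANCE_STATUS, INSTANCE_MEMORY, INSTANCE_VCPUS, INSTANCE_DISK_TEMPLATE,
#   INSTANCE_HYPERVISOR,
#   INSTANCE_NIC_COUNT=1, INSTANCE_NIC0_IP, INSTANCE_NIC0_MAC,
#   INSTANCE_NIC0_MODE, INSTANCE_NIC0_LINK, INSTANCE_NIC0_BRIDGE,
#   INSTANCE_DISK_COUNT=1, INSTANCE_DISK0_SIZE, INSTANCE_DISK0_MODE,
#   plus one INSTANCE_BE_<param> and one INSTANCE_HV_<param> entry per
#   backend and hypervisor parameter.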
871 def _NICListToTuple(lu, nics):
872 """Build a list of nic information tuples.
874 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
875 value in LUInstanceQueryData.
877 @type lu: L{LogicalUnit}
878 @param lu: the logical unit on whose behalf we execute
879 @type nics: list of L{objects.NIC}
880 @param nics: list of nics to convert to hooks tuples
884 cluster = lu.cfg.GetClusterInfo()
888 filled_params = cluster.SimpleFillNIC(nic.nicparams)
889 mode = filled_params[constants.NIC_MODE]
890 link = filled_params[constants.NIC_LINK]
891 hooks_nics.append((ip, mac, mode, link))
895 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
896 """Builds instance related env variables for hooks from an object.
898 @type lu: L{LogicalUnit}
899 @param lu: the logical unit on whose behalf we execute
900 @type instance: L{objects.Instance}
901 @param instance: the instance for which we should build the
904 @param override: dictionary with key/values that will override
907 @return: the hook environment dictionary
910 cluster = lu.cfg.GetClusterInfo()
911 bep = cluster.FillBE(instance)
912 hvp = cluster.FillHV(instance)
914 'name': instance.name,
915 'primary_node': instance.primary_node,
916 'secondary_nodes': instance.secondary_nodes,
917 'os_type': instance.os,
918 'status': instance.admin_up,
919 'memory': bep[constants.BE_MEMORY],
920 'vcpus': bep[constants.BE_VCPUS],
921 'nics': _NICListToTuple(lu, instance.nics),
922 'disk_template': instance.disk_template,
923 'disks': [(disk.size, disk.mode) for disk in instance.disks],
926 'hypervisor_name': instance.hypervisor,
929 args.update(override)
930 return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
933 def _AdjustCandidatePool(lu, exceptions):
934 """Adjust the candidate pool after node operations.
937 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
939 lu.LogInfo("Promoted nodes to master candidate role: %s",
940 utils.CommaJoin(node.name for node in mod_list))
941 for name in mod_list:
942 lu.context.ReaddNode(name)
943 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
945 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
949 def _DecideSelfPromotion(lu, exceptions=None):
950 """Decide whether I should promote myself as a master candidate.
953 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
954 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
955 # the new node will increase mc_max by one, so:
956 mc_should = min(mc_should + 1, cp_size)
957 return mc_now < mc_should
960 def _CheckNicsBridgesExist(lu, target_nics, target_node):
961 """Check that the brigdes needed by a list of nics exist.
964 cluster = lu.cfg.GetClusterInfo()
965 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
966 brlist = [params[constants.NIC_LINK] for params in paramslist
967 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
969 result = lu.rpc.call_bridges_exist(target_node, brlist)
970 result.Raise("Error checking bridges on destination node '%s'" %
971 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
974 def _CheckInstanceBridgesExist(lu, instance, node=None):
975 """Check that the brigdes needed by an instance exist.
979 node = instance.primary_node
980 _CheckNicsBridgesExist(lu, instance.nics, node)
983 def _CheckOSVariant(os_obj, name):
984 """Check whether an OS name conforms to the os variants specification.
986 @type os_obj: L{objects.OS}
987 @param os_obj: OS object to check
989 @param name: OS name passed by the user, to check for validity
992 if not os_obj.supported_variants:
994 variant = objects.OS.GetVariant(name)
996 raise errors.OpPrereqError("OS name must include a variant",
999 if variant not in os_obj.supported_variants:
1000 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1003 def _GetNodeInstancesInner(cfg, fn):
1004 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1007 def _GetNodeInstances(cfg, node_name):
1008 """Returns a list of all primary and secondary instances on a node.
1012 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1015 def _GetNodePrimaryInstances(cfg, node_name):
1016 """Returns primary instances on a node.
1019 return _GetNodeInstancesInner(cfg,
1020 lambda inst: node_name == inst.primary_node)
1023 def _GetNodeSecondaryInstances(cfg, node_name):
1024 """Returns secondary instances on a node.
1027 return _GetNodeInstancesInner(cfg,
1028 lambda inst: node_name in inst.secondary_nodes)
1031 def _GetStorageTypeArgs(cfg, storage_type):
1032 """Returns the arguments for a storage type.
1035 # Special case for file storage
1036 if storage_type == constants.ST_FILE:
1037 # storage.FileStorage wants a list of storage directories
1038 return [[cfg.GetFileStorageDir()]]
1043 def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1046 for dev in instance.disks:
1047 cfg.SetDiskID(dev, node_name)
1049 result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1050 result.Raise("Failed to get disk status from node %s" % node_name,
1051 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1053 for idx, bdev_status in enumerate(result.payload):
1054 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1060 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1061 """Check the sanity of iallocator and node arguments and use the
1062 cluster-wide iallocator if appropriate.
1064 Check that at most one of (iallocator, node) is specified. If none is
1065 specified, then the LU's opcode's iallocator slot is filled with the
1066 cluster-wide default iallocator.
1068 @type iallocator_slot: string
1069 @param iallocator_slot: the name of the opcode iallocator slot
1070 @type node_slot: string
1071 @param node_slot: the name of the opcode target node slot
1074 node = getattr(lu.op, node_slot, None)
1075 iallocator = getattr(lu.op, iallocator_slot, None)
1077 if node is not None and iallocator is not None:
1078 raise errors.OpPrereqError("Do not specify both, iallocator and node.",
1080 elif node is None and iallocator is None:
1081 default_iallocator = lu.cfg.GetDefaultIAllocator()
1082 if default_iallocator:
1083 setattr(lu.op, iallocator_slot, default_iallocator)
1085 raise errors.OpPrereqError("No iallocator or node given and no"
1086 " cluster-wide default iallocator found."
1087 " Please specify either an iallocator or a"
1088 " node, or set a cluster-wide default"
1092 class LUClusterPostInit(LogicalUnit):
1093 """Logical unit for running hooks after cluster initialization.
1096 HPATH = "cluster-init"
1097 HTYPE = constants.HTYPE_CLUSTER
1099 def BuildHooksEnv(self):
1103 env = {"OP_TARGET": self.cfg.GetClusterName()}
1104 mn = self.cfg.GetMasterNode()
1105 return env, [], [mn]
1107 def Exec(self, feedback_fn):
1114 class LUClusterDestroy(LogicalUnit):
1115 """Logical unit for destroying the cluster.
1118 HPATH = "cluster-destroy"
1119 HTYPE = constants.HTYPE_CLUSTER
1121 def BuildHooksEnv(self):
1125 env = {"OP_TARGET": self.cfg.GetClusterName()}
1128 def CheckPrereq(self):
1129 """Check prerequisites.
1131 This checks whether the cluster is empty.
1133 Any errors are signaled by raising errors.OpPrereqError.
1136 master = self.cfg.GetMasterNode()
1138 nodelist = self.cfg.GetNodeList()
1139 if len(nodelist) != 1 or nodelist[0] != master:
1140 raise errors.OpPrereqError("There are still %d node(s) in"
1141 " this cluster." % (len(nodelist) - 1),
1143 instancelist = self.cfg.GetInstanceList()
1145 raise errors.OpPrereqError("There are still %d instance(s) in"
1146 " this cluster." % len(instancelist),
1149 def Exec(self, feedback_fn):
1150 """Destroys the cluster.
1153 master = self.cfg.GetMasterNode()
1155 # Run post hooks on master node before it's removed
1156 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1158 hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1160 # pylint: disable-msg=W0702
1161 self.LogWarning("Errors occurred running hooks on %s" % master)
1163 result = self.rpc.call_node_stop_master(master, False)
1164 result.Raise("Could not disable the master role")
1169 def _VerifyCertificate(filename):
1170 """Verifies a certificate for LUClusterVerify.
1172 @type filename: string
1173 @param filename: Path to PEM file
1177 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1178 utils.ReadFile(filename))
1179 except Exception, err: # pylint: disable-msg=W0703
1180 return (LUClusterVerify.ETYPE_ERROR,
1181 "Failed to load X509 certificate %s: %s" % (filename, err))
1184 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1185 constants.SSL_CERT_EXPIRATION_ERROR)
1188 fnamemsg = "While verifying %s: %s" % (filename, msg)
1193 return (None, fnamemsg)
1194 elif errcode == utils.CERT_WARNING:
1195 return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
1196 elif errcode == utils.CERT_ERROR:
1197 return (LUClusterVerify.ETYPE_ERROR, fnamemsg)
1199 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1202 class LUClusterVerify(LogicalUnit):
1203 """Verifies the cluster status.
1206 HPATH = "cluster-verify"
1207 HTYPE = constants.HTYPE_CLUSTER
1210 TCLUSTER = "cluster"
1212 TINSTANCE = "instance"
1214 ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1215 ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1216 EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1217 EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1218 EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1219 EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1220 EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1221 EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1222 EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1223 ENODEDRBD = (TNODE, "ENODEDRBD")
1224 ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1225 ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1226 ENODEHOOKS = (TNODE, "ENODEHOOKS")
1227 ENODEHV = (TNODE, "ENODEHV")
1228 ENODELVM = (TNODE, "ENODELVM")
1229 ENODEN1 = (TNODE, "ENODEN1")
1230 ENODENET = (TNODE, "ENODENET")
1231 ENODEOS = (TNODE, "ENODEOS")
1232 ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1233 ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1234 ENODERPC = (TNODE, "ENODERPC")
1235 ENODESSH = (TNODE, "ENODESSH")
1236 ENODEVERSION = (TNODE, "ENODEVERSION")
1237 ENODESETUP = (TNODE, "ENODESETUP")
1238 ENODETIME = (TNODE, "ENODETIME")
1239 ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1241 ETYPE_FIELD = "code"
1242 ETYPE_ERROR = "ERROR"
1243 ETYPE_WARNING = "WARNING"
1245 _HOOKS_INDENT_RE = re.compile("^", re.M)
1247 class NodeImage(object):
1248 """A class representing the logical and physical status of a node.
1251 @ivar name: the node name to which this object refers
1252 @ivar volumes: a structure as returned from
1253 L{ganeti.backend.GetVolumeList} (runtime)
1254 @ivar instances: a list of running instances (runtime)
1255 @ivar pinst: list of configured primary instances (config)
1256 @ivar sinst: list of configured secondary instances (config)
1257 @ivar sbp: dictionary of {primary-node: list of instances} for all
1258 instances for which this node is secondary (config)
1259 @ivar mfree: free memory, as reported by hypervisor (runtime)
1260 @ivar dfree: free disk, as reported by the node (runtime)
1261 @ivar offline: the offline status (config)
1262 @type rpc_fail: boolean
1263 @ivar rpc_fail: whether the RPC verify call was successful (overall,
1264 not whether the individual keys were correct) (runtime)
1265 @type lvm_fail: boolean
1266 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1267 @type hyp_fail: boolean
1268 @ivar hyp_fail: whether the RPC call didn't return the instance list
1269 @type ghost: boolean
1270 @ivar ghost: whether this is a known node or not (config)
1271 @type os_fail: boolean
1272 @ivar os_fail: whether the RPC call didn't return valid OS data
1274 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1275 @type vm_capable: boolean
1276 @ivar vm_capable: whether the node can host instances
1279 def __init__(self, offline=False, name=None, vm_capable=True):
1288 self.offline = offline
1289 self.vm_capable = vm_capable
1290 self.rpc_fail = False
1291 self.lvm_fail = False
1292 self.hyp_fail = False
1294 self.os_fail = False
1297 def ExpandNames(self):
1298 self.needed_locks = {
1299 locking.LEVEL_NODE: locking.ALL_SET,
1300 locking.LEVEL_INSTANCE: locking.ALL_SET,
1302 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1304 def _Error(self, ecode, item, msg, *args, **kwargs):
1305 """Format an error message.
1307 Based on the opcode's error_codes parameter, either format a
1308 parseable error code, or a simpler error string.
1310 This must be called only from Exec and functions called from Exec.
1313 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1315 # first complete the msg
1318 # then format the whole message
1319 if self.op.error_codes:
1320 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1326 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1327 # and finally report it via the feedback_fn
1328 self._feedback_fn(" - %s" % msg)
1330 def _ErrorIf(self, cond, *args, **kwargs):
1331 """Log an error message if the passed condition is True.
1334 cond = bool(cond) or self.op.debug_simulate_errors
1336 self._Error(*args, **kwargs)
1337 # do not mark the operation as failed for WARN cases only
1338 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1339 self.bad = self.bad or cond
1341 def _VerifyNode(self, ninfo, nresult):
1342 """Perform some basic validation on data returned from a node.
1344 - check the result data structure is well formed and has all the fields
1346 - check ganeti version
1348 @type ninfo: L{objects.Node}
1349 @param ninfo: the node to check
1350 @param nresult: the results from the node
1352 @return: whether overall this call was successful (and we can expect
1353 reasonable values in the response)
1357 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1359 # main result, nresult should be a non-empty dict
1360 test = not nresult or not isinstance(nresult, dict)
1361 _ErrorIf(test, self.ENODERPC, node,
1362 "unable to verify node: no data returned")
1366 # compares ganeti version
1367 local_version = constants.PROTOCOL_VERSION
1368 remote_version = nresult.get("version", None)
1369 test = not (remote_version and
1370 isinstance(remote_version, (list, tuple)) and
1371 len(remote_version) == 2)
1372 _ErrorIf(test, self.ENODERPC, node,
1373 "connection to node returned invalid data")
1377 test = local_version != remote_version[0]
1378 _ErrorIf(test, self.ENODEVERSION, node,
1379 "incompatible protocol versions: master %s,"
1380 " node %s", local_version, remote_version[0])
1384 # node seems compatible, we can actually try to look into its results
1386 # full package version
1387 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1388 self.ENODEVERSION, node,
1389 "software version mismatch: master %s, node %s",
1390 constants.RELEASE_VERSION, remote_version[1],
1391 code=self.ETYPE_WARNING)
1393 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1394 if ninfo.vm_capable and isinstance(hyp_result, dict):
1395 for hv_name, hv_result in hyp_result.iteritems():
1396 test = hv_result is not None
1397 _ErrorIf(test, self.ENODEHV, node,
1398 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1400 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1401 if ninfo.vm_capable and isinstance(hvp_result, list):
1402 for item, hv_name, hv_result in hvp_result:
1403 _ErrorIf(True, self.ENODEHV, node,
1404 "hypervisor %s parameter verify failure (source %s): %s",
1405 hv_name, item, hv_result)
1407 test = nresult.get(constants.NV_NODESETUP,
1408 ["Missing NODESETUP results"])
1409 _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1414 def _VerifyNodeTime(self, ninfo, nresult,
1415 nvinfo_starttime, nvinfo_endtime):
1416 """Check the node time.
1418 @type ninfo: L{objects.Node}
1419 @param ninfo: the node to check
1420 @param nresult: the remote results for the node
1421 @param nvinfo_starttime: the start time of the RPC call
1422 @param nvinfo_endtime: the end time of the RPC call
1426 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1428 ntime = nresult.get(constants.NV_TIME, None)
1430 ntime_merged = utils.MergeTime(ntime)
1431 except (ValueError, TypeError):
1432 _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1435 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1436 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1437 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1438 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1442 _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1443 "Node time diverges by at least %s from master node time",
1446 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1447 """Check the node time.
1449 @type ninfo: L{objects.Node}
1450 @param ninfo: the node to check
1451 @param nresult: the remote results for the node
1452 @param vg_name: the configured VG name
1459 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1461 # checks vg existence and size > 20G
1462 vglist = nresult.get(constants.NV_VGLIST, None)
1464 _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1466 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1467 constants.MIN_VG_SIZE)
1468 _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1471 pvlist = nresult.get(constants.NV_PVLIST, None)
1472 test = pvlist is None
1473 _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1475 # check that ':' is not present in PV names, since it's a
1476 # special character for lvcreate (denotes the range of PEs to use on the PV)
1478 for _, pvname, owner_vg in pvlist:
1479 test = ":" in pvname
1480 _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1481 " '%s' of VG '%s'", pvname, owner_vg)
1483 def _VerifyNodeNetwork(self, ninfo, nresult):
1484 """Check the node time.
1486 @type ninfo: L{objects.Node}
1487 @param ninfo: the node to check
1488 @param nresult: the remote results for the node
1492 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1494 test = constants.NV_NODELIST not in nresult
1495 _ErrorIf(test, self.ENODESSH, node,
1496 "node hasn't returned node ssh connectivity data")
1498 if nresult[constants.NV_NODELIST]:
1499 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1500 _ErrorIf(True, self.ENODESSH, node,
1501 "ssh communication with node '%s': %s", a_node, a_msg)
1503 test = constants.NV_NODENETTEST not in nresult
1504 _ErrorIf(test, self.ENODENET, node,
1505 "node hasn't returned node tcp connectivity data")
1507 if nresult[constants.NV_NODENETTEST]:
1508 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1510 _ErrorIf(True, self.ENODENET, node,
1511 "tcp communication with node '%s': %s",
1512 anode, nresult[constants.NV_NODENETTEST][anode])
1514 test = constants.NV_MASTERIP not in nresult
1515 _ErrorIf(test, self.ENODENET, node,
1516 "node hasn't returned node master IP reachability data")
1518 if not nresult[constants.NV_MASTERIP]:
1519 if node == self.master_node:
1520 msg = "the master node cannot reach the master IP (not configured?)"
1522 msg = "cannot reach the master IP"
1523 _ErrorIf(True, self.ENODENET, node, msg)
1525 def _VerifyInstance(self, instance, instanceconfig, node_image, diskstatus):
1527 """Verify an instance.
1529 This function checks to see if the required block devices are
1530 available on the instance's node.
1533 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1534 node_current = instanceconfig.primary_node
1536 node_vol_should = {}
1537 instanceconfig.MapLVsByNode(node_vol_should)
1539 for node in node_vol_should:
1540 n_img = node_image[node]
1541 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1542 # ignore missing volumes on offline or broken nodes
1544 for volume in node_vol_should[node]:
1545 test = volume not in n_img.volumes
1546 _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1547 "volume %s missing on node %s", volume, node)
1549 if instanceconfig.admin_up:
1550 pri_img = node_image[node_current]
1551 test = instance not in pri_img.instances and not pri_img.offline
1552 _ErrorIf(test, self.EINSTANCEDOWN, instance,
1553 "instance not running on its primary node %s",
1556 for node, n_img in node_image.items():
1557 if node != node_current:
1558 test = instance in n_img.instances
1559 _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1560 "instance should not run on node %s", node)
1562 diskdata = [(nname, success, status, idx)
1563 for (nname, disks) in diskstatus.items()
1564 for idx, (success, status) in enumerate(disks)]
1566 for nname, success, bdev_status, idx in diskdata:
1567 _ErrorIf(instanceconfig.admin_up and not success,
1568 self.EINSTANCEFAULTYDISK, instance,
1569 "couldn't retrieve status for disk/%s on %s: %s",
1570 idx, nname, bdev_status)
1571 _ErrorIf((instanceconfig.admin_up and success and
1572 bdev_status.ldisk_status == constants.LDS_FAULTY),
1573 self.EINSTANCEFAULTYDISK, instance,
1574 "disk/%s on %s is faulty", idx, nname)
1576 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1577 """Verify if there are any unknown volumes in the cluster.
1579 The .os, .swap and backup volumes are ignored. All other volumes are
1580 reported as unknown.
1582 @type reserved: L{ganeti.utils.FieldSet}
1583 @param reserved: a FieldSet of reserved volume names
1586 for node, n_img in node_image.items():
1587 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1588 # skip non-healthy nodes
1590 for volume in n_img.volumes:
1591 test = ((node not in node_vol_should or
1592 volume not in node_vol_should[node]) and
1593 not reserved.Matches(volume))
1594 self._ErrorIf(test, self.ENODEORPHANLV, node,
1595 "volume %s is unknown", volume)
1597 def _VerifyOrphanInstances(self, instancelist, node_image):
1598 """Verify the list of running instances.
1600 This checks what instances are running but unknown to the cluster.
1603 for node, n_img in node_image.items():
1604 for o_inst in n_img.instances:
1605 test = o_inst not in instancelist
1606 self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1607 "instance %s on node %s should not exist", o_inst, node)
1609 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1610 """Verify N+1 Memory Resilience.
1612 Check that if one single node dies we can still start all the
1613 instances it was primary for.
1616 cluster_info = self.cfg.GetClusterInfo()
1617 for node, n_img in node_image.items():
1618 # This code checks that every node which is now listed as
1619 # secondary has enough memory to host all instances it is
1620 # supposed to, should a single other node in the cluster fail.
1621 # FIXME: not ready for failover to an arbitrary node
1622 # FIXME: does not support file-backed instances
1623 # WARNING: we currently take into account down instances as well
1624 # as up ones, considering that even if they're down someone
1625 # might want to start them even in the event of a node failure.
1626 for prinode, instances in n_img.sbp.items():
1628 for instance in instances:
1629 bep = cluster_info.FillBE(instance_cfg[instance])
1630 if bep[constants.BE_AUTO_BALANCE]:
1631 needed_mem += bep[constants.BE_MEMORY]
1632 test = n_img.mfree < needed_mem
1633 self._ErrorIf(test, self.ENODEN1, node,
1634 "not enough memory to accomodate instance failovers"
1635 " should node %s fail", prinode)
1637 def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum, master_files):
1639 """Verifies and computes the node required file checksums.
1641 @type ninfo: L{objects.Node}
1642 @param ninfo: the node to check
1643 @param nresult: the remote results for the node
1644 @param file_list: required list of files
1645 @param local_cksum: dictionary of local files and their checksums
1646 @param master_files: list of files that only masters should have
1650 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1652 remote_cksum = nresult.get(constants.NV_FILELIST, None)
1653 test = not isinstance(remote_cksum, dict)
1654 _ErrorIf(test, self.ENODEFILECHECK, node,
1655 "node hasn't returned file checksum data")
1659 for file_name in file_list:
1660 node_is_mc = ninfo.master_candidate
1661 must_have = (file_name not in master_files) or node_is_mc
1663 test1 = file_name not in remote_cksum
1665 test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1667 test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1668 _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1669 "file '%s' missing", file_name)
1670 _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1671 "file '%s' has wrong checksum", file_name)
1672 # not candidate and this is not a must-have file
1673 _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1674 "file '%s' should not exist on non master"
1675 " candidates (and the file is outdated)", file_name)
1676 # all good, except non-master/non-must have combination
1677 _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1678 "file '%s' should not exist"
1679 " on non master candidates", file_name)
1681 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper, drbd_map):
1683 """Verifies and the node DRBD status.
1685 @type ninfo: L{objects.Node}
1686 @param ninfo: the node to check
1687 @param nresult: the remote results for the node
1688 @param instanceinfo: the dict of instances
1689 @param drbd_helper: the configured DRBD usermode helper
1690 @param drbd_map: the DRBD map as returned by
1691 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1695 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1698 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1699 test = (helper_result is None)
1700 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1701 "no drbd usermode helper returned")
1703 status, payload = helper_result
1705 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1706 "drbd usermode helper check unsuccessful: %s", payload)
1707 test = status and (payload != drbd_helper)
1708 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1709 "wrong drbd usermode helper: %s", payload)
1711 # compute the DRBD minors
1713 for minor, instance in drbd_map[node].items():
1714 test = instance not in instanceinfo
1715 _ErrorIf(test, self.ECLUSTERCFG, None,
1716 "ghost instance '%s' in temporary DRBD map", instance)
1717 # ghost instance should not be running, but otherwise we
1718 # don't give double warnings (both ghost instance and
1719 # unallocated minor in use)
1721 node_drbd[minor] = (instance, False)
1723 instance = instanceinfo[instance]
1724 node_drbd[minor] = (instance.name, instance.admin_up)
1726 # and now check them
1727 used_minors = nresult.get(constants.NV_DRBDLIST, [])
1728 test = not isinstance(used_minors, (tuple, list))
1729 _ErrorIf(test, self.ENODEDRBD, node,
1730 "cannot parse drbd status file: %s", str(used_minors))
1732 # we cannot check drbd status
1735 for minor, (iname, must_exist) in node_drbd.items():
1736 test = minor not in used_minors and must_exist
1737 _ErrorIf(test, self.ENODEDRBD, node,
1738 "drbd minor %d of instance %s is not active", minor, iname)
1739 for minor in used_minors:
1740 test = minor not in node_drbd
1741 _ErrorIf(test, self.ENODEDRBD, node,
1742 "unallocated drbd minor %d is in use", minor)
1744 def _UpdateNodeOS(self, ninfo, nresult, nimg):
1745 """Builds the node OS structures.
1747 @type ninfo: L{objects.Node}
1748 @param ninfo: the node to check
1749 @param nresult: the remote results for the node
1750 @param nimg: the node image object
1754 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1756 remote_os = nresult.get(constants.NV_OSLIST, None)
1757 test = (not isinstance(remote_os, list) or
1758 not compat.all(isinstance(v, list) and len(v) == 7
1759 for v in remote_os))
1761 _ErrorIf(test, self.ENODEOS, node,
1762 "node hasn't returned valid OS data")
1771 for (name, os_path, status, diagnose,
1772 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1774 if name not in os_dict:
1777 # parameters is a list of lists instead of list of tuples due to
1778 # JSON lacking a real tuple type, fix it:
1779 parameters = [tuple(v) for v in parameters]
1780 os_dict[name].append((os_path, status, diagnose,
1781 set(variants), set(parameters), set(api_ver)))
1783 nimg.oslist = os_dict
1785 def _VerifyNodeOS(self, ninfo, nimg, base):
1786 """Verifies the node OS list.
1788 @type ninfo: L{objects.Node}
1789 @param ninfo: the node to check
1790 @param nimg: the node image object
1791 @param base: the 'template' node we match against (e.g. from the master)
1795 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1797 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1799 for os_name, os_data in nimg.oslist.items():
1800 assert os_data, "Empty OS status for OS %s?!" % os_name
1801 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1802 _ErrorIf(not f_status, self.ENODEOS, node,
1803 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1804 _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1805 "OS '%s' has multiple entries (first one shadows the rest): %s",
1806 os_name, utils.CommaJoin([v[0] for v in os_data]))
1807 # this will be caught in the backend too
1808 _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1809 and not f_var, self.ENODEOS, node,
1810 "OS %s with API at least %d does not declare any variant",
1811 os_name, constants.OS_API_V15)
1812 # comparisons with the 'base' image
1813 test = os_name not in base.oslist
1814 _ErrorIf(test, self.ENODEOS, node,
1815 "Extra OS %s not present on reference node (%s)",
1819 assert base.oslist[os_name], "Base node has empty OS status?"
1820 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1822 # base OS is invalid, skipping
1824 for kind, a, b in [("API version", f_api, b_api),
1825 ("variants list", f_var, b_var),
1826 ("parameters", f_param, b_param)]:
1827 _ErrorIf(a != b, self.ENODEOS, node,
1828 "OS %s %s differs from reference node %s: %s vs. %s",
1829 kind, os_name, base.name,
1830 utils.CommaJoin(a), utils.CommaJoin(b))
1832 # check any missing OSes
1833 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
1834 _ErrorIf(missing, self.ENODEOS, node,
1835 "OSes present on reference node %s but missing on this node: %s",
1836 base.name, utils.CommaJoin(missing))
1838 def _VerifyOob(self, ninfo, nresult):
1839 """Verifies out of band functionality of a node.
1841 @type ninfo: L{objects.Node}
1842 @param ninfo: the node to check
1843 @param nresult: the remote results for the node
1847 # We just have to verify the paths on master and/or master candidates
1848 # as the oob helper is invoked on the master
1849 if ((ninfo.master_candidate or ninfo.master_capable) and
1850 constants.NV_OOB_PATHS in nresult):
1851 for path_result in nresult[constants.NV_OOB_PATHS]:
1852 self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
1854 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1855 """Verifies and updates the node volume data.
1857 This function will update a L{NodeImage}'s internal structures
1858 with data from the remote call.
1860 @type ninfo: L{objects.Node}
1861 @param ninfo: the node to check
1862 @param nresult: the remote results for the node
1863 @param nimg: the node image object
1864 @param vg_name: the configured VG name
1868 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1870 nimg.lvm_fail = True
1871 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1874 elif isinstance(lvdata, basestring):
1875 _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1876 utils.SafeEncode(lvdata))
1877 elif not isinstance(lvdata, dict):
1878 _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1880 nimg.volumes = lvdata
1881 nimg.lvm_fail = False
1883 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1884 """Verifies and updates the node instance list.
1886 If the listing was successful, then updates this node's instance
1887 list. Otherwise, it marks the RPC call as failed for the instance list.
1890 @type ninfo: L{objects.Node}
1891 @param ninfo: the node to check
1892 @param nresult: the remote results for the node
1893 @param nimg: the node image object
1896 idata = nresult.get(constants.NV_INSTANCELIST, None)
1897 test = not isinstance(idata, list)
1898 self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1899 " (instancelist): %s", utils.SafeEncode(str(idata)))
1901 nimg.hyp_fail = True
1903 nimg.instances = idata
1905 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1906 """Verifies and computes a node information map
1908 @type ninfo: L{objects.Node}
1909 @param ninfo: the node to check
1910 @param nresult: the remote results for the node
1911 @param nimg: the node image object
1912 @param vg_name: the configured VG name
1916 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1918 # try to read free memory (from the hypervisor)
1919 hv_info = nresult.get(constants.NV_HVINFO, None)
1920 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1921 _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1924 nimg.mfree = int(hv_info["memory_free"])
1925 except (ValueError, TypeError):
1926 _ErrorIf(True, self.ENODERPC, node,
1927 "node returned invalid nodeinfo, check hypervisor")
1929 # FIXME: devise a free space model for file based instances as well
1930 if vg_name is not None:
1931 test = (constants.NV_VGLIST not in nresult or
1932 vg_name not in nresult[constants.NV_VGLIST])
1933 _ErrorIf(test, self.ENODELVM, node,
1934 "node didn't return data for the volume group '%s'"
1935 " - it is either missing or broken", vg_name)
1938 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1939 except (ValueError, TypeError):
1940 _ErrorIf(True, self.ENODERPC, node,
1941 "node returned invalid LVM info, check LVM status")
1943 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
1944 """Gets per-disk status information for all instances.
1946 @type nodelist: list of strings
1947 @param nodelist: Node names
1948 @type node_image: dict of (name, L{objects.Node})
1949 @param node_image: Node objects
1950 @type instanceinfo: dict of (name, L{objects.Instance})
1951 @param instanceinfo: Instance objects
1952 @rtype: {instance: {node: [(success, payload)]}}
1953 @return: a dictionary of per-instance dictionaries with nodes as
1954 keys and disk information as values; the disk information is a
1955 list of tuples (success, payload)
1958 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1961 node_disks_devonly = {}
1962 diskless_instances = set()
1963 diskless = constants.DT_DISKLESS
1965 for nname in nodelist:
1966 node_instances = list(itertools.chain(node_image[nname].pinst,
1967 node_image[nname].sinst))
1968 diskless_instances.update(inst for inst in node_instances
1969 if instanceinfo[inst].disk_template == diskless)
1970 disks = [(inst, disk)
1971 for inst in node_instances
1972 for disk in instanceinfo[inst].disks]
1975 # No need to collect data
1978 node_disks[nname] = disks
1980 # Creating copies as SetDiskID below will modify the objects and that can
1981 # lead to incorrect data returned from nodes
1982 devonly = [dev.Copy() for (_, dev) in disks]
1985 self.cfg.SetDiskID(dev, nname)
1987 node_disks_devonly[nname] = devonly
1989 assert len(node_disks) == len(node_disks_devonly)
1991 # Collect data from all nodes with disks
1992 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
1995 assert len(result) == len(node_disks)
1999 for (nname, nres) in result.items():
2000 disks = node_disks[nname]
2003 # No data from this node
2004 data = len(disks) * [(False, "node offline")]
2007 _ErrorIf(msg, self.ENODERPC, nname,
2008 "while getting disk information: %s", msg)
2010 # No data from this node
2011 data = len(disks) * [(False, msg)]
2014 for idx, i in enumerate(nres.payload):
2015 if isinstance(i, (tuple, list)) and len(i) == 2:
2018 logging.warning("Invalid result from node %s, entry %d: %s",
2020 data.append((False, "Invalid result from the remote node"))
2022 for ((inst, _), status) in zip(disks, data):
2023 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2025 # Add empty entries for diskless instances.
2026 for inst in diskless_instances:
2027 assert inst not in instdisk
2030 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2031 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2032 compat.all(isinstance(s, (tuple, list)) and
2033 len(s) == 2 for s in statuses)
2034 for inst, nnames in instdisk.items()
2035 for nname, statuses in nnames.items())
2036 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
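# Example of the instdisk mapping built above (hypothetical instance and
# node names, shape as described in the docstring):
#
#   {"inst1.example.com": {"node1.example.com": [(True, diskstatus),
#                                                (True, diskstatus)]},
#    "inst2.example.com": {"node2.example.com": [(False, "node offline")]},
#    "diskless1.example.com": {}}
#
# i.e. one entry per instance, one sub-entry per node holding that
# instance's disks, and one (success, payload) tuple per disk.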
2040 def _VerifyHVP(self, hvp_data):
2041 """Verifies locally the syntax of the hypervisor parameters.
2044 for item, hv_name, hv_params in hvp_data:
2045 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2048 hv_class = hypervisor.GetHypervisor(hv_name)
2049 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2050 hv_class.CheckParameterSyntax(hv_params)
2051 except errors.GenericError, err:
2052 self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
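# Example of the hvp_data items expected by _VerifyHVP (hypothetical
# hypervisor names and parameter values):
#
#   [("cluster", "xen-pvm", {"kernel_path": "/boot/vmlinuz"}),
#    ("os debian-image", "xen-pvm", {"kernel_path": "/boot/vmlinuz-os"}),
#    ("instance inst1.example.com", "kvm", {"acpi": True})]
#
# The first element only labels error messages, the second selects the
# hypervisor class and the third is the parameter dict being syntax-checked.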
2055 def BuildHooksEnv(self):
2058 Cluster-Verify hooks are run only in the post phase; if they fail, their
2059 output is logged in the verify output and the verification fails.
2062 all_nodes = self.cfg.GetNodeList()
2064 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2066 for node in self.cfg.GetAllNodesInfo().values():
2067 env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2069 return env, [], all_nodes
2071 def Exec(self, feedback_fn):
2072 """Verify integrity of cluster, performing various test on nodes.
2075 # This method has too many local variables. pylint: disable-msg=R0914
2077 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2078 verbose = self.op.verbose
2079 self._feedback_fn = feedback_fn
2080 feedback_fn("* Verifying global settings")
2081 for msg in self.cfg.VerifyConfig():
2082 _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2084 # Check the cluster certificates
2085 for cert_filename in constants.ALL_CERT_FILES:
2086 (errcode, msg) = _VerifyCertificate(cert_filename)
2087 _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2089 vg_name = self.cfg.GetVGName()
2090 drbd_helper = self.cfg.GetDRBDHelper()
2091 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2092 cluster = self.cfg.GetClusterInfo()
2093 nodelist = utils.NiceSort(self.cfg.GetNodeList())
2094 nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
2095 nodeinfo_byname = dict(zip(nodelist, nodeinfo))
2096 instancelist = utils.NiceSort(self.cfg.GetInstanceList())
2097 instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
2098 for iname in instancelist)
2099 groupinfo = self.cfg.GetAllNodeGroupsInfo()
2100 i_non_redundant = [] # Non redundant instances
2101 i_non_a_balanced = [] # Non auto-balanced instances
2102 n_offline = 0 # Count of offline nodes
2103 n_drained = 0 # Count of nodes being drained
2104 node_vol_should = {}
2106 # FIXME: verify OS list
2107 # do local checksums
2108 master_files = [constants.CLUSTER_CONF_FILE]
2109 master_node = self.master_node = self.cfg.GetMasterNode()
2110 master_ip = self.cfg.GetMasterIP()
2112 file_names = ssconf.SimpleStore().GetFileList()
2113 file_names.extend(constants.ALL_CERT_FILES)
2114 file_names.extend(master_files)
2115 if cluster.modify_etc_hosts:
2116 file_names.append(constants.ETC_HOSTS)
2118 local_checksums = utils.FingerprintFiles(file_names)
2120 # Compute the set of hypervisor parameters
2122 for hv_name in hypervisors:
2123 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
2124 for os_name, os_hvp in cluster.os_hvp.items():
2125 for hv_name, hv_params in os_hvp.items():
2128 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
2129 hvp_data.append(("os %s" % os_name, hv_name, full_params))
2130 # TODO: collapse identical parameter values in a single one
2131 for instance in instanceinfo.values():
2132 if not instance.hvparams:
2134 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
2135 cluster.FillHV(instance)))
2136 # and verify them locally
2137 self._VerifyHVP(hvp_data)
2139 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2140 node_verify_param = {
2141 constants.NV_FILELIST: file_names,
2142 constants.NV_NODELIST: [node.name for node in nodeinfo
2143 if not node.offline],
2144 constants.NV_HYPERVISOR: hypervisors,
2145 constants.NV_HVPARAMS: hvp_data,
2146 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2147 node.secondary_ip) for node in nodeinfo
2148 if not node.offline],
2149 constants.NV_INSTANCELIST: hypervisors,
2150 constants.NV_VERSION: None,
2151 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2152 constants.NV_NODESETUP: None,
2153 constants.NV_TIME: None,
2154 constants.NV_MASTERIP: (master_node, master_ip),
2155 constants.NV_OSLIST: None,
2156 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2159 if vg_name is not None:
2160 node_verify_param[constants.NV_VGLIST] = None
2161 node_verify_param[constants.NV_LVLIST] = vg_name
2162 node_verify_param[constants.NV_PVLIST] = [vg_name]
2163 node_verify_param[constants.NV_DRBDLIST] = None
2166 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2168 # Build our expected cluster state
2169 node_image = dict((node.name, self.NodeImage(offline=node.offline,
2171 vm_capable=node.vm_capable))
2172 for node in nodeinfo)
2176 for node in nodeinfo:
2177 path = _SupportsOob(self.cfg, node)
2178 if path and path not in oob_paths:
2179 oob_paths.append(path)
2182 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2184 for instance in instancelist:
2185 inst_config = instanceinfo[instance]
2187 for nname in inst_config.all_nodes:
2188 if nname not in node_image:
2190 gnode = self.NodeImage(name=nname)
2192 node_image[nname] = gnode
2194 inst_config.MapLVsByNode(node_vol_should)
2196 pnode = inst_config.primary_node
2197 node_image[pnode].pinst.append(instance)
2199 for snode in inst_config.secondary_nodes:
2200 nimg = node_image[snode]
2201 nimg.sinst.append(instance)
2202 if pnode not in nimg.sbp:
2203 nimg.sbp[pnode] = []
2204 nimg.sbp[pnode].append(instance)
2206 # At this point, we have the in-memory data structures complete,
2207 # except for the runtime information, which we'll gather next
2209 # Due to the way our RPC system works, exact response times cannot be
2210 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2211 time before and after executing the request, we can at least have a time window.
2213 nvinfo_starttime = time.time()
2214 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2215 self.cfg.GetClusterName())
2216 nvinfo_endtime = time.time()
2218 all_drbd_map = self.cfg.ComputeDRBDMap()
2220 feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
2221 instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
2223 feedback_fn("* Verifying node status")
2227 for node_i in nodeinfo:
2229 nimg = node_image[node]
2233 feedback_fn("* Skipping offline node %s" % (node,))
2237 if node == master_node:
2239 elif node_i.master_candidate:
2240 ntype = "master candidate"
2241 elif node_i.drained:
2247 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2249 msg = all_nvinfo[node].fail_msg
2250 _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2252 nimg.rpc_fail = True
2255 nresult = all_nvinfo[node].payload
2257 nimg.call_ok = self._VerifyNode(node_i, nresult)
2258 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2259 self._VerifyNodeNetwork(node_i, nresult)
2260 self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2263 self._VerifyOob(node_i, nresult)
2266 self._VerifyNodeLVM(node_i, nresult, vg_name)
2267 self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2270 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2271 self._UpdateNodeInstances(node_i, nresult, nimg)
2272 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2273 self._UpdateNodeOS(node_i, nresult, nimg)
2274 if not nimg.os_fail:
2275 if refos_img is None:
2277 self._VerifyNodeOS(node_i, nimg, refos_img)
2279 feedback_fn("* Verifying instance status")
2280 for instance in instancelist:
2282 feedback_fn("* Verifying instance %s" % instance)
2283 inst_config = instanceinfo[instance]
2284 self._VerifyInstance(instance, inst_config, node_image,
2286 inst_nodes_offline = []
2288 pnode = inst_config.primary_node
2289 pnode_img = node_image[pnode]
2290 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2291 self.ENODERPC, pnode, "instance %s, connection to"
2292 " primary node failed", instance)
2294 if pnode_img.offline:
2295 inst_nodes_offline.append(pnode)
2297 # If the instance is non-redundant we cannot survive losing its primary
2298 # node, so we are not N+1 compliant. On the other hand we have no disk
2299 # templates with more than one secondary so that situation is not well supported either.
2301 # FIXME: does not support file-backed instances
2302 if not inst_config.secondary_nodes:
2303 i_non_redundant.append(instance)
2305 _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2306 instance, "instance has multiple secondary nodes: %s",
2307 utils.CommaJoin(inst_config.secondary_nodes),
2308 code=self.ETYPE_WARNING)
2310 if inst_config.disk_template in constants.DTS_NET_MIRROR:
2311 pnode = inst_config.primary_node
2312 instance_nodes = utils.NiceSort(inst_config.all_nodes)
2313 instance_groups = {}
2315 for node in instance_nodes:
2316 instance_groups.setdefault(nodeinfo_byname[node].group,
2320 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2321 # Sort so that we always list the primary node first.
2322 for group, nodes in sorted(instance_groups.items(),
2323 key=lambda (_, nodes): pnode in nodes,
2326 self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2327 instance, "instance has primary and secondary nodes in"
2328 " different groups: %s", utils.CommaJoin(pretty_list),
2329 code=self.ETYPE_WARNING)
2331 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2332 i_non_a_balanced.append(instance)
2334 for snode in inst_config.secondary_nodes:
2335 s_img = node_image[snode]
2336 _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2337 "instance %s, connection to secondary node failed", instance)
2340 inst_nodes_offline.append(snode)
2342 # warn that the instance lives on offline nodes
2343 _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2344 "instance lives on offline node(s) %s",
2345 utils.CommaJoin(inst_nodes_offline))
2346 # ... or ghost/non-vm_capable nodes
2347 for node in inst_config.all_nodes:
2348 _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2349 "instance lives on ghost node %s", node)
2350 _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2351 instance, "instance lives on non-vm_capable node %s", node)
2353 feedback_fn("* Verifying orphan volumes")
2354 reserved = utils.FieldSet(*cluster.reserved_lvs)
2355 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2357 feedback_fn("* Verifying orphan instances")
2358 self._VerifyOrphanInstances(instancelist, node_image)
2360 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2361 feedback_fn("* Verifying N+1 Memory redundancy")
2362 self._VerifyNPlusOneMemory(node_image, instanceinfo)
2364 feedback_fn("* Other Notes")
2366 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
2367 % len(i_non_redundant))
2369 if i_non_a_balanced:
2370 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
2371 % len(i_non_a_balanced))
2374 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
2377 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
2381 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2382 """Analyze the post-hooks' result
2384 This method analyses the hook result, handles it, and sends some
2385 nicely-formatted feedback back to the user.
2387 @param phase: one of L{constants.HOOKS_PHASE_POST} or
2388 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2389 @param hooks_results: the results of the multi-node hooks rpc call
2390 @param feedback_fn: function used to send feedback back to the caller
2391 @param lu_result: previous Exec result
2392 @return: the new Exec result, based on the previous result
2396 # We only really run POST phase hooks, and are only interested in their results
2398 if phase == constants.HOOKS_PHASE_POST:
2399 # Used to change hooks' output to proper indentation
2400 feedback_fn("* Hooks Results")
2401 assert hooks_results, "invalid result from hooks"
2403 for node_name in hooks_results:
2404 res = hooks_results[node_name]
2406 test = msg and not res.offline
2407 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2408 "Communication failure in hooks execution: %s", msg)
2409 if res.offline or msg:
2410 # No need to investigate payload if node is offline or gave an error.
2411 # override manually lu_result here as _ErrorIf only
2412 # overrides self.bad
2415 for script, hkr, output in res.payload:
2416 test = hkr == constants.HKR_FAIL
2417 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2418 "Script %s failed, output:", script)
2420 output = self._HOOKS_INDENT_RE.sub(' ', output)
2421 feedback_fn("%s" % output)
2427 class LUClusterVerifyDisks(NoHooksLU):
2428 """Verifies the cluster disks status.
2433 def ExpandNames(self):
2434 self.needed_locks = {
2435 locking.LEVEL_NODE: locking.ALL_SET,
2436 locking.LEVEL_INSTANCE: locking.ALL_SET,
2438 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2440 def Exec(self, feedback_fn):
2441 """Verify integrity of cluster disks.
2443 @rtype: tuple of three items
2444 @return: a tuple of (dict of node-to-node_error, list of instances
2445 which need activate-disks, dict of instance: (node, volume) for
2449 result = res_nodes, res_instances, res_missing = {}, [], {}
2451 nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
2452 instances = self.cfg.GetAllInstancesInfo().values()
2455 for inst in instances:
2457 if not inst.admin_up:
2459 inst.MapLVsByNode(inst_lvs)
2460 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2461 for node, vol_list in inst_lvs.iteritems():
2462 for vol in vol_list:
2463 nv_dict[(node, vol)] = inst
2468 node_lvs = self.rpc.call_lv_list(nodes, [])
2469 for node, node_res in node_lvs.items():
2470 if node_res.offline:
2472 msg = node_res.fail_msg
2474 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2475 res_nodes[node] = msg
2478 lvs = node_res.payload
2479 for lv_name, (_, _, lv_online) in lvs.items():
2480 inst = nv_dict.pop((node, lv_name), None)
2481 if (not lv_online and inst is not None
2482 and inst.name not in res_instances):
2483 res_instances.append(inst.name)
2485 # any leftover items in nv_dict are missing LVs, let's arrange the data better
2487 for key, inst in nv_dict.iteritems():
2488 if inst.name not in res_missing:
2489 res_missing[inst.name] = []
2490 res_missing[inst.name].append(key)
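# Example of the result tuple built above (hypothetical names): one node
# that failed the LV listing, one instance needing activate-disks and one
# instance with a missing volume:
#
#   ({"node3.example.com": "Error while ..."},
#    ["inst1.example.com"],
#    {"inst2.example.com": [("node2.example.com", "xenvg/inst2-disk0")]})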
2495 class LUClusterRepairDiskSizes(NoHooksLU):
2496 """Verifies the cluster disks sizes.
2501 def ExpandNames(self):
2502 if self.op.instances:
2503 self.wanted_names = []
2504 for name in self.op.instances:
2505 full_name = _ExpandInstanceName(self.cfg, name)
2506 self.wanted_names.append(full_name)
2507 self.needed_locks = {
2508 locking.LEVEL_NODE: [],
2509 locking.LEVEL_INSTANCE: self.wanted_names,
2511 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2513 self.wanted_names = None
2514 self.needed_locks = {
2515 locking.LEVEL_NODE: locking.ALL_SET,
2516 locking.LEVEL_INSTANCE: locking.ALL_SET,
2518 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2520 def DeclareLocks(self, level):
2521 if level == locking.LEVEL_NODE and self.wanted_names is not None:
2522 self._LockInstancesNodes(primary_only=True)
2524 def CheckPrereq(self):
2525 """Check prerequisites.
2527 This only checks the optional instance list against the existing names.
2530 if self.wanted_names is None:
2531 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2533 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2534 in self.wanted_names]
2536 def _EnsureChildSizes(self, disk):
2537 """Ensure children of the disk have the needed disk size.
2539 This is valid mainly for DRBD8 and fixes an issue where the
2540 children have smaller disk size.
2542 @param disk: an L{ganeti.objects.Disk} object
2545 if disk.dev_type == constants.LD_DRBD8:
2546 assert disk.children, "Empty children for DRBD8?"
2547 fchild = disk.children[0]
2548 mismatch = fchild.size < disk.size
2550 self.LogInfo("Child disk has size %d, parent %d, fixing",
2551 fchild.size, disk.size)
2552 fchild.size = disk.size
2554 # and we recurse on this child only, not on the metadev
2555 return self._EnsureChildSizes(fchild) or mismatch
2559 def Exec(self, feedback_fn):
2560 """Verify the size of cluster disks.
2563 # TODO: check child disks too
2564 # TODO: check differences in size between primary/secondary nodes
2566 for instance in self.wanted_instances:
2567 pnode = instance.primary_node
2568 if pnode not in per_node_disks:
2569 per_node_disks[pnode] = []
2570 for idx, disk in enumerate(instance.disks):
2571 per_node_disks[pnode].append((instance, idx, disk))
2574 for node, dskl in per_node_disks.items():
2575 newl = [v[2].Copy() for v in dskl]
2577 self.cfg.SetDiskID(dsk, node)
2578 result = self.rpc.call_blockdev_getsizes(node, newl)
2580 self.LogWarning("Failure in blockdev_getsizes call to node"
2581 " %s, ignoring", node)
2583 if len(result.data) != len(dskl):
2584 self.LogWarning("Invalid result from node %s, ignoring node results",
2587 for ((instance, idx, disk), size) in zip(dskl, result.data):
2589 self.LogWarning("Disk %d of instance %s did not return size"
2590 " information, ignoring", idx, instance.name)
2592 if not isinstance(size, (int, long)):
2593 self.LogWarning("Disk %d of instance %s did not return valid"
2594 " size information, ignoring", idx, instance.name)
2597 if size != disk.size:
2598 self.LogInfo("Disk %d of instance %s has mismatched size,"
2599 " correcting: recorded %d, actual %d", idx,
2600 instance.name, disk.size, size)
2602 self.cfg.Update(instance, feedback_fn)
2603 changed.append((instance.name, idx, size))
2604 if self._EnsureChildSizes(disk):
2605 self.cfg.Update(instance, feedback_fn)
2606 changed.append((instance.name, idx, disk.size))
2610 class LUClusterRename(LogicalUnit):
2611 """Rename the cluster.
2614 HPATH = "cluster-rename"
2615 HTYPE = constants.HTYPE_CLUSTER
2617 def BuildHooksEnv(self):
2622 "OP_TARGET": self.cfg.GetClusterName(),
2623 "NEW_NAME": self.op.name,
2625 mn = self.cfg.GetMasterNode()
2626 all_nodes = self.cfg.GetNodeList()
2627 return env, [mn], all_nodes
2629 def CheckPrereq(self):
2630 """Verify that the passed name is a valid one.
2633 hostname = netutils.GetHostname(name=self.op.name,
2634 family=self.cfg.GetPrimaryIPFamily())
2636 new_name = hostname.name
2637 self.ip = new_ip = hostname.ip
2638 old_name = self.cfg.GetClusterName()
2639 old_ip = self.cfg.GetMasterIP()
2640 if new_name == old_name and new_ip == old_ip:
2641 raise errors.OpPrereqError("Neither the name nor the IP address of the"
2642 " cluster has changed",
2644 if new_ip != old_ip:
2645 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2646 raise errors.OpPrereqError("The given cluster IP address (%s) is"
2647 " reachable on the network" %
2648 new_ip, errors.ECODE_NOTUNIQUE)
2650 self.op.name = new_name
2652 def Exec(self, feedback_fn):
2653 """Rename the cluster.
2656 clustername = self.op.name
2659 # shutdown the master IP
2660 master = self.cfg.GetMasterNode()
2661 result = self.rpc.call_node_stop_master(master, False)
2662 result.Raise("Could not disable the master role")
2665 cluster = self.cfg.GetClusterInfo()
2666 cluster.cluster_name = clustername
2667 cluster.master_ip = ip
2668 self.cfg.Update(cluster, feedback_fn)
2670 # update the known hosts file
2671 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2672 node_list = self.cfg.GetOnlineNodeList()
2674 node_list.remove(master)
2677 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
2679 result = self.rpc.call_node_start_master(master, False, False)
2680 msg = result.fail_msg
2682 self.LogWarning("Could not re-enable the master role on"
2683 " the master, please restart manually: %s", msg)
2688 class LUClusterSetParams(LogicalUnit):
2689 """Change the parameters of the cluster.
2692 HPATH = "cluster-modify"
2693 HTYPE = constants.HTYPE_CLUSTER
2696 def CheckArguments(self):
2700 if self.op.uid_pool:
2701 uidpool.CheckUidPool(self.op.uid_pool)
2703 if self.op.add_uids:
2704 uidpool.CheckUidPool(self.op.add_uids)
2706 if self.op.remove_uids:
2707 uidpool.CheckUidPool(self.op.remove_uids)
2709 def ExpandNames(self):
2710 # FIXME: in the future maybe other cluster params won't require checking on
2711 # all nodes to be modified.
2712 self.needed_locks = {
2713 locking.LEVEL_NODE: locking.ALL_SET,
2715 self.share_locks[locking.LEVEL_NODE] = 1
2717 def BuildHooksEnv(self):
2722 "OP_TARGET": self.cfg.GetClusterName(),
2723 "NEW_VG_NAME": self.op.vg_name,
2725 mn = self.cfg.GetMasterNode()
2726 return env, [mn], [mn]
2728 def CheckPrereq(self):
2729 """Check prerequisites.
2731 This checks whether the given params don't conflict and
2732 if the given volume group is valid.
2735 if self.op.vg_name is not None and not self.op.vg_name:
2736 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2737 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2738 " instances exist", errors.ECODE_INVAL)
2740 if self.op.drbd_helper is not None and not self.op.drbd_helper:
2741 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2742 raise errors.OpPrereqError("Cannot disable drbd helper while"
2743 " drbd-based instances exist",
2746 node_list = self.acquired_locks[locking.LEVEL_NODE]
2748 # if vg_name not None, checks given volume group on all nodes
2750 vglist = self.rpc.call_vg_list(node_list)
2751 for node in node_list:
2752 msg = vglist[node].fail_msg
2754 # ignoring down node
2755 self.LogWarning("Error while gathering data on node %s"
2756 " (ignoring node): %s", node, msg)
2758 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2760 constants.MIN_VG_SIZE)
2762 raise errors.OpPrereqError("Error on node '%s': %s" %
2763 (node, vgstatus), errors.ECODE_ENVIRON)
2765 if self.op.drbd_helper:
2766 # checks given drbd helper on all nodes
2767 helpers = self.rpc.call_drbd_helper(node_list)
2768 for node in node_list:
2769 ninfo = self.cfg.GetNodeInfo(node)
2771 self.LogInfo("Not checking drbd helper on offline node %s", node)
2773 msg = helpers[node].fail_msg
2775 raise errors.OpPrereqError("Error checking drbd helper on node"
2776 " '%s': %s" % (node, msg),
2777 errors.ECODE_ENVIRON)
2778 node_helper = helpers[node].payload
2779 if node_helper != self.op.drbd_helper:
2780 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2781 (node, node_helper), errors.ECODE_ENVIRON)
2783 self.cluster = cluster = self.cfg.GetClusterInfo()
2784 # validate params changes
2785 if self.op.beparams:
2786 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2787 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2789 if self.op.ndparams:
2790 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2791 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2793 if self.op.nicparams:
2794 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2795 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2796 objects.NIC.CheckParameterSyntax(self.new_nicparams)
2799 # check all instances for consistency
2800 for instance in self.cfg.GetAllInstancesInfo().values():
2801 for nic_idx, nic in enumerate(instance.nics):
2802 params_copy = copy.deepcopy(nic.nicparams)
2803 params_filled = objects.FillDict(self.new_nicparams, params_copy)
2805 # check parameter syntax
2807 objects.NIC.CheckParameterSyntax(params_filled)
2808 except errors.ConfigurationError, err:
2809 nic_errors.append("Instance %s, nic/%d: %s" %
2810 (instance.name, nic_idx, err))
2812 # if we're moving instances to routed, check that they have an ip
2813 target_mode = params_filled[constants.NIC_MODE]
2814 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2815 nic_errors.append("Instance %s, nic/%d: routed NIC with no IP" %
2816 (instance.name, nic_idx))
2818 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2819 "\n".join(nic_errors))
2821 # hypervisor list/parameters
2822 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2823 if self.op.hvparams:
2824 for hv_name, hv_dict in self.op.hvparams.items():
2825 if hv_name not in self.new_hvparams:
2826 self.new_hvparams[hv_name] = hv_dict
2828 self.new_hvparams[hv_name].update(hv_dict)
2830 # os hypervisor parameters
2831 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2833 for os_name, hvs in self.op.os_hvp.items():
2834 if os_name not in self.new_os_hvp:
2835 self.new_os_hvp[os_name] = hvs
2837 for hv_name, hv_dict in hvs.items():
2838 if hv_name not in self.new_os_hvp[os_name]:
2839 self.new_os_hvp[os_name][hv_name] = hv_dict
2841 self.new_os_hvp[os_name][hv_name].update(hv_dict)
2844 self.new_osp = objects.FillDict(cluster.osparams, {})
2845 if self.op.osparams:
2846 for os_name, osp in self.op.osparams.items():
2847 if os_name not in self.new_osp:
2848 self.new_osp[os_name] = {}
2850 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2853 if not self.new_osp[os_name]:
2854 # we removed all parameters
2855 del self.new_osp[os_name]
2857 # check the parameter validity (remote check)
2858 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2859 os_name, self.new_osp[os_name])
2861 # changes to the hypervisor list
2862 if self.op.enabled_hypervisors is not None:
2863 self.hv_list = self.op.enabled_hypervisors
2864 for hv in self.hv_list:
2865 # if the hypervisor doesn't already exist in the cluster
2866 # hvparams, we initialize it to empty, and then (in both
2867 # cases) we make sure to fill the defaults, as we might not
2868 # have a complete defaults list if the hypervisor wasn't enabled before
2870 if hv not in new_hvp:
2872 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2873 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2875 self.hv_list = cluster.enabled_hypervisors
2877 if self.op.hvparams or self.op.enabled_hypervisors is not None:
2878 # either the enabled list has changed, or the parameters have, validate
2879 for hv_name, hv_params in self.new_hvparams.items():
2880 if ((self.op.hvparams and hv_name in self.op.hvparams) or
2881 (self.op.enabled_hypervisors and
2882 hv_name in self.op.enabled_hypervisors)):
2883 # either this is a new hypervisor, or its parameters have changed
2884 hv_class = hypervisor.GetHypervisor(hv_name)
2885 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2886 hv_class.CheckParameterSyntax(hv_params)
2887 _CheckHVParams(self, node_list, hv_name, hv_params)
2890 # no need to check any newly-enabled hypervisors, since the
2891 # defaults have already been checked in the above code-block
2892 for os_name, os_hvp in self.new_os_hvp.items():
2893 for hv_name, hv_params in os_hvp.items():
2894 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2895 # we need to fill in the new os_hvp on top of the actual hv_p
2896 cluster_defaults = self.new_hvparams.get(hv_name, {})
2897 new_osp = objects.FillDict(cluster_defaults, hv_params)
2898 hv_class = hypervisor.GetHypervisor(hv_name)
2899 hv_class.CheckParameterSyntax(new_osp)
2900 _CheckHVParams(self, node_list, hv_name, new_osp)
2902 if self.op.default_iallocator:
2903 alloc_script = utils.FindFile(self.op.default_iallocator,
2904 constants.IALLOCATOR_SEARCH_PATH,
2906 if alloc_script is None:
2907 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2908 " specified" % self.op.default_iallocator,
2911 def Exec(self, feedback_fn):
2912 """Change the parameters of the cluster.
2915 if self.op.vg_name is not None:
2916 new_volume = self.op.vg_name
2919 if new_volume != self.cfg.GetVGName():
2920 self.cfg.SetVGName(new_volume)
2922 feedback_fn("Cluster LVM configuration already in desired"
2923 " state, not changing")
2924 if self.op.drbd_helper is not None:
2925 new_helper = self.op.drbd_helper
2928 if new_helper != self.cfg.GetDRBDHelper():
2929 self.cfg.SetDRBDHelper(new_helper)
2931 feedback_fn("Cluster DRBD helper already in desired state,"
2933 if self.op.hvparams:
2934 self.cluster.hvparams = self.new_hvparams
2936 self.cluster.os_hvp = self.new_os_hvp
2937 if self.op.enabled_hypervisors is not None:
2938 self.cluster.hvparams = self.new_hvparams
2939 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2940 if self.op.beparams:
2941 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2942 if self.op.nicparams:
2943 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2944 if self.op.osparams:
2945 self.cluster.osparams = self.new_osp
2946 if self.op.ndparams:
2947 self.cluster.ndparams = self.new_ndparams
2949 if self.op.candidate_pool_size is not None:
2950 self.cluster.candidate_pool_size = self.op.candidate_pool_size
2951 # we need to update the pool size here, otherwise the save will fail
2952 _AdjustCandidatePool(self, [])
2954 if self.op.maintain_node_health is not None:
2955 self.cluster.maintain_node_health = self.op.maintain_node_health
2957 if self.op.prealloc_wipe_disks is not None:
2958 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
2960 if self.op.add_uids is not None:
2961 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2963 if self.op.remove_uids is not None:
2964 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2966 if self.op.uid_pool is not None:
2967 self.cluster.uid_pool = self.op.uid_pool
2969 if self.op.default_iallocator is not None:
2970 self.cluster.default_iallocator = self.op.default_iallocator
2972 if self.op.reserved_lvs is not None:
2973 self.cluster.reserved_lvs = self.op.reserved_lvs
2975 def helper_os(aname, mods, desc):
2977 lst = getattr(self.cluster, aname)
2978 for key, val in mods:
2979 if key == constants.DDM_ADD:
2981 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
2984 elif key == constants.DDM_REMOVE:
2988 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
2990 raise errors.ProgrammerError("Invalid modification '%s'" % key)
2992 if self.op.hidden_os:
2993 helper_os("hidden_os", self.op.hidden_os, "hidden")
2995 if self.op.blacklisted_os:
2996 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
2998 if self.op.master_netdev:
2999 master = self.cfg.GetMasterNode()
3000 feedback_fn("Shutting down master ip on the current netdev (%s)" %
3001 self.cluster.master_netdev)
3002 result = self.rpc.call_node_stop_master(master, False)
3003 result.Raise("Could not disable the master ip")
3004 feedback_fn("Changing master_netdev from %s to %s" %
3005 (self.cluster.master_netdev, self.op.master_netdev))
3006 self.cluster.master_netdev = self.op.master_netdev
3008 self.cfg.Update(self.cluster, feedback_fn)
3010 if self.op.master_netdev:
3011 feedback_fn("Starting the master ip on the new master netdev (%s)" %
3012 self.op.master_netdev)
3013 result = self.rpc.call_node_start_master(master, False, False)
3015 self.LogWarning("Could not re-enable the master ip on"
3016 " the master, please restart manually: %s",
3020 def _UploadHelper(lu, nodes, fname):
3021 """Helper for uploading a file and showing warnings.
3024 if os.path.exists(fname):
3025 result = lu.rpc.call_upload_file(nodes, fname)
3026 for to_node, to_result in result.items():
3027 msg = to_result.fail_msg
3029 msg = ("Copy of file %s to node %s failed: %s" %
3030 (fname, to_node, msg))
3031 lu.proc.LogWarning(msg)
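# Typical use (hypothetical file and node names): push a single ancillary
# file to a list of nodes, logging - but not raising - per-node failures:
#
#   _UploadHelper(self, ["node1.example.com", "node2.example.com"],
#                 constants.ETC_HOSTS)
#
# As the existence check above suggests, files missing on the master are
# simply skipped.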
3034 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
3035 """Distribute additional files which are part of the cluster configuration.
3037 ConfigWriter takes care of distributing the config and ssconf files, but
3038 there are more files which should be distributed to all nodes. This function
3039 makes sure those are copied.
3041 @param lu: calling logical unit
3042 @param additional_nodes: list of nodes not in the config to distribute to
3043 @type additional_vm: boolean
3044 @param additional_vm: whether the additional nodes are vm-capable or not
3047 # 1. Gather target nodes
3048 myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
3049 dist_nodes = lu.cfg.GetOnlineNodeList()
3050 nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
3051 vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
3052 if additional_nodes is not None:
3053 dist_nodes.extend(additional_nodes)
3055 vm_nodes.extend(additional_nodes)
3056 if myself.name in dist_nodes:
3057 dist_nodes.remove(myself.name)
3058 if myself.name in vm_nodes:
3059 vm_nodes.remove(myself.name)
3061 # 2. Gather files to distribute
3062 dist_files = set([constants.ETC_HOSTS,
3063 constants.SSH_KNOWN_HOSTS_FILE,
3064 constants.RAPI_CERT_FILE,
3065 constants.RAPI_USERS_FILE,
3066 constants.CONFD_HMAC_KEY,
3067 constants.CLUSTER_DOMAIN_SECRET_FILE,
3071 enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
3072 for hv_name in enabled_hypervisors:
3073 hv_class = hypervisor.GetHypervisor(hv_name)
3074 vm_files.update(hv_class.GetAncillaryFiles())
3076 # 3. Perform the files upload
3077 for fname in dist_files:
3078 _UploadHelper(lu, dist_nodes, fname)
3079 for fname in vm_files:
3080 _UploadHelper(lu, vm_nodes, fname)
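# Typical use (hypothetical caller, e.g. when adding a node): include the
# new node in the distribution even though it is not yet in the config:
#
#   _RedistributeAncillaryFiles(self, additional_nodes=[new_node.name],
#                               additional_vm=new_node.vm_capable)
#
# new_node is assumed to be an objects.Node instance known to the caller.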
3083 class LUClusterRedistConf(NoHooksLU):
3084 """Force the redistribution of cluster configuration.
3086 This is a very simple LU.
3091 def ExpandNames(self):
3092 self.needed_locks = {
3093 locking.LEVEL_NODE: locking.ALL_SET,
3095 self.share_locks[locking.LEVEL_NODE] = 1
3097 def Exec(self, feedback_fn):
3098 """Redistribute the configuration.
3101 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
3102 _RedistributeAncillaryFiles(self)
3105 def _WaitForSync(lu, instance, disks=None, oneshot=False):
3106 """Sleep and poll for an instance's disk to sync.
3109 if not instance.disks or disks is not None and not disks:
3112 disks = _ExpandCheckDisks(instance, disks)
3115 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3117 node = instance.primary_node
3120 lu.cfg.SetDiskID(dev, node)
3122 # TODO: Convert to utils.Retry
3125 degr_retries = 10 # in seconds, as we sleep 1 second each time
3129 cumul_degraded = False
3130 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3131 msg = rstats.fail_msg
3133 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3136 raise errors.RemoteError("Can't contact node %s for mirror data,"
3137 " aborting." % node)
3140 rstats = rstats.payload
3142 for i, mstat in enumerate(rstats):
3144 lu.LogWarning("Can't compute data for node %s/%s",
3145 node, disks[i].iv_name)
3148 cumul_degraded = (cumul_degraded or
3149 (mstat.is_degraded and mstat.sync_percent is None))
3150 if mstat.sync_percent is not None:
3152 if mstat.estimated_time is not None:
3153 rem_time = ("%s remaining (estimated)" %
3154 utils.FormatSeconds(mstat.estimated_time))
3155 max_time = mstat.estimated_time
3157 rem_time = "no time estimate"
3158 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3159 (disks[i].iv_name, mstat.sync_percent, rem_time))
3161 # if we're done but degraded, let's do a few small retries, to
3162 # make sure we see a stable and not transient situation; therefore
3163 # we force restart of the loop
3164 if (done or oneshot) and cumul_degraded and degr_retries > 0:
3165 logging.info("Degraded disks found, %d retries left", degr_retries)
3173 time.sleep(min(60, max_time))
3176 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3177 return not cumul_degraded
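# Typical use (hypothetical caller): block until the instance's disks have
# finished (re)syncing and warn if they stay degraded:
#
#   disk_ok = _WaitForSync(self, instance)
#   if not disk_ok:
#     self.LogWarning("Disks of instance %s are degraded", instance.name)
#
# With oneshot=True the status is reported once instead of looping until
# the synchronisation completes.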
3180 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3181 """Check that mirrors are not degraded.
3183 The ldisk parameter, if True, will change the test from the
3184 is_degraded attribute (which represents overall non-ok status for
3185 the device(s)) to the ldisk (representing the local storage status).
3188 lu.cfg.SetDiskID(dev, node)
3192 if on_primary or dev.AssembleOnSecondary():
3193 rstats = lu.rpc.call_blockdev_find(node, dev)
3194 msg = rstats.fail_msg
3196 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3198 elif not rstats.payload:
3199 lu.LogWarning("Can't find disk on node %s", node)
3203 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3205 result = result and not rstats.payload.is_degraded
3208 for child in dev.children:
3209 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
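# Typical use (hypothetical caller): verify every disk of an instance on
# its primary node before a disruptive operation, using the stricter
# local-storage check:
#
#   for dev in instance.disks:
#     if not _CheckDiskConsistency(self, dev, instance.primary_node,
#                                  True, ldisk=True):
#       raise errors.OpExecError("Disk %s is degraded" % dev.iv_name)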
3214 class LUOobCommand(NoHooksLU):
3215 """Logical unit for OOB handling.
3220 def CheckPrereq(self):
3221 """Check prerequisites.
3224 - the node exists in the configuration
3227 Any errors are signaled by raising errors.OpPrereqError.
3231 master_node = self.cfg.GetMasterNode()
3232 for node_name in self.op.node_names:
3233 node = self.cfg.GetNodeInfo(node_name)
3236 raise errors.OpPrereqError("Node %s not found" % node_name,
3239 self.nodes.append(node)
3241 if (not self.op.ignore_status and
3242 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
3243 raise errors.OpPrereqError(("Cannot power off node %s because it is"
3244 " not marked offline") % node_name,
3247 if self.op.command in (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE):
3248 # This does two things: it checks whether the master is in the list, and,
3249 # if so and force_master is set, it moves it to the end so the master is done last
3251 self.op.node_names.remove(master_node)
3255 if self.op.force_master:
3256 self.op.node_names.append(master_node)
3258 self.LogWarning("Master %s was skipped, use the force master"
3259 " option to operate on the master too",
3261 if not self.op.node_names:
3262 raise errors.OpPrereqError("No nodes left to operate on, aborting",
3265 assert (master_node not in self.op.node_names or
3266 self.op.node_names[-1] == master_node)
3268 def ExpandNames(self):
3269 """Gather locks we need.
3272 if self.op.node_names:
3273 self.op.node_names = [_ExpandNodeName(self.cfg, name)
3274 for name in self.op.node_names]
3276 self.op.node_names = self.cfg.GetNodeList()
3278 self.needed_locks = {
3279 locking.LEVEL_NODE: self.op.node_names,
3282 def Exec(self, feedback_fn):
3283 """Execute OOB and return result if we expect any.
3286 master_node = self.cfg.GetMasterNode()
3289 for node in self.nodes:
3290 node_entry = [(constants.RS_NORMAL, node.name)]
3291 ret.append(node_entry)
3293 oob_program = _SupportsOob(self.cfg, node)
3296 node_entry.append((constants.RS_UNAVAIL, None))
3299 logging.info("Executing out-of-band command '%s' using '%s' on %s",
3300 self.op.command, oob_program, node.name)
3301 result = self.rpc.call_run_oob(master_node, oob_program,
3302 self.op.command, node.name,
3306 self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
3307 node.name, result.fail_msg)
3308 node_entry.append((constants.RS_NODATA, None))
3311 self._CheckPayload(result)
3312 except errors.OpExecError, err:
3313 self.LogWarning("The payload returned by '%s' is not valid: %s",
3315 node_entry.append((constants.RS_NODATA, None))
3317 if self.op.command == constants.OOB_HEALTH:
3318 # For health we should log important events
3319 for item, status in result.payload:
3320 if status in [constants.OOB_STATUS_WARNING,
3321 constants.OOB_STATUS_CRITICAL]:
3322 self.LogWarning("On node '%s' item '%s' has status '%s'",
3323 node.name, item, status)
3325 if self.op.command == constants.OOB_POWER_ON:
3327 elif self.op.command == constants.OOB_POWER_OFF:
3328 node.powered = False
3329 elif self.op.command == constants.OOB_POWER_STATUS:
3330 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
3331 if powered != node.powered:
3332 logging.warning(("Recorded power state (%s) of node '%s' does not"
3333 " match actual power state (%s)"), node.powered,
3336 # For configuration changing commands we should update the node
3337 if self.op.command in (constants.OOB_POWER_ON,
3338 constants.OOB_POWER_OFF):
3339 self.cfg.Update(node, feedback_fn)
3341 node_entry.append((constants.RS_NORMAL, result.payload))
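# Example of the Exec result for a single-node "power-status" command
# (hypothetical node name): one entry per node, each a list of
# (status, data) tuples starting with the node name:
#
#   [[(constants.RS_NORMAL, "node1.example.com"),
#     (constants.RS_NORMAL, {constants.OOB_POWER_STATUS_POWERED: True})]]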
3345 def _CheckPayload(self, result):
3346 """Checks if the payload is valid.
3348 @param result: RPC result
3349 @raises errors.OpExecError: If payload is not valid
3353 if self.op.command == constants.OOB_HEALTH:
3354 if not isinstance(result.payload, list):
3355 errs.append("command 'health' is expected to return a list but got %s" %
3356 type(result.payload))
3358 for item, status in result.payload:
3359 if status not in constants.OOB_STATUSES:
3360 errs.append("health item '%s' has invalid status '%s'" %
3363 if self.op.command == constants.OOB_POWER_STATUS:
3364 if not isinstance(result.payload, dict):
3365 errs.append("power-status is expected to return a dict but got %s" %
3366 type(result.payload))
3368 if self.op.command in [
3369 constants.OOB_POWER_ON,
3370 constants.OOB_POWER_OFF,
3371 constants.OOB_POWER_CYCLE,
3373 if result.payload is not None:
3374 errs.append("%s is expected to not return payload but got '%s'" %
3375 (self.op.command, result.payload))
3378 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
3379 utils.CommaJoin(errs))
3383 class LUOsDiagnose(NoHooksLU):
3384 """Logical unit for OS diagnose/query.
3389 _BLK = "blacklisted"
3391 _FIELDS_STATIC = utils.FieldSet()
3392 _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
3393 "parameters", "api_versions", _HID, _BLK)
3395 def CheckArguments(self):
3397 raise errors.OpPrereqError("Selective OS query not supported",
3400 _CheckOutputFields(static=self._FIELDS_STATIC,
3401 dynamic=self._FIELDS_DYNAMIC,
3402 selected=self.op.output_fields)
3404 def ExpandNames(self):
3405 # Lock all nodes, in shared mode
3406 # Temporary removal of locks, should be reverted later
3407 # TODO: reintroduce locks when they are lighter-weight
3408 self.needed_locks = {}
3409 #self.share_locks[locking.LEVEL_NODE] = 1
3410 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3413 def _DiagnoseByOS(rlist):
3414 """Remaps a per-node return list into an a per-os per-node dictionary
3416 @param rlist: a map with node names as keys and OS objects as values
3419 @return: a dictionary with osnames as keys and as value another
3420 map, with nodes as keys and tuples of (path, status, diagnose,
3421 variants, parameters, api_versions) as values, eg::
3423 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3424 (/srv/..., False, "invalid api")],
3425 "node2": [(/srv/..., True, "", [], [])]}
3430 # we build here the list of nodes that didn't fail the RPC (at RPC
3431 # level), so that nodes with a non-responding node daemon don't
3432 # make all OSes invalid
3433 good_nodes = [node_name for node_name in rlist
3434 if not rlist[node_name].fail_msg]
3435 for node_name, nr in rlist.items():
3436 if nr.fail_msg or not nr.payload:
3438 for (name, path, status, diagnose, variants,
3439 params, api_versions) in nr.payload:
3440 if name not in all_os:
3441 # build a list of nodes for this os containing empty lists
3442 # for each node in node_list
3444 for nname in good_nodes:
3445 all_os[name][nname] = []
3446 # convert params from [name, help] to (name, help)
3447 params = [tuple(v) for v in params]
3448 all_os[name][node_name].append((path, status, diagnose,
3449 variants, params, api_versions))
3452 def Exec(self, feedback_fn):
3453 """Compute the list of OSes.
3456 valid_nodes = [node.name
3457 for node in self.cfg.GetAllNodesInfo().values()
3458 if not node.offline and node.vm_capable]
3459 node_data = self.rpc.call_os_diagnose(valid_nodes)
3460 pol = self._DiagnoseByOS(node_data)
3462 cluster = self.cfg.GetClusterInfo()
3464 for os_name in utils.NiceSort(pol.keys()):
3465 os_data = pol[os_name]
3468 (variants, params, api_versions) = null_state = (set(), set(), set())
3469 for idx, osl in enumerate(os_data.values()):
3470 valid = bool(valid and osl and osl[0][1])
3472 (variants, params, api_versions) = null_state
3474 node_variants, node_params, node_api = osl[0][3:6]
3475 if idx == 0: # first entry
3476 variants = set(node_variants)
3477 params = set(node_params)
3478 api_versions = set(node_api)
3479 else: # keep consistency
3480 variants.intersection_update(node_variants)
3481 params.intersection_update(node_params)
3482 api_versions.intersection_update(node_api)
3484 is_hid = os_name in cluster.hidden_os
3485 is_blk = os_name in cluster.blacklisted_os
3486 if ((self._HID not in self.op.output_fields and is_hid) or
3487 (self._BLK not in self.op.output_fields and is_blk) or
3488 (self._VLD not in self.op.output_fields and not valid)):
3491 for field in self.op.output_fields:
3494 elif field == self._VLD:
3496 elif field == "node_status":
3497 # this is just a copy of the dict
3499 for node_name, nos_list in os_data.items():
3500 val[node_name] = nos_list
3501 elif field == "variants":
3502 val = utils.NiceSort(list(variants))
3503 elif field == "parameters":
3505 elif field == "api_versions":
3506 val = list(api_versions)
3507 elif field == self._HID:
3509 elif field == self._BLK:
3512 raise errors.ParameterError(field)
3519 class LUNodeRemove(LogicalUnit):
3520 """Logical unit for removing a node.
3523 HPATH = "node-remove"
3524 HTYPE = constants.HTYPE_NODE
3526 def BuildHooksEnv(self):
3529 This doesn't run on the target node in the pre phase as a failed
3530 node would then be impossible to remove.
3534 "OP_TARGET": self.op.node_name,
3535 "NODE_NAME": self.op.node_name,
3537 all_nodes = self.cfg.GetNodeList()
3539 all_nodes.remove(self.op.node_name)
3541 logging.warning("Node %s which is about to be removed not found"
3542 " in the all nodes list", self.op.node_name)
3543 return env, all_nodes, all_nodes
3545 def CheckPrereq(self):
3546 """Check prerequisites.
3549 - the node exists in the configuration
3550 - it does not have primary or secondary instances
3551 - it's not the master
3553 Any errors are signaled by raising errors.OpPrereqError.
3556 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3557 node = self.cfg.GetNodeInfo(self.op.node_name)
3558 assert node is not None
3560 instance_list = self.cfg.GetInstanceList()
3562 masternode = self.cfg.GetMasterNode()
3563 if node.name == masternode:
3564 raise errors.OpPrereqError("Node is the master node,"
3565 " you need to failover first.",
3568 for instance_name in instance_list:
3569 instance = self.cfg.GetInstanceInfo(instance_name)
3570 if node.name in instance.all_nodes:
3571 raise errors.OpPrereqError("Instance %s is still running on the node,"
3572 " please remove first." % instance_name,
3574 self.op.node_name = node.name
3577 def Exec(self, feedback_fn):
3578 """Removes the node from the cluster.
3582 logging.info("Stopping the node daemon and removing configs from node %s",
3585 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3587 # Promote nodes to master candidate as needed
3588 _AdjustCandidatePool(self, exceptions=[node.name])
3589 self.context.RemoveNode(node.name)
3591 # Run post hooks on the node before it's removed
3592 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3594 hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3596 # pylint: disable-msg=W0702
3597 self.LogWarning("Errors occurred running hooks on %s" % node.name)
3599 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3600 msg = result.fail_msg
3602 self.LogWarning("Errors encountered on the remote node while leaving"
3603 " the cluster: %s", msg)
3605 # Remove node from our /etc/hosts
3606 if self.cfg.GetClusterInfo().modify_etc_hosts:
3607 master_node = self.cfg.GetMasterNode()
3608 result = self.rpc.call_etc_hosts_modify(master_node,
3609 constants.ETC_HOSTS_REMOVE,
3611 result.Raise("Can't update hosts file with new host data")
3612 _RedistributeAncillaryFiles(self)
3615 class _NodeQuery(_QueryBase):
3616 FIELDS = query.NODE_FIELDS
3618 def ExpandNames(self, lu):
3619 lu.needed_locks = {}
3620 lu.share_locks[locking.LEVEL_NODE] = 1
3623 self.wanted = _GetWantedNodes(lu, self.names)
3625 self.wanted = locking.ALL_SET
3627 self.do_locking = (self.use_locking and
3628 query.NQ_LIVE in self.requested_data)
3631 # if we don't request only static fields, we need to lock the nodes
3632 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
3634 def DeclareLocks(self, lu, level):
3637 def _GetQueryData(self, lu):
3638 """Computes the list of nodes and their attributes.
3641 all_info = lu.cfg.GetAllNodesInfo()
3643 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
3645 # Gather data as requested
3646 if query.NQ_LIVE in self.requested_data:
3647 node_data = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
3648 lu.cfg.GetHypervisorType())
3649 live_data = dict((name, nresult.payload)
3650 for (name, nresult) in node_data.items()
3651 if not nresult.fail_msg and nresult.payload)
3655 if query.NQ_INST in self.requested_data:
3656 node_to_primary = dict([(name, set()) for name in nodenames])
3657 node_to_secondary = dict([(name, set()) for name in nodenames])
3659 inst_data = lu.cfg.GetAllInstancesInfo()
3661 for inst in inst_data.values():
3662 if inst.primary_node in node_to_primary:
3663 node_to_primary[inst.primary_node].add(inst.name)
3664 for secnode in inst.secondary_nodes:
3665 if secnode in node_to_secondary:
3666 node_to_secondary[secnode].add(inst.name)
3668 node_to_primary = None
3669 node_to_secondary = None
3671 if query.NQ_OOB in self.requested_data:
3672 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
3673 for name, node in all_info.iteritems())
3677 if query.NQ_GROUP in self.requested_data:
3678 groups = lu.cfg.GetAllNodeGroupsInfo()
3682 return query.NodeQueryData([all_info[name] for name in nodenames],
3683 live_data, lu.cfg.GetMasterNode(),
3684 node_to_primary, node_to_secondary, groups,
3685 oob_support, lu.cfg.GetClusterInfo())
3688 class LUNodeQuery(NoHooksLU):
3689 """Logical unit for querying nodes.
3692 # pylint: disable-msg=W0142
3695 def CheckArguments(self):
3696 self.nq = _NodeQuery(self.op.names, self.op.output_fields,
3697 self.op.use_locking)
3699 def ExpandNames(self):
3700 self.nq.ExpandNames(self)
3702 def Exec(self, feedback_fn):
3703 return self.nq.OldStyleQuery(self)
3706 class LUNodeQueryvols(NoHooksLU):
3707 """Logical unit for getting volumes on node(s).
3711 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3712 _FIELDS_STATIC = utils.FieldSet("node")
3714 def CheckArguments(self):
3715 _CheckOutputFields(static=self._FIELDS_STATIC,
3716 dynamic=self._FIELDS_DYNAMIC,
3717 selected=self.op.output_fields)
3719 def ExpandNames(self):
3720 self.needed_locks = {}
3721 self.share_locks[locking.LEVEL_NODE] = 1
3722 if not self.op.nodes:
3723 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3725 self.needed_locks[locking.LEVEL_NODE] = \
3726 _GetWantedNodes(self, self.op.nodes)
3728 def Exec(self, feedback_fn):
3729 """Computes the list of nodes and their attributes.
3732 nodenames = self.acquired_locks[locking.LEVEL_NODE]
3733 volumes = self.rpc.call_node_volumes(nodenames)
3735 ilist = [self.cfg.GetInstanceInfo(iname) for iname
3736 in self.cfg.GetInstanceList()]
3738 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3741 for node in nodenames:
3742 nresult = volumes[node]
3745 msg = nresult.fail_msg
3747 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3750 node_vols = nresult.payload[:]
3751 node_vols.sort(key=lambda vol: vol['dev'])
3753 for vol in node_vols:
3755 for field in self.op.output_fields:
3758 elif field == "phys":
3762 elif field == "name":
3764 elif field == "size":
3765 val = int(float(vol['size']))
3766 elif field == "instance":
3768 if node not in lv_by_node[inst]:
3770 if vol['name'] in lv_by_node[inst][node]:
3776 raise errors.ParameterError(field)
3777 node_output.append(str(val))
3779 output.append(node_output)
3784 class LUNodeQueryStorage(NoHooksLU):
3785 """Logical unit for getting information on storage units on node(s).
3788 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3791 def CheckArguments(self):
3792 _CheckOutputFields(static=self._FIELDS_STATIC,
3793 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3794 selected=self.op.output_fields)
3796 def ExpandNames(self):
3797 self.needed_locks = {}
3798 self.share_locks[locking.LEVEL_NODE] = 1
3801 self.needed_locks[locking.LEVEL_NODE] = \
3802 _GetWantedNodes(self, self.op.nodes)
3804 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3806 def Exec(self, feedback_fn):
3807 """Computes the list of nodes and their attributes.
3810 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3812 # Always get name to sort by
3813 if constants.SF_NAME in self.op.output_fields:
3814 fields = self.op.output_fields[:]
3816 fields = [constants.SF_NAME] + self.op.output_fields
3818 # Never ask for node or type as it's only known to the LU
3819 for extra in [constants.SF_NODE, constants.SF_TYPE]:
3820 while extra in fields:
3821 fields.remove(extra)
3823 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3824 name_idx = field_idx[constants.SF_NAME]
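# Illustrative sketch (hypothetical field selection): if
# self.op.output_fields were [constants.SF_NODE, constants.SF_SIZE], the
# list sent over RPC would become [constants.SF_NAME, constants.SF_SIZE]:
# the name is prepended for sorting, and node/type are stripped because
# only this LU knows them, giving field_idx == {SF_NAME: 0, SF_SIZE: 1}.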
3826 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3827 data = self.rpc.call_storage_list(self.nodes,
3828 self.op.storage_type, st_args,
3829 self.op.name, fields)
3833 for node in utils.NiceSort(self.nodes):
3834 nresult = data[node]
3838 msg = nresult.fail_msg
3840 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3843 rows = dict([(row[name_idx], row) for row in nresult.payload])
3845 for name in utils.NiceSort(rows.keys()):
3850 for field in self.op.output_fields:
3851 if field == constants.SF_NODE:
3853 elif field == constants.SF_TYPE:
3854 val = self.op.storage_type
3855 elif field in field_idx:
3856 val = row[field_idx[field]]
3858 raise errors.ParameterError(field)
3867 class _InstanceQuery(_QueryBase):
3868 FIELDS = query.INSTANCE_FIELDS
3870 def ExpandNames(self, lu):
3871 lu.needed_locks = {}
3872 lu.share_locks[locking.LEVEL_INSTANCE] = 1
3873 lu.share_locks[locking.LEVEL_NODE] = 1
3876 self.wanted = _GetWantedInstances(lu, self.names)
3878 self.wanted = locking.ALL_SET
3880 self.do_locking = (self.use_locking and
3881 query.IQ_LIVE in self.requested_data)
3883 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3884 lu.needed_locks[locking.LEVEL_NODE] = []
3885 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3887 def DeclareLocks(self, lu, level):
3888 if level == locking.LEVEL_NODE and self.do_locking:
3889 lu._LockInstancesNodes() # pylint: disable-msg=W0212
3891 def _GetQueryData(self, lu):
3892 """Computes the list of instances and their attributes.
3895 all_info = lu.cfg.GetAllInstancesInfo()
3897 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
3899 instance_list = [all_info[name] for name in instance_names]
3900 nodes = frozenset([inst.primary_node for inst in instance_list])
3901 hv_list = list(set([inst.hypervisor for inst in instance_list]))
3905 # Gather data as requested
3906 if query.IQ_LIVE in self.requested_data:
3908 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
3910 result = node_data[name]
3912 # offline nodes will be in both lists
3913 assert result.fail_msg
3914 offline_nodes.append(name)
3916 bad_nodes.append(name)
3917 elif result.payload:
3918 live_data.update(result.payload)
3919 # else no instance is alive
3923 if query.IQ_DISKUSAGE in self.requested_data:
3924 disk_usage = dict((inst.name,
3925 _ComputeDiskSize(inst.disk_template,
3926 [{"size": disk.size}
3927 for disk in inst.disks]))
3928 for inst in instance_list)
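# Illustrative sketch (hypothetical values): disk_usage maps each instance
# name to the disk space, in MiB, that its disk template requires, e.g.
#   {"inst1.example.com": 10240, "inst2.example.com": 512}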
3932 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
3933 disk_usage, offline_nodes, bad_nodes,
3937 class LUQuery(NoHooksLU):
3938 """Query for resources/items of a certain kind.
3941 # pylint: disable-msg=W0142
3944 def CheckArguments(self):
3945 qcls = _GetQueryImplementation(self.op.what)
3946 names = qlang.ReadSimpleFilter("name", self.op.filter)
3948 self.impl = qcls(names, self.op.fields, False)
3950 def ExpandNames(self):
3951 self.impl.ExpandNames(self)
3953 def DeclareLocks(self, level):
3954 self.impl.DeclareLocks(self, level)
3956 def Exec(self, feedback_fn):
3957 return self.impl.NewStyleQuery(self)
3960 class LUQueryFields(NoHooksLU):
3961 """Query for resources/items of a certain kind.
3964 # pylint: disable-msg=W0142
3967 def CheckArguments(self):
3968 self.qcls = _GetQueryImplementation(self.op.what)
3970 def ExpandNames(self):
3971 self.needed_locks = {}
3973 def Exec(self, feedback_fn):
3974 return self.qcls.FieldsQuery(self.op.fields)
3977 class LUNodeModifyStorage(NoHooksLU):
3978 """Logical unit for modifying a storage volume on a node.
3983 def CheckArguments(self):
3984 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3986 storage_type = self.op.storage_type
3989 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3991 raise errors.OpPrereqError("Storage units of type '%s' cannot be"
3992 " modified" % storage_type,
3995 diff = set(self.op.changes.keys()) - modifiable
3997 raise errors.OpPrereqError("The following fields cannot be modified for"
3998 " storage units of type '%s': %r" %
3999 (storage_type, list(diff)),
4002 def ExpandNames(self):
4003 self.needed_locks = {
4004 locking.LEVEL_NODE: self.op.node_name,
4007 def Exec(self, feedback_fn):
4008 """Computes the list of nodes and their attributes.
4011 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4012 result = self.rpc.call_storage_modify(self.op.node_name,
4013 self.op.storage_type, st_args,
4014 self.op.name, self.op.changes)
4015 result.Raise("Failed to modify storage unit '%s' on %s" %
4016 (self.op.name, self.op.node_name))
4019 class LUNodeAdd(LogicalUnit):
4020 """Logical unit for adding node to the cluster.
4024 HTYPE = constants.HTYPE_NODE
4025 _NFLAGS = ["master_capable", "vm_capable"]
4027 def CheckArguments(self):
4028 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
4029 # validate/normalize the node name
4030 self.hostname = netutils.GetHostname(name=self.op.node_name,
4031 family=self.primary_ip_family)
4032 self.op.node_name = self.hostname.name
4033 if self.op.readd and self.op.group:
4034 raise errors.OpPrereqError("Cannot pass a node group when a node is"
4035 " being readded", errors.ECODE_INVAL)
4037 def BuildHooksEnv(self):
4040 This will run on all nodes before, and on all nodes + the new node after.
4044 "OP_TARGET": self.op.node_name,
4045 "NODE_NAME": self.op.node_name,
4046 "NODE_PIP": self.op.primary_ip,
4047 "NODE_SIP": self.op.secondary_ip,
4048 "MASTER_CAPABLE": str(self.op.master_capable),
4049 "VM_CAPABLE": str(self.op.vm_capable),
4051 nodes_0 = self.cfg.GetNodeList()
4052 nodes_1 = nodes_0 + [self.op.node_name, ]
4053 return env, nodes_0, nodes_1
4055 def CheckPrereq(self):
4056 """Check prerequisites.
4059 - the new node is not already in the config
4061 - its parameters (single/dual homed) match the cluster
4063 Any errors are signaled by raising errors.OpPrereqError.
4067 hostname = self.hostname
4068 node = hostname.name
4069 primary_ip = self.op.primary_ip = hostname.ip
4070 if self.op.secondary_ip is None:
4071 if self.primary_ip_family == netutils.IP6Address.family:
4072 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
4073 " IPv4 address must be given as secondary",
4075 self.op.secondary_ip = primary_ip
4077 secondary_ip = self.op.secondary_ip
4078 if not netutils.IP4Address.IsValid(secondary_ip):
4079 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4080 " address" % secondary_ip, errors.ECODE_INVAL)
4082 node_list = cfg.GetNodeList()
4083 if not self.op.readd and node in node_list:
4084 raise errors.OpPrereqError("Node %s is already in the configuration" %
4085 node, errors.ECODE_EXISTS)
4086 elif self.op.readd and node not in node_list:
4087 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
4090 self.changed_primary_ip = False
4092 for existing_node_name in node_list:
4093 existing_node = cfg.GetNodeInfo(existing_node_name)
4095 if self.op.readd and node == existing_node_name:
4096 if existing_node.secondary_ip != secondary_ip:
4097 raise errors.OpPrereqError("Readded node doesn't have the same IP"
4098 " address configuration as before",
4100 if existing_node.primary_ip != primary_ip:
4101 self.changed_primary_ip = True
4105 if (existing_node.primary_ip == primary_ip or
4106 existing_node.secondary_ip == primary_ip or
4107 existing_node.primary_ip == secondary_ip or
4108 existing_node.secondary_ip == secondary_ip):
4109 raise errors.OpPrereqError("New node ip address(es) conflict with"
4110 " existing node %s" % existing_node.name,
4111 errors.ECODE_NOTUNIQUE)
4113 # After this 'if' block, None is no longer a valid value for the
4114 # _capable op attributes
4116 old_node = self.cfg.GetNodeInfo(node)
4117 assert old_node is not None, "Can't retrieve locked node %s" % node
4118 for attr in self._NFLAGS:
4119 if getattr(self.op, attr) is None:
4120 setattr(self.op, attr, getattr(old_node, attr))
4122 for attr in self._NFLAGS:
4123 if getattr(self.op, attr) is None:
4124 setattr(self.op, attr, True)
4126 if self.op.readd and not self.op.vm_capable:
4127 pri, sec = cfg.GetNodeInstances(node)
4129 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
4130 " flag set to false, but it already holds"
4131 " instances" % node,
4134 # check that the type of the node (single versus dual homed) is the
4135 # same as for the master
4136 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
4137 master_singlehomed = myself.secondary_ip == myself.primary_ip
4138 newbie_singlehomed = secondary_ip == primary_ip
4139 if master_singlehomed != newbie_singlehomed:
4140 if master_singlehomed:
4141 raise errors.OpPrereqError("The master has no secondary ip but the"
4142 " new node has one",
4145 raise errors.OpPrereqError("The master has a secondary ip but the"
4146 " new node doesn't have one",
4149 # checks reachability
4150 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
4151 raise errors.OpPrereqError("Node not reachable by ping",
4152 errors.ECODE_ENVIRON)
4154 if not newbie_singlehomed:
4155 # check reachability from my secondary ip to newbie's secondary ip
4156 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
4157 source=myself.secondary_ip):
4158 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4159 " based ping to node daemon port",
4160 errors.ECODE_ENVIRON)
4167 if self.op.master_capable:
4168 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
4170 self.master_candidate = False
4173 self.new_node = old_node
4175 node_group = cfg.LookupNodeGroup(self.op.group)
4176 self.new_node = objects.Node(name=node,
4177 primary_ip=primary_ip,
4178 secondary_ip=secondary_ip,
4179 master_candidate=self.master_candidate,
4180 offline=False, drained=False,
4183 if self.op.ndparams:
4184 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
4186 def Exec(self, feedback_fn):
4187 """Adds the new node to the cluster.
4190 new_node = self.new_node
4191 node = new_node.name
4193 # We are adding a new node, so we assume it's powered
4194 new_node.powered = True
4196 # for re-adds, reset the offline/drained/master-candidate flags;
4197 # we need to reset here, otherwise offline would prevent RPC calls
4198 # later in the procedure; this also means that if the re-add
4199 # fails, we are left with a non-offlined, broken node
4201 new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
4202 self.LogInfo("Readding a node, the offline/drained flags were reset")
4203 # if we demote the node, we do cleanup later in the procedure
4204 new_node.master_candidate = self.master_candidate
4205 if self.changed_primary_ip:
4206 new_node.primary_ip = self.op.primary_ip
4208 # copy the master/vm_capable flags
4209 for attr in self._NFLAGS:
4210 setattr(new_node, attr, getattr(self.op, attr))
4212 # notify the user about any possible mc promotion
4213 if new_node.master_candidate:
4214 self.LogInfo("Node will be a master candidate")
4216 if self.op.ndparams:
4217 new_node.ndparams = self.op.ndparams
4219 new_node.ndparams = {}
4221 # check connectivity
4222 result = self.rpc.call_version([node])[node]
4223 result.Raise("Can't get version information from node %s" % node)
4224 if constants.PROTOCOL_VERSION == result.payload:
4225 logging.info("Communication to node %s fine, sw version %s match",
4226 node, result.payload)
4228 raise errors.OpExecError("Version mismatch master version %s,"
4229 " node version %s" %
4230 (constants.PROTOCOL_VERSION, result.payload))
4232 # Add node to our /etc/hosts, and add key to known_hosts
4233 if self.cfg.GetClusterInfo().modify_etc_hosts:
4234 master_node = self.cfg.GetMasterNode()
4235 result = self.rpc.call_etc_hosts_modify(master_node,
4236 constants.ETC_HOSTS_ADD,
4239 result.Raise("Can't update hosts file with new host data")
4241 if new_node.secondary_ip != new_node.primary_ip:
4242 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
4245 node_verify_list = [self.cfg.GetMasterNode()]
4246 node_verify_param = {
4247 constants.NV_NODELIST: [node],
4248 # TODO: do a node-net-test as well?
4251 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
4252 self.cfg.GetClusterName())
4253 for verifier in node_verify_list:
4254 result[verifier].Raise("Cannot communicate with node %s" % verifier)
4255 nl_payload = result[verifier].payload[constants.NV_NODELIST]
4257 for failed in nl_payload:
4258 feedback_fn("ssh/hostname verification failed"
4259 " (checking from %s): %s" %
4260 (verifier, nl_payload[failed]))
4261 raise errors.OpExecError("ssh/hostname verification failed.")
4264 _RedistributeAncillaryFiles(self)
4265 self.context.ReaddNode(new_node)
4266 # make sure we redistribute the config
4267 self.cfg.Update(new_node, feedback_fn)
4268 # and make sure the new node will not have old files around
4269 if not new_node.master_candidate:
4270 result = self.rpc.call_node_demote_from_mc(new_node.name)
4271 msg = result.fail_msg
4273 self.LogWarning("Node failed to demote itself from master"
4274 " candidate status: %s" % msg)
4276 _RedistributeAncillaryFiles(self, additional_nodes=[node],
4277 additional_vm=self.op.vm_capable)
4278 self.context.AddNode(new_node, self.proc.GetECId())
4281 class LUNodeSetParams(LogicalUnit):
4282 """Modifies the parameters of a node.
4284 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
4285 to the node role (as _ROLE_*)
4286 @cvar _R2F: a dictionary from node role to tuples of flags
4287 @cvar _FLAGS: a list of attribute names corresponding to the flags
4290 HPATH = "node-modify"
4291 HTYPE = constants.HTYPE_NODE
4293 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
4295 (True, False, False): _ROLE_CANDIDATE,
4296 (False, True, False): _ROLE_DRAINED,
4297 (False, False, True): _ROLE_OFFLINE,
4298 (False, False, False): _ROLE_REGULAR,
4300 _R2F = dict((v, k) for k, v in _F2R.items())
4301 _FLAGS = ["master_candidate", "drained", "offline"]
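# Illustrative examples of the two mappings above (they are inverses of
# each other, with tuple positions following _FLAGS, i.e.
# (master_candidate, drained, offline)):
#   _F2R[(True, False, False)] == _ROLE_CANDIDATE
#   _R2F[_ROLE_OFFLINE] == (False, False, True)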
4303 def CheckArguments(self):
4304 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4305 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
4306 self.op.master_capable, self.op.vm_capable,
4307 self.op.secondary_ip, self.op.ndparams]
4308 if all_mods.count(None) == len(all_mods):
4309 raise errors.OpPrereqError("Please pass at least one modification",
4311 if all_mods.count(True) > 1:
4312 raise errors.OpPrereqError("Can't set the node into more than one"
4313 " state at the same time",
4316 # Boolean value that tells us whether we might be demoting from MC
4317 self.might_demote = (self.op.master_candidate == False or
4318 self.op.offline == True or
4319 self.op.drained == True or
4320 self.op.master_capable == False)
4322 if self.op.secondary_ip:
4323 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
4324 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4325 " address" % self.op.secondary_ip,
4328 self.lock_all = self.op.auto_promote and self.might_demote
4329 self.lock_instances = self.op.secondary_ip is not None
4331 def ExpandNames(self):
4333 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
4335 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
4337 if self.lock_instances:
4338 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4340 def DeclareLocks(self, level):
4341 # If we have locked all instances, before waiting to lock nodes, release
4342 # all the ones living on nodes unrelated to the current operation.
4343 if level == locking.LEVEL_NODE and self.lock_instances:
4344 instances_release = []
4346 self.affected_instances = []
4347 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4348 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
4349 instance = self.context.cfg.GetInstanceInfo(instance_name)
4350 i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
4351 if i_mirrored and self.op.node_name in instance.all_nodes:
4352 instances_keep.append(instance_name)
4353 self.affected_instances.append(instance)
4355 instances_release.append(instance_name)
4356 if instances_release:
4357 self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
4358 self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep
4360 def BuildHooksEnv(self):
4363 This runs on the master node.
4367 "OP_TARGET": self.op.node_name,
4368 "MASTER_CANDIDATE": str(self.op.master_candidate),
4369 "OFFLINE": str(self.op.offline),
4370 "DRAINED": str(self.op.drained),
4371 "MASTER_CAPABLE": str(self.op.master_capable),
4372 "VM_CAPABLE": str(self.op.vm_capable),
4374 nl = [self.cfg.GetMasterNode(),
4378 def CheckPrereq(self):
4379 """Check prerequisites.
4381 This checks the requested node parameter changes for consistency with the current node and cluster state.
4384 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
4386 if (self.op.master_candidate is not None or
4387 self.op.drained is not None or
4388 self.op.offline is not None):
4389 # we can't change the master's node flags
4390 if self.op.node_name == self.cfg.GetMasterNode():
4391 raise errors.OpPrereqError("The master role can be changed"
4392 " only via master-failover",
4395 if self.op.master_candidate and not node.master_capable:
4396 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
4397 " it a master candidate" % node.name,
4400 if self.op.vm_capable == False:
4401 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
4403 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
4404 " the vm_capable flag" % node.name,
4407 if node.master_candidate and self.might_demote and not self.lock_all:
4408 assert not self.op.auto_promote, "auto_promote set but lock_all not"
4409 # check if after removing the current node, we're missing master
4411 (mc_remaining, mc_should, _) = \
4412 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
4413 if mc_remaining < mc_should:
4414 raise errors.OpPrereqError("Not enough master candidates, please"
4415 " pass auto promote option to allow"
4416 " promotion", errors.ECODE_STATE)
4418 self.old_flags = old_flags = (node.master_candidate,
4419 node.drained, node.offline)
4420 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
4421 self.old_role = old_role = self._F2R[old_flags]
4423 # Check for ineffective changes
4424 for attr in self._FLAGS:
4425 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
4426 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
4427 setattr(self.op, attr, None)
4429 # Past this point, any flag change to False means a transition
4430 # away from the respective state, as only real changes are kept
4432 # TODO: We might query the real power state if it supports OOB
4433 if _SupportsOob(self.cfg, node):
4434 if self.op.offline is False and not (node.powered or
4435 self.op.powered == True):
4436 raise errors.OpPrereqError(("Please power on node %s first before you"
4437 " can reset offline state") %
4439 elif self.op.powered is not None:
4440 raise errors.OpPrereqError(("Unable to change powered state for node %s"
4441 " which does not support out-of-band"
4442 " handling") % self.op.node_name)
4444 # If we're being deofflined/drained, we'll MC ourself if needed
4445 if (self.op.drained == False or self.op.offline == False or
4446 (self.op.master_capable and not node.master_capable)):
4447 if _DecideSelfPromotion(self):
4448 self.op.master_candidate = True
4449 self.LogInfo("Auto-promoting node to master candidate")
4451 # If we're no longer master capable, we'll demote ourselves from MC
4452 if self.op.master_capable == False and node.master_candidate:
4453 self.LogInfo("Demoting from master candidate")
4454 self.op.master_candidate = False
4457 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
4458 if self.op.master_candidate:
4459 new_role = self._ROLE_CANDIDATE
4460 elif self.op.drained:
4461 new_role = self._ROLE_DRAINED
4462 elif self.op.offline:
4463 new_role = self._ROLE_OFFLINE
4464 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
4465 # False is still in new flags, which means we're un-setting (the
4467 new_role = self._ROLE_REGULAR
4468 else: # no new flags, nothing, keep old role
4471 self.new_role = new_role
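# Illustrative examples of the role computation above (hypothetical
# requests): offline=True on a master candidate gives old_role ==
# _ROLE_CANDIDATE and new_role == _ROLE_OFFLINE; drained=False on a
# currently drained node keeps the explicit False (the "ineffective
# change" filter only drops it when the node flag is already unset) and
# therefore yields new_role == _ROLE_REGULAR, unless the node was
# auto-promoted to master candidate just above.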
4473 if old_role == self._ROLE_OFFLINE and new_role != old_role:
4474 # Trying to transition out of offline status
4475 result = self.rpc.call_version([node.name])[node.name]
4477 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
4478 " to report its version: %s" %
4479 (node.name, result.fail_msg),
4482 self.LogWarning("Transitioning node from offline to online state"
4483 " without using re-add. Please make sure the node"
4486 if self.op.secondary_ip:
4487 # Ok even without locking, because this can't be changed by any LU
4488 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
4489 master_singlehomed = master.secondary_ip == master.primary_ip
4490 if master_singlehomed and self.op.secondary_ip:
4491 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
4492 " homed cluster", errors.ECODE_INVAL)
4495 if self.affected_instances:
4496 raise errors.OpPrereqError("Cannot change secondary ip: offline"
4497 " node has instances (%s) configured"
4498 " to use it" % self.affected_instances)
4500 # On online nodes, check that no instances are running, and that
4501 # the node has the new ip and we can reach it.
4502 for instance in self.affected_instances:
4503 _CheckInstanceDown(self, instance, "cannot change secondary ip")
4505 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
4506 if master.name != node.name:
4507 # check reachability from master secondary ip to new secondary ip
4508 if not netutils.TcpPing(self.op.secondary_ip,
4509 constants.DEFAULT_NODED_PORT,
4510 source=master.secondary_ip):
4511 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4512 " based ping to node daemon port",
4513 errors.ECODE_ENVIRON)
4515 if self.op.ndparams:
4516 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
4517 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
4518 self.new_ndparams = new_ndparams
4520 def Exec(self, feedback_fn):
4525 old_role = self.old_role
4526 new_role = self.new_role
4530 if self.op.ndparams:
4531 node.ndparams = self.new_ndparams
4533 if self.op.powered is not None:
4534 node.powered = self.op.powered
4536 for attr in ["master_capable", "vm_capable"]:
4537 val = getattr(self.op, attr)
4539 setattr(node, attr, val)
4540 result.append((attr, str(val)))
4542 if new_role != old_role:
4543 # Tell the node to demote itself, if no longer MC and not offline
4544 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
4545 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
4547 self.LogWarning("Node failed to demote itself: %s", msg)
4549 new_flags = self._R2F[new_role]
4550 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
4552 result.append((desc, str(nf)))
4553 (node.master_candidate, node.drained, node.offline) = new_flags
4555 # we locked all nodes, so we adjust the candidate pool before updating this node
4557 _AdjustCandidatePool(self, [node.name])
4559 if self.op.secondary_ip:
4560 node.secondary_ip = self.op.secondary_ip
4561 result.append(("secondary_ip", self.op.secondary_ip))
4563 # this will trigger configuration file update, if needed
4564 self.cfg.Update(node, feedback_fn)
4566 # this will trigger job queue propagation or cleanup if the mc
4568 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
4569 self.context.ReaddNode(node)
4574 class LUNodePowercycle(NoHooksLU):
4575 """Powercycles a node.
4580 def CheckArguments(self):
4581 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4582 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4583 raise errors.OpPrereqError("The node is the master and the force"
4584 " parameter was not set",
4587 def ExpandNames(self):
4588 """Locking for PowercycleNode.
4590 This is a last-resort option and shouldn't block on other
4591 jobs. Therefore, we grab no locks.
4594 self.needed_locks = {}
4596 def Exec(self, feedback_fn):
4600 result = self.rpc.call_node_powercycle(self.op.node_name,
4601 self.cfg.GetHypervisorType())
4602 result.Raise("Failed to schedule the reboot")
4603 return result.payload
4606 class LUClusterQuery(NoHooksLU):
4607 """Query cluster configuration.
4612 def ExpandNames(self):
4613 self.needed_locks = {}
4615 def Exec(self, feedback_fn):
4616 """Return cluster config.
4619 cluster = self.cfg.GetClusterInfo()
4622 # Filter just for enabled hypervisors
4623 for os_name, hv_dict in cluster.os_hvp.items():
4624 os_hvp[os_name] = {}
4625 for hv_name, hv_params in hv_dict.items():
4626 if hv_name in cluster.enabled_hypervisors:
4627 os_hvp[os_name][hv_name] = hv_params
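# Illustrative sketch (hypothetical OS and hypervisor names): with
#   cluster.os_hvp == {"debian-image": {"kvm": {...}, "fake": {...}}}
# and only "kvm" in cluster.enabled_hypervisors, the filtered result is
#   os_hvp == {"debian-image": {"kvm": {...}}}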
4629 # Convert ip_family to ip_version
4630 primary_ip_version = constants.IP4_VERSION
4631 if cluster.primary_ip_family == netutils.IP6Address.family:
4632 primary_ip_version = constants.IP6_VERSION
4635 "software_version": constants.RELEASE_VERSION,
4636 "protocol_version": constants.PROTOCOL_VERSION,
4637 "config_version": constants.CONFIG_VERSION,
4638 "os_api_version": max(constants.OS_API_VERSIONS),
4639 "export_version": constants.EXPORT_VERSION,
4640 "architecture": (platform.architecture()[0], platform.machine()),
4641 "name": cluster.cluster_name,
4642 "master": cluster.master_node,
4643 "default_hypervisor": cluster.enabled_hypervisors[0],
4644 "enabled_hypervisors": cluster.enabled_hypervisors,
4645 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4646 for hypervisor_name in cluster.enabled_hypervisors]),
4648 "beparams": cluster.beparams,
4649 "osparams": cluster.osparams,
4650 "nicparams": cluster.nicparams,
4651 "ndparams": cluster.ndparams,
4652 "candidate_pool_size": cluster.candidate_pool_size,
4653 "master_netdev": cluster.master_netdev,
4654 "volume_group_name": cluster.volume_group_name,
4655 "drbd_usermode_helper": cluster.drbd_usermode_helper,
4656 "file_storage_dir": cluster.file_storage_dir,
4657 "maintain_node_health": cluster.maintain_node_health,
4658 "ctime": cluster.ctime,
4659 "mtime": cluster.mtime,
4660 "uuid": cluster.uuid,
4661 "tags": list(cluster.GetTags()),
4662 "uid_pool": cluster.uid_pool,
4663 "default_iallocator": cluster.default_iallocator,
4664 "reserved_lvs": cluster.reserved_lvs,
4665 "primary_ip_version": primary_ip_version,
4666 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
4667 "hidden_os": cluster.hidden_os,
4668 "blacklisted_os": cluster.blacklisted_os,
4674 class LUClusterConfigQuery(NoHooksLU):
4675 """Return configuration values.
4679 _FIELDS_DYNAMIC = utils.FieldSet()
4680 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4681 "watcher_pause", "volume_group_name")
4683 def CheckArguments(self):
4684 _CheckOutputFields(static=self._FIELDS_STATIC,
4685 dynamic=self._FIELDS_DYNAMIC,
4686 selected=self.op.output_fields)
4688 def ExpandNames(self):
4689 self.needed_locks = {}
4691 def Exec(self, feedback_fn):
4692 """Dump a representation of the cluster config to the standard output.
4696 for field in self.op.output_fields:
4697 if field == "cluster_name":
4698 entry = self.cfg.GetClusterName()
4699 elif field == "master_node":
4700 entry = self.cfg.GetMasterNode()
4701 elif field == "drain_flag":
4702 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4703 elif field == "watcher_pause":
4704 entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4705 elif field == "volume_group_name":
4706 entry = self.cfg.GetVGName()
4708 raise errors.ParameterError(field)
4709 values.append(entry)
4713 class LUInstanceActivateDisks(NoHooksLU):
4714 """Bring up an instance's disks.
4719 def ExpandNames(self):
4720 self._ExpandAndLockInstance()
4721 self.needed_locks[locking.LEVEL_NODE] = []
4722 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4724 def DeclareLocks(self, level):
4725 if level == locking.LEVEL_NODE:
4726 self._LockInstancesNodes()
4728 def CheckPrereq(self):
4729 """Check prerequisites.
4731 This checks that the instance is in the cluster.
4734 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4735 assert self.instance is not None, \
4736 "Cannot retrieve locked instance %s" % self.op.instance_name
4737 _CheckNodeOnline(self, self.instance.primary_node)
4739 def Exec(self, feedback_fn):
4740 """Activate the disks.
4743 disks_ok, disks_info = \
4744 _AssembleInstanceDisks(self, self.instance,
4745 ignore_size=self.op.ignore_size)
4747 raise errors.OpExecError("Cannot activate block devices")
4752 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4754 """Prepare the block devices for an instance.
4756 This sets up the block devices on all nodes.
4758 @type lu: L{LogicalUnit}
4759 @param lu: the logical unit on whose behalf we execute
4760 @type instance: L{objects.Instance}
4761 @param instance: the instance whose disks we assemble
4762 @type disks: list of L{objects.Disk} or None
4763 @param disks: which disks to assemble (or all, if None)
4764 @type ignore_secondaries: boolean
4765 @param ignore_secondaries: if true, errors on secondary nodes
4766 won't result in an error return from the function
4767 @type ignore_size: boolean
4768 @param ignore_size: if true, the current known size of the disk
4769 will not be used during the disk activation, useful for cases
4770 when the size is wrong
4771 @return: a tuple (disks_ok, device_info), where disks_ok is False if
4772 the operation failed, and device_info is a list of
4773 (host, instance_visible_name, node_visible_name) tuples with the mapping from node devices to instance devices
4778 iname = instance.name
4779 disks = _ExpandCheckDisks(instance, disks)
4781 # With the two-pass mechanism we try to reduce the window of
4782 # opportunity for the race condition of switching DRBD to primary
4783 # before handshaking occurred, but we do not eliminate it
4785 # The proper fix would be to wait (with some limits) until the
4786 # connection has been made and drbd transitions from WFConnection
4787 # into any other network-connected state (Connected, SyncTarget,
4790 # 1st pass, assemble on all nodes in secondary mode
4791 for inst_disk in disks:
4792 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4794 node_disk = node_disk.Copy()
4795 node_disk.UnsetSize()
4796 lu.cfg.SetDiskID(node_disk, node)
4797 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
4798 msg = result.fail_msg
4800 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4801 " (is_primary=False, pass=1): %s",
4802 inst_disk.iv_name, node, msg)
4803 if not ignore_secondaries:
4806 # FIXME: race condition on drbd migration to primary
4808 # 2nd pass, do only the primary node
4809 for inst_disk in disks:
4812 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4813 if node != instance.primary_node:
4816 node_disk = node_disk.Copy()
4817 node_disk.UnsetSize()
4818 lu.cfg.SetDiskID(node_disk, node)
4819 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
4820 msg = result.fail_msg
4822 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4823 " (is_primary=True, pass=2): %s",
4824 inst_disk.iv_name, node, msg)
4827 dev_path = result.payload
4829 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4831 # leave the disks configured for the primary node
4832 # this is a workaround that would be fixed better by
4833 # improving the logical/physical id handling
4835 lu.cfg.SetDiskID(disk, instance.primary_node)
4837 return disks_ok, device_info
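# Illustrative caller sketch (hedged; it mirrors how the LUs in this module
# use the helper): the boolean is checked first and the device info is then
# unpacked as (node, iv_name, dev_path) tuples, e.g.
#   disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
#   for node, iv_name, dev_path in device_info:
#     feedback_fn("  %s on %s is %s" % (iv_name, node, dev_path))
# The feedback formatting is only an example, not part of the helper.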
4840 def _StartInstanceDisks(lu, instance, force):
4841 """Start the disks of an instance.
4844 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4845 ignore_secondaries=force)
4847 _ShutdownInstanceDisks(lu, instance)
4848 if force is not None and not force:
4849 lu.proc.LogWarning("", hint="If the message above refers to a"
4851 " you can retry the operation using '--force'.")
4852 raise errors.OpExecError("Disk consistency error")
4855 class LUInstanceDeactivateDisks(NoHooksLU):
4856 """Shutdown an instance's disks.
4861 def ExpandNames(self):
4862 self._ExpandAndLockInstance()
4863 self.needed_locks[locking.LEVEL_NODE] = []
4864 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4866 def DeclareLocks(self, level):
4867 if level == locking.LEVEL_NODE:
4868 self._LockInstancesNodes()
4870 def CheckPrereq(self):
4871 """Check prerequisites.
4873 This checks that the instance is in the cluster.
4876 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4877 assert self.instance is not None, \
4878 "Cannot retrieve locked instance %s" % self.op.instance_name
4880 def Exec(self, feedback_fn):
4881 """Deactivate the disks
4884 instance = self.instance
4886 _ShutdownInstanceDisks(self, instance)
4888 _SafeShutdownInstanceDisks(self, instance)
4891 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4892 """Shutdown block devices of an instance.
4894 This function checks if an instance is running, before calling
4895 _ShutdownInstanceDisks.
4898 _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4899 _ShutdownInstanceDisks(lu, instance, disks=disks)
4902 def _ExpandCheckDisks(instance, disks):
4903 """Return the instance disks selected by the disks list
4905 @type disks: list of L{objects.Disk} or None
4906 @param disks: selected disks
4907 @rtype: list of L{objects.Disk}
4908 @return: selected instance disks to act on
4912 return instance.disks
4914 if not set(disks).issubset(instance.disks):
4915 raise errors.ProgrammerError("Can only act on disks belonging to the"
4920 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4921 """Shutdown block devices of an instance.
4923 This does the shutdown on all nodes of the instance.
4925 If ignore_primary is false, errors on the primary node are
4930 disks = _ExpandCheckDisks(instance, disks)
4933 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4934 lu.cfg.SetDiskID(top_disk, node)
4935 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4936 msg = result.fail_msg
4938 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4939 disk.iv_name, node, msg)
4940 if ((node == instance.primary_node and not ignore_primary) or
4941 (node != instance.primary_node and not result.offline)):
4946 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4947 """Checks if a node has enough free memory.
4949 This function checks if a given node has the needed amount of free
4950 memory. In case the node has less memory or we cannot get the
4951 information from the node, this function raises an OpPrereqError
4954 @type lu: C{LogicalUnit}
4955 @param lu: a logical unit from which we get configuration data
4957 @param node: the node to check
4958 @type reason: C{str}
4959 @param reason: string to use in the error message
4960 @type requested: C{int}
4961 @param requested: the amount of memory in MiB to check for
4962 @type hypervisor_name: C{str}
4963 @param hypervisor_name: the hypervisor to ask for memory stats
4964 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4965 we cannot check the node
4968 nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
4969 nodeinfo[node].Raise("Can't get data from node %s" % node,
4970 prereq=True, ecode=errors.ECODE_ENVIRON)
4971 free_mem = nodeinfo[node].payload.get('memory_free', None)
4972 if not isinstance(free_mem, int):
4973 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4974 " was '%s'" % (node, free_mem),
4975 errors.ECODE_ENVIRON)
4976 if requested > free_mem:
4977 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4978 " needed %s MiB, available %s MiB" %
4979 (node, reason, requested, free_mem),
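# Illustrative call sketch (it matches the startup/failover checks further
# down in this module): "requested" is the instance's configured memory in
# MiB and "reason" ends up verbatim in the error message, e.g.
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)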
4983 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
4984 """Checks if nodes have enough free disk space in the all VGs.
4986 This function check if all given nodes have the needed amount of
4987 free disk. In case any node has less disk or we cannot get the
4988 information from the node, this function raise an OpPrereqError
4991 @type lu: C{LogicalUnit}
4992 @param lu: a logical unit from which we get configuration data
4993 @type nodenames: C{list}
4994 @param nodenames: the list of node names to check
4995 @type req_sizes: C{dict}
4996 @param req_sizes: the hash of vg and corresponding amount of disk in
4998 @raise errors.OpPrereqError: if the node doesn't have enough disk,
4999 or we cannot check the node
5002 for vg, req_size in req_sizes.items():
5003 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
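# Illustrative sketch (hypothetical volume group names): req_sizes is a
# plain dict mapping VG name to the space needed in MiB, e.g.
#   {"xenvg": 10240, "data-vg": 2048}
# and each entry is checked independently by _CheckNodesFreeDiskOnVG.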
5006 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5007 """Checks if nodes have enough free disk space in the specified VG.
5009 This function checks if all given nodes have the needed amount of
5010 free disk. In case any node has less disk or we cannot get the
5011 information from the node, this function raises an OpPrereqError
5014 @type lu: C{LogicalUnit}
5015 @param lu: a logical unit from which we get configuration data
5016 @type nodenames: C{list}
5017 @param nodenames: the list of node names to check
5019 @param vg: the volume group to check
5020 @type requested: C{int}
5021 @param requested: the amount of disk in MiB to check for
5022 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5023 or we cannot check the node
5026 nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5027 for node in nodenames:
5028 info = nodeinfo[node]
5029 info.Raise("Cannot get current information from node %s" % node,
5030 prereq=True, ecode=errors.ECODE_ENVIRON)
5031 vg_free = info.payload.get("vg_free", None)
5032 if not isinstance(vg_free, int):
5033 raise errors.OpPrereqError("Can't compute free disk space on node"
5034 " %s for vg %s, result was '%s'" %
5035 (node, vg, vg_free), errors.ECODE_ENVIRON)
5036 if requested > vg_free:
5037 raise errors.OpPrereqError("Not enough disk space on target node %s"
5038 " vg %s: required %d MiB, available %d MiB" %
5039 (node, vg, requested, vg_free),
5043 class LUInstanceStartup(LogicalUnit):
5044 """Starts an instance.
5047 HPATH = "instance-start"
5048 HTYPE = constants.HTYPE_INSTANCE
5051 def CheckArguments(self):
5053 if self.op.beparams:
5054 # fill the beparams dict
5055 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5057 def ExpandNames(self):
5058 self._ExpandAndLockInstance()
5060 def BuildHooksEnv(self):
5063 This runs on master, primary and secondary nodes of the instance.
5067 "FORCE": self.op.force,
5069 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5070 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5073 def CheckPrereq(self):
5074 """Check prerequisites.
5076 This checks that the instance is in the cluster.
5079 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5080 assert self.instance is not None, \
5081 "Cannot retrieve locked instance %s" % self.op.instance_name
5084 if self.op.hvparams:
5085 # check hypervisor parameter syntax (locally)
5086 cluster = self.cfg.GetClusterInfo()
5087 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5088 filled_hvp = cluster.FillHV(instance)
5089 filled_hvp.update(self.op.hvparams)
5090 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
5091 hv_type.CheckParameterSyntax(filled_hvp)
5092 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
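# Illustrative sketch of the merge above (hypothetical parameter values):
# if the cluster-filled parameters are
#   {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/vda1"}
# and the opcode passes hvparams={"root_path": "/dev/vda2"}, the dict
# checked for syntax is
#   {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/vda2"},
# i.e. per-operation values override the filled defaults for this check
# only; nothing is written back to the configuration here.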
5094 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
5096 if self.primary_offline and self.op.ignore_offline_nodes:
5097 self.proc.LogWarning("Ignoring offline primary node")
5099 if self.op.hvparams or self.op.beparams:
5100 self.proc.LogWarning("Overridden parameters are ignored")
5102 _CheckNodeOnline(self, instance.primary_node)
5104 bep = self.cfg.GetClusterInfo().FillBE(instance)
5106 # check that the instance's bridges exist
5107 _CheckInstanceBridgesExist(self, instance)
5109 remote_info = self.rpc.call_instance_info(instance.primary_node,
5111 instance.hypervisor)
5112 remote_info.Raise("Error checking node %s" % instance.primary_node,
5113 prereq=True, ecode=errors.ECODE_ENVIRON)
5114 if not remote_info.payload: # not running already
5115 _CheckNodeFreeMemory(self, instance.primary_node,
5116 "starting instance %s" % instance.name,
5117 bep[constants.BE_MEMORY], instance.hypervisor)
5119 def Exec(self, feedback_fn):
5120 """Start the instance.
5123 instance = self.instance
5124 force = self.op.force
5126 self.cfg.MarkInstanceUp(instance.name)
5128 if self.primary_offline:
5129 assert self.op.ignore_offline_nodes
5130 self.proc.LogInfo("Primary node offline, marked instance as started")
5132 node_current = instance.primary_node
5134 _StartInstanceDisks(self, instance, force)
5136 result = self.rpc.call_instance_start(node_current, instance,
5137 self.op.hvparams, self.op.beparams)
5138 msg = result.fail_msg
5140 _ShutdownInstanceDisks(self, instance)
5141 raise errors.OpExecError("Could not start instance: %s" % msg)
5144 class LUInstanceReboot(LogicalUnit):
5145 """Reboot an instance.
5148 HPATH = "instance-reboot"
5149 HTYPE = constants.HTYPE_INSTANCE
5152 def ExpandNames(self):
5153 self._ExpandAndLockInstance()
5155 def BuildHooksEnv(self):
5158 This runs on master, primary and secondary nodes of the instance.
5162 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
5163 "REBOOT_TYPE": self.op.reboot_type,
5164 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5166 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5167 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5170 def CheckPrereq(self):
5171 """Check prerequisites.
5173 This checks that the instance is in the cluster.
5176 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5177 assert self.instance is not None, \
5178 "Cannot retrieve locked instance %s" % self.op.instance_name
5180 _CheckNodeOnline(self, instance.primary_node)
5182 # check that the instance's bridges exist
5183 _CheckInstanceBridgesExist(self, instance)
5185 def Exec(self, feedback_fn):
5186 """Reboot the instance.
5189 instance = self.instance
5190 ignore_secondaries = self.op.ignore_secondaries
5191 reboot_type = self.op.reboot_type
5193 node_current = instance.primary_node
5195 if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
5196 constants.INSTANCE_REBOOT_HARD]:
5197 for disk in instance.disks:
5198 self.cfg.SetDiskID(disk, node_current)
5199 result = self.rpc.call_instance_reboot(node_current, instance,
5201 self.op.shutdown_timeout)
5202 result.Raise("Could not reboot instance")
5204 result = self.rpc.call_instance_shutdown(node_current, instance,
5205 self.op.shutdown_timeout)
5206 result.Raise("Could not shutdown instance for full reboot")
5207 _ShutdownInstanceDisks(self, instance)
5208 _StartInstanceDisks(self, instance, ignore_secondaries)
5209 result = self.rpc.call_instance_start(node_current, instance, None, None)
5210 msg = result.fail_msg
5212 _ShutdownInstanceDisks(self, instance)
5213 raise errors.OpExecError("Could not start instance for"
5214 " full reboot: %s" % msg)
5216 self.cfg.MarkInstanceUp(instance.name)
5219 class LUInstanceShutdown(LogicalUnit):
5220 """Shutdown an instance.
5223 HPATH = "instance-stop"
5224 HTYPE = constants.HTYPE_INSTANCE
5227 def ExpandNames(self):
5228 self._ExpandAndLockInstance()
5230 def BuildHooksEnv(self):
5233 This runs on master, primary and secondary nodes of the instance.
5236 env = _BuildInstanceHookEnvByObject(self, self.instance)
5237 env["TIMEOUT"] = self.op.timeout
5238 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5241 def CheckPrereq(self):
5242 """Check prerequisites.
5244 This checks that the instance is in the cluster.
5247 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5248 assert self.instance is not None, \
5249 "Cannot retrieve locked instance %s" % self.op.instance_name
5251 self.primary_offline = \
5252 self.cfg.GetNodeInfo(self.instance.primary_node).offline
5254 if self.primary_offline and self.op.ignore_offline_nodes:
5255 self.proc.LogWarning("Ignoring offline primary node")
5257 _CheckNodeOnline(self, self.instance.primary_node)
5259 def Exec(self, feedback_fn):
5260 """Shutdown the instance.
5263 instance = self.instance
5264 node_current = instance.primary_node
5265 timeout = self.op.timeout
5267 self.cfg.MarkInstanceDown(instance.name)
5269 if self.primary_offline:
5270 assert self.op.ignore_offline_nodes
5271 self.proc.LogInfo("Primary node offline, marked instance as stopped")
5273 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5274 msg = result.fail_msg
5276 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5278 _ShutdownInstanceDisks(self, instance)
5281 class LUInstanceReinstall(LogicalUnit):
5282 """Reinstall an instance.
5285 HPATH = "instance-reinstall"
5286 HTYPE = constants.HTYPE_INSTANCE
5289 def ExpandNames(self):
5290 self._ExpandAndLockInstance()
5292 def BuildHooksEnv(self):
5295 This runs on master, primary and secondary nodes of the instance.
5298 env = _BuildInstanceHookEnvByObject(self, self.instance)
5299 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5302 def CheckPrereq(self):
5303 """Check prerequisites.
5305 This checks that the instance is in the cluster and is not running.
5308 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5309 assert instance is not None, \
5310 "Cannot retrieve locked instance %s" % self.op.instance_name
5311 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
5312 " offline, cannot reinstall")
5313 for node in instance.secondary_nodes:
5314 _CheckNodeOnline(self, node, "Instance secondary node offline,"
5315 " cannot reinstall")
5317 if instance.disk_template == constants.DT_DISKLESS:
5318 raise errors.OpPrereqError("Instance '%s' has no disks" %
5319 self.op.instance_name,
5321 _CheckInstanceDown(self, instance, "cannot reinstall")
5323 if self.op.os_type is not None:
5325 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
5326 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
5327 instance_os = self.op.os_type
5329 instance_os = instance.os
5331 nodelist = list(instance.all_nodes)
5333 if self.op.osparams:
5334 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
5335 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
5336 self.os_inst = i_osdict # the new dict (without defaults)
5340 self.instance = instance
5342 def Exec(self, feedback_fn):
5343 """Reinstall the instance.
5346 inst = self.instance
5348 if self.op.os_type is not None:
5349 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
5350 inst.os = self.op.os_type
5351 # Write to configuration
5352 self.cfg.Update(inst, feedback_fn)
5354 _StartInstanceDisks(self, inst, None)
5356 feedback_fn("Running the instance OS create scripts...")
5357 # FIXME: pass debug option from opcode to backend
5358 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
5359 self.op.debug_level,
5360 osparams=self.os_inst)
5361 result.Raise("Could not install OS for instance %s on node %s" %
5362 (inst.name, inst.primary_node))
5364 _ShutdownInstanceDisks(self, inst)
5367 class LUInstanceRecreateDisks(LogicalUnit):
5368 """Recreate an instance's missing disks.
5371 HPATH = "instance-recreate-disks"
5372 HTYPE = constants.HTYPE_INSTANCE
5375 def ExpandNames(self):
5376 self._ExpandAndLockInstance()
5378 def BuildHooksEnv(self):
5381 This runs on master, primary and secondary nodes of the instance.
5384 env = _BuildInstanceHookEnvByObject(self, self.instance)
5385 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5388 def CheckPrereq(self):
5389 """Check prerequisites.
5391 This checks that the instance is in the cluster and is not running.
5394 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5395 assert instance is not None, \
5396 "Cannot retrieve locked instance %s" % self.op.instance_name
5397 _CheckNodeOnline(self, instance.primary_node)
5399 if instance.disk_template == constants.DT_DISKLESS:
5400 raise errors.OpPrereqError("Instance '%s' has no disks" %
5401 self.op.instance_name, errors.ECODE_INVAL)
5402 _CheckInstanceDown(self, instance, "cannot recreate disks")
5404 if not self.op.disks:
5405 self.op.disks = range(len(instance.disks))
5407 for idx in self.op.disks:
5408 if idx >= len(instance.disks):
5409 raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
5412 self.instance = instance
5414 def Exec(self, feedback_fn):
5415 """Recreate the disks.
5419 for idx, _ in enumerate(self.instance.disks):
5420 if idx not in self.op.disks: # disk idx has not been passed in
5424 _CreateDisks(self, self.instance, to_skip=to_skip)
5427 class LUInstanceRename(LogicalUnit):
5428 """Rename an instance.
5431 HPATH = "instance-rename"
5432 HTYPE = constants.HTYPE_INSTANCE
5434 def CheckArguments(self):
5438 if self.op.ip_check and not self.op.name_check:
5439 # TODO: make the ip check more flexible and not depend on the name check
5440 raise errors.OpPrereqError("Cannot do ip check without a name check",
5443 def BuildHooksEnv(self):
5446 This runs on master, primary and secondary nodes of the instance.
5449 env = _BuildInstanceHookEnvByObject(self, self.instance)
5450 env["INSTANCE_NEW_NAME"] = self.op.new_name
5451 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5454 def CheckPrereq(self):
5455 """Check prerequisites.
5457 This checks that the instance is in the cluster and is not running.
5460 self.op.instance_name = _ExpandInstanceName(self.cfg,
5461 self.op.instance_name)
5462 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5463 assert instance is not None
5464 _CheckNodeOnline(self, instance.primary_node)
5465 _CheckInstanceDown(self, instance, "cannot rename")
5466 self.instance = instance
5468 new_name = self.op.new_name
5469 if self.op.name_check:
5470 hostname = netutils.GetHostname(name=new_name)
5471 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
5473 new_name = self.op.new_name = hostname.name
5474 if (self.op.ip_check and
5475 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5476 raise errors.OpPrereqError("IP %s of instance %s already in use" %
5477 (hostname.ip, new_name),
5478 errors.ECODE_NOTUNIQUE)
5480 instance_list = self.cfg.GetInstanceList()
5481 if new_name in instance_list and new_name != instance.name:
5482 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5483 new_name, errors.ECODE_EXISTS)
5485 def Exec(self, feedback_fn):
5486 """Rename the instance.
5489 inst = self.instance
5490 old_name = inst.name
5492 rename_file_storage = False
5493 if (inst.disk_template == constants.DT_FILE and
5494 self.op.new_name != inst.name):
5495 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5496 rename_file_storage = True
5498 self.cfg.RenameInstance(inst.name, self.op.new_name)
5499 # Change the instance lock. This is definitely safe while we hold the BGL
5500 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
5501 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5503 # re-read the instance from the configuration after rename
5504 inst = self.cfg.GetInstanceInfo(self.op.new_name)
5506 if rename_file_storage:
5507 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5508 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5509 old_file_storage_dir,
5510 new_file_storage_dir)
5511 result.Raise("Could not rename on node %s directory '%s' to '%s'"
5512 " (but the instance has been renamed in Ganeti)" %
5513 (inst.primary_node, old_file_storage_dir,
5514 new_file_storage_dir))
5516 _StartInstanceDisks(self, inst, None)
5518 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
5519 old_name, self.op.debug_level)
5520 msg = result.fail_msg
5522 msg = ("Could not run OS rename script for instance %s on node %s"
5523 " (but the instance has been renamed in Ganeti): %s" %
5524 (inst.name, inst.primary_node, msg))
5525 self.proc.LogWarning(msg)
5527 _ShutdownInstanceDisks(self, inst)
5532 class LUInstanceRemove(LogicalUnit):
5533 """Remove an instance.
5536 HPATH = "instance-remove"
5537 HTYPE = constants.HTYPE_INSTANCE
5540 def ExpandNames(self):
5541 self._ExpandAndLockInstance()
5542 self.needed_locks[locking.LEVEL_NODE] = []
5543 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5545 def DeclareLocks(self, level):
5546 if level == locking.LEVEL_NODE:
5547 self._LockInstancesNodes()
5549 def BuildHooksEnv(self):
5552 This runs on master, primary and secondary nodes of the instance.
5555 env = _BuildInstanceHookEnvByObject(self, self.instance)
5556 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
5557 nl = [self.cfg.GetMasterNode()]
5558 nl_post = list(self.instance.all_nodes) + nl
5559 return env, nl, nl_post
5561 def CheckPrereq(self):
5562 """Check prerequisites.
5564 This checks that the instance is in the cluster.
5567 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5568 assert self.instance is not None, \
5569 "Cannot retrieve locked instance %s" % self.op.instance_name
5571 def Exec(self, feedback_fn):
5572 """Remove the instance.
5575 instance = self.instance
5576 logging.info("Shutting down instance %s on node %s",
5577 instance.name, instance.primary_node)
5579 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5580 self.op.shutdown_timeout)
5581 msg = result.fail_msg
5583 if self.op.ignore_failures:
5584 feedback_fn("Warning: can't shutdown instance: %s" % msg)
5586 raise errors.OpExecError("Could not shutdown instance %s on"
5588 (instance.name, instance.primary_node, msg))
5590 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5593 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5594 """Utility function to remove an instance.
5597 logging.info("Removing block devices for instance %s", instance.name)
5599 if not _RemoveDisks(lu, instance):
5600 if not ignore_failures:
5601 raise errors.OpExecError("Can't remove instance's disks")
5602 feedback_fn("Warning: can't remove instance's disks")
5604 logging.info("Removing instance %s out of cluster config", instance.name)
5606 lu.cfg.RemoveInstance(instance.name)
5608 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5609 "Instance lock removal conflict"
5611 # Remove lock for the instance
5612 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5615 class LUInstanceQuery(NoHooksLU):
5616 """Logical unit for querying instances.
5619 # pylint: disable-msg=W0142
5622 def CheckArguments(self):
5623 self.iq = _InstanceQuery(self.op.names, self.op.output_fields,
5624 self.op.use_locking)
5626 def ExpandNames(self):
5627 self.iq.ExpandNames(self)
5629 def DeclareLocks(self, level):
5630 self.iq.DeclareLocks(self, level)
5632 def Exec(self, feedback_fn):
5633 return self.iq.OldStyleQuery(self)
5636 class LUInstanceFailover(LogicalUnit):
5637 """Failover an instance.
5640 HPATH = "instance-failover"
5641 HTYPE = constants.HTYPE_INSTANCE
5644 def ExpandNames(self):
5645 self._ExpandAndLockInstance()
5646 self.needed_locks[locking.LEVEL_NODE] = []
5647 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5649 def DeclareLocks(self, level):
5650 if level == locking.LEVEL_NODE:
5651 self._LockInstancesNodes()
5653 def BuildHooksEnv(self):
5656 This runs on master, primary and secondary nodes of the instance.
5659 instance = self.instance
5660 source_node = instance.primary_node
5661 target_node = instance.secondary_nodes[0]
5663 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5664 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5665 "OLD_PRIMARY": source_node,
5666 "OLD_SECONDARY": target_node,
5667 "NEW_PRIMARY": target_node,
5668 "NEW_SECONDARY": source_node,
5670 env.update(_BuildInstanceHookEnvByObject(self, instance))
5671 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5673 nl_post.append(source_node)
5674 return env, nl, nl_post
5676 def CheckPrereq(self):
5677 """Check prerequisites.
5679 This checks that the instance is in the cluster.
5682 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5683 assert self.instance is not None, \
5684 "Cannot retrieve locked instance %s" % self.op.instance_name
5686 bep = self.cfg.GetClusterInfo().FillBE(instance)
5687 if instance.disk_template not in constants.DTS_NET_MIRROR:
5688 raise errors.OpPrereqError("Instance's disk layout is not"
5689 " network mirrored, cannot failover.",
5692 secondary_nodes = instance.secondary_nodes
5693 if not secondary_nodes:
5694 raise errors.ProgrammerError("no secondary node but using "
5695 "a mirrored disk template")
5697 target_node = secondary_nodes[0]
5698 _CheckNodeOnline(self, target_node)
5699 _CheckNodeNotDrained(self, target_node)
5700 if instance.admin_up:
5701 # check memory requirements on the secondary node
5702 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5703 instance.name, bep[constants.BE_MEMORY],
5704 instance.hypervisor)
5706 self.LogInfo("Not checking memory on the secondary node as"
5707 " instance will not be started")
5709 # check bridge existence
5710 _CheckInstanceBridgesExist(self, instance, node=target_node)
5712 def Exec(self, feedback_fn):
5713 """Failover an instance.
5715 The failover is done by shutting it down on its present node and
5716 starting it on the secondary.
5719 instance = self.instance
5720 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
5722 source_node = instance.primary_node
5723 target_node = instance.secondary_nodes[0]
5725 if instance.admin_up:
5726 feedback_fn("* checking disk consistency between source and target")
5727 for dev in instance.disks:
5728 # for drbd, these are drbd over lvm
5729 if not _CheckDiskConsistency(self, dev, target_node, False):
5730 if not self.op.ignore_consistency:
5731 raise errors.OpExecError("Disk %s is degraded on target node,"
5732 " aborting failover." % dev.iv_name)
5734 feedback_fn("* not checking disk consistency as instance is not running")
5736 feedback_fn("* shutting down instance on source node")
5737 logging.info("Shutting down instance %s on node %s",
5738 instance.name, source_node)
5740 result = self.rpc.call_instance_shutdown(source_node, instance,
5741 self.op.shutdown_timeout)
5742 msg = result.fail_msg
5744 if self.op.ignore_consistency or primary_node.offline:
5745 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5746 " Proceeding anyway. Please make sure node"
5747 " %s is down. Error details: %s",
5748 instance.name, source_node, source_node, msg)
5750 raise errors.OpExecError("Could not shutdown instance %s on"
5752 (instance.name, source_node, msg))
5754 feedback_fn("* deactivating the instance's disks on source node")
5755 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5756 raise errors.OpExecError("Can't shut down the instance's disks.")
5758 instance.primary_node = target_node
5759 # distribute new instance config to the other nodes
5760 self.cfg.Update(instance, feedback_fn)
5762 # Only start the instance if it's marked as up
5763 if instance.admin_up:
5764 feedback_fn("* activating the instance's disks on target node")
5765 logging.info("Starting instance %s on node %s",
5766 instance.name, target_node)
5768 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5769 ignore_secondaries=True)
5771 _ShutdownInstanceDisks(self, instance)
5772 raise errors.OpExecError("Can't activate the instance's disks")
5774 feedback_fn("* starting the instance on the target node")
5775 result = self.rpc.call_instance_start(target_node, instance, None, None)
5776 msg = result.fail_msg
5778 _ShutdownInstanceDisks(self, instance)
5779 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5780 (instance.name, target_node, msg))
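# Failover (above) boils down to: optionally verify that the mirrored disks
# on the target are consistent, shut the instance down on the current
# primary, deactivate its disks there, flip primary_node in the
# configuration and, if the instance was marked as up, reassemble the disks
# on the new primary and start it again.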
5783 class LUInstanceMigrate(LogicalUnit):
5784 """Migrate an instance.
5786 This is migration without shutting the instance down, as opposed to
5787 failover, which requires a shutdown.
5790 HPATH = "instance-migrate"
5791 HTYPE = constants.HTYPE_INSTANCE
5794 def ExpandNames(self):
5795 self._ExpandAndLockInstance()
5797 self.needed_locks[locking.LEVEL_NODE] = []
5798 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5800 self._migrater = TLMigrateInstance(self, self.op.instance_name,
5802 self.tasklets = [self._migrater]
5804 def DeclareLocks(self, level):
5805 if level == locking.LEVEL_NODE:
5806 self._LockInstancesNodes()
5808 def BuildHooksEnv(self):
5811 This runs on master, primary and secondary nodes of the instance.
5814 instance = self._migrater.instance
5815 source_node = instance.primary_node
5816 target_node = instance.secondary_nodes[0]
5817 env = _BuildInstanceHookEnvByObject(self, instance)
5818 env["MIGRATE_LIVE"] = self._migrater.live
5819 env["MIGRATE_CLEANUP"] = self.op.cleanup
5821 "OLD_PRIMARY": source_node,
5822 "OLD_SECONDARY": target_node,
5823 "NEW_PRIMARY": target_node,
5824 "NEW_SECONDARY": source_node,
5826 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5828 nl_post.append(source_node)
5829 return env, nl, nl_post
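# LUInstanceMigrate itself only handles locking and hooks; the actual
# migration logic (prerequisite checks, disk state changes and the
# hypervisor calls) lives in the TLMigrateInstance tasklet created in
# ExpandNames.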
5832 class LUInstanceMove(LogicalUnit):
5833 """Move an instance by data-copying.
5836 HPATH = "instance-move"
5837 HTYPE = constants.HTYPE_INSTANCE
5840 def ExpandNames(self):
5841 self._ExpandAndLockInstance()
5842 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5843 self.op.target_node = target_node
5844 self.needed_locks[locking.LEVEL_NODE] = [target_node]
5845 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5847 def DeclareLocks(self, level):
5848 if level == locking.LEVEL_NODE:
5849 self._LockInstancesNodes(primary_only=True)
5851 def BuildHooksEnv(self):
5854 This runs on master, primary and secondary nodes of the instance.
5858 "TARGET_NODE": self.op.target_node,
5859 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5861 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5862 nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5863 self.op.target_node]
5866 def CheckPrereq(self):
5867 """Check prerequisites.
5869 This checks that the instance is in the cluster.
5872 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5873 assert self.instance is not None, \
5874 "Cannot retrieve locked instance %s" % self.op.instance_name
5876 node = self.cfg.GetNodeInfo(self.op.target_node)
5877 assert node is not None, \
5878 "Cannot retrieve locked node %s" % self.op.target_node
5880 self.target_node = target_node = node.name
5882 if target_node == instance.primary_node:
5883 raise errors.OpPrereqError("Instance %s is already on the node %s" %
5884 (instance.name, target_node),
5887 bep = self.cfg.GetClusterInfo().FillBE(instance)
5889 for idx, dsk in enumerate(instance.disks):
5890 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5891 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5892 " cannot copy" % idx, errors.ECODE_STATE)
5894 _CheckNodeOnline(self, target_node)
5895 _CheckNodeNotDrained(self, target_node)
5896 _CheckNodeVmCapable(self, target_node)
5898 if instance.admin_up:
5899 # check memory requirements on the target node
5900 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5901 instance.name, bep[constants.BE_MEMORY],
5902 instance.hypervisor)
5904 self.LogInfo("Not checking memory on the target node as"
5905 " instance will not be started")
5907 # check bridge existence
5908 _CheckInstanceBridgesExist(self, instance, node=target_node)
5910 def Exec(self, feedback_fn):
5911 """Move an instance.
5913 The move is done by shutting it down on its present node, copying
5914 the data over (slow) and starting it on the new node.
5917 instance = self.instance
5919 source_node = instance.primary_node
5920 target_node = self.target_node
5922 self.LogInfo("Shutting down instance %s on source node %s",
5923 instance.name, source_node)
5925 result = self.rpc.call_instance_shutdown(source_node, instance,
5926 self.op.shutdown_timeout)
5927 msg = result.fail_msg
5929 if self.op.ignore_consistency:
5930 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5931 " Proceeding anyway. Please make sure node"
5932 " %s is down. Error details: %s",
5933 instance.name, source_node, source_node, msg)
5935 raise errors.OpExecError("Could not shutdown instance %s on"
5937 (instance.name, source_node, msg))
5939 # create the target disks
5941 _CreateDisks(self, instance, target_node=target_node)
5942 except errors.OpExecError:
5943 self.LogWarning("Device creation failed, reverting...")
5945 _RemoveDisks(self, instance, target_node=target_node)
5947 self.cfg.ReleaseDRBDMinors(instance.name)
5950 cluster_name = self.cfg.GetClusterInfo().cluster_name
5953 # activate, get path, copy the data over
5954 for idx, disk in enumerate(instance.disks):
5955 self.LogInfo("Copying data for disk %d", idx)
5956 result = self.rpc.call_blockdev_assemble(target_node, disk,
5957 instance.name, True)
5959 self.LogWarning("Can't assemble newly created disk %d: %s",
5960 idx, result.fail_msg)
5961 errs.append(result.fail_msg)
5963 dev_path = result.payload
5964 result = self.rpc.call_blockdev_export(source_node, disk,
5965 target_node, dev_path,
5968 self.LogWarning("Can't copy data over for disk %d: %s",
5969 idx, result.fail_msg)
5970 errs.append(result.fail_msg)
5974 self.LogWarning("Some disks failed to copy, aborting")
5976 _RemoveDisks(self, instance, target_node=target_node)
5978 self.cfg.ReleaseDRBDMinors(instance.name)
5979 raise errors.OpExecError("Errors during disk copy: %s" %
5982 instance.primary_node = target_node
5983 self.cfg.Update(instance, feedback_fn)
5985 self.LogInfo("Removing the disks on the original node")
5986 _RemoveDisks(self, instance, target_node=source_node)
5988 # Only start the instance if it's marked as up
5989 if instance.admin_up:
5990 self.LogInfo("Starting instance %s on node %s",
5991 instance.name, target_node)
5993 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5994 ignore_secondaries=True)
5996 _ShutdownInstanceDisks(self, instance)
5997 raise errors.OpExecError("Can't activate the instance's disks")
5999 result = self.rpc.call_instance_start(target_node, instance, None, None)
6000 msg = result.fail_msg
6002 _ShutdownInstanceDisks(self, instance)
6003 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6004 (instance.name, target_node, msg))
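# An instance move (above) is a cold copy: shut down on the source, create
# fresh disks on the target, assemble each new disk and stream the data over
# with blockdev_export, update the configuration, drop the old disks and
# finally restart the instance if it was marked as up.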
6007 class LUNodeMigrate(LogicalUnit):
6008 """Migrate all instances from a node.
6011 HPATH = "node-migrate"
6012 HTYPE = constants.HTYPE_NODE
6015 def ExpandNames(self):
6016 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6018 self.needed_locks = {
6019 locking.LEVEL_NODE: [self.op.node_name],
6022 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6024 # Create tasklets for migrating instances for all instances on this node
6028 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
6029 logging.debug("Migrating instance %s", inst.name)
6030 names.append(inst.name)
6032 tasklets.append(TLMigrateInstance(self, inst.name, False))
6034 self.tasklets = tasklets
6036 # Declare instance locks
6037 self.needed_locks[locking.LEVEL_INSTANCE] = names
6039 def DeclareLocks(self, level):
6040 if level == locking.LEVEL_NODE:
6041 self._LockInstancesNodes()
6043 def BuildHooksEnv(self):
6046 This runs on the master, the primary and all the secondaries.
6050 "NODE_NAME": self.op.node_name,
6053 nl = [self.cfg.GetMasterNode()]
6055 return (env, nl, nl)
6058 class TLMigrateInstance(Tasklet):
6059 """Tasklet class for instance migration.
6062 @ivar live: whether the migration will be done live or non-live;
6063 this variable is initialized only after CheckPrereq has run
6066 def __init__(self, lu, instance_name, cleanup):
6067 """Initializes this class.
6070 Tasklet.__init__(self, lu)
6073 self.instance_name = instance_name
6074 self.cleanup = cleanup
6075 self.live = False # will be overridden later
6077 def CheckPrereq(self):
6078 """Check prerequisites.
6080 This checks that the instance is in the cluster.
6083 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
6084 instance = self.cfg.GetInstanceInfo(instance_name)
6085 assert instance is not None
6087 if instance.disk_template != constants.DT_DRBD8:
6088 raise errors.OpPrereqError("Instance's disk layout is not"
6089 " drbd8, cannot migrate.", errors.ECODE_STATE)
6091 secondary_nodes = instance.secondary_nodes
6092 if not secondary_nodes:
6093 raise errors.ConfigurationError("No secondary node but using"
6094 " drbd8 disk template")
6096 i_be = self.cfg.GetClusterInfo().FillBE(instance)
6098 target_node = secondary_nodes[0]
6099 # check memory requirements on the secondary node
6100 _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
6101 instance.name, i_be[constants.BE_MEMORY],
6102 instance.hypervisor)
6104 # check bridge existance
6105 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
6107 if not self.cleanup:
6108 _CheckNodeNotDrained(self.lu, target_node)
6109 result = self.rpc.call_instance_migratable(instance.primary_node,
6111 result.Raise("Can't migrate, please use failover",
6112 prereq=True, ecode=errors.ECODE_STATE)
6114 self.instance = instance
6116 if self.lu.op.live is not None and self.lu.op.mode is not None:
6117 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
6118 " parameters are accepted",
6120 if self.lu.op.live is not None:
6122 self.lu.op.mode = constants.HT_MIGRATION_LIVE
6124 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
6125 # reset the 'live' parameter to None so that repeated
6126 # invocations of CheckPrereq do not raise an exception
6127 self.lu.op.live = None
6128 elif self.lu.op.mode is None:
6129 # read the default value from the hypervisor
6130 i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
6131 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
6133 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
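# Resolution of the migration mode above: specifying both 'live' and 'mode'
# is rejected; an explicit 'live' flag is translated into the corresponding
# mode (and then cleared so repeated CheckPrereq runs stay idempotent); if
# neither is given, the hypervisor's HV_MIGRATION_MODE default is used.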
6135 def _WaitUntilSync(self):
6136 """Poll with custom rpc for disk sync.
6138 This uses our own step-based rpc call.
6141 self.feedback_fn("* wait until resync is done")
6145 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
6147 self.instance.disks)
6149 for node, nres in result.items():
6150 nres.Raise("Cannot resync disks on node %s" % node)
6151 node_done, node_percent = nres.payload
6152 all_done = all_done and node_done
6153 if node_percent is not None:
6154 min_percent = min(min_percent, node_percent)
6156 if min_percent < 100:
6157 self.feedback_fn(" - progress: %.1f%%" % min_percent)
6160 def _EnsureSecondary(self, node):
6161 """Demote a node to secondary.
6164 self.feedback_fn("* switching node %s to secondary mode" % node)
6166 for dev in self.instance.disks:
6167 self.cfg.SetDiskID(dev, node)
6169 result = self.rpc.call_blockdev_close(node, self.instance.name,
6170 self.instance.disks)
6171 result.Raise("Cannot change disk to secondary on node %s" % node)
6173 def _GoStandalone(self):
6174 """Disconnect from the network.
6177 self.feedback_fn("* changing into standalone mode")
6178 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
6179 self.instance.disks)
6180 for node, nres in result.items():
6181 nres.Raise("Cannot disconnect disks node %s" % node)
6183 def _GoReconnect(self, multimaster):
6184 """Reconnect to the network.
6190 msg = "single-master"
6191 self.feedback_fn("* changing disks into %s mode" % msg)
6192 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
6193 self.instance.disks,
6194 self.instance.name, multimaster)
6195 for node, nres in result.items():
6196 nres.Raise("Cannot change disks config on node %s" % node)
6198 def _ExecCleanup(self):
6199 """Try to cleanup after a failed migration.
6201 The cleanup is done by:
6202 - check that the instance is running only on one node
6203 (and update the config if needed)
6204 - change disks on its secondary node to secondary
6205 - wait until disks are fully synchronized
6206 - disconnect from the network
6207 - change disks into single-master mode
6208 - wait again until disks are fully synchronized
6211 instance = self.instance
6212 target_node = self.target_node
6213 source_node = self.source_node
6215 # check running on only one node
6216 self.feedback_fn("* checking where the instance actually runs"
6217 " (if this hangs, the hypervisor might be in"
6219 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
6220 for node, result in ins_l.items():
6221 result.Raise("Can't contact node %s" % node)
6223 runningon_source = instance.name in ins_l[source_node].payload
6224 runningon_target = instance.name in ins_l[target_node].payload
6226 if runningon_source and runningon_target:
6227 raise errors.OpExecError("Instance seems to be running on two nodes,"
6228 " or the hypervisor is confused. You will have"
6229 " to ensure manually that it runs only on one"
6230 " and restart this operation.")
6232 if not (runningon_source or runningon_target):
6233 raise errors.OpExecError("Instance does not seem to be running at all."
6234 " In this case, it's safer to repair by"
6235 " running 'gnt-instance stop' to ensure disk"
6236 " shutdown, and then restarting it.")
6238 if runningon_target:
6239 # the migration has actually succeeded, we need to update the config
6240 self.feedback_fn("* instance running on secondary node (%s),"
6241 " updating config" % target_node)
6242 instance.primary_node = target_node
6243 self.cfg.Update(instance, self.feedback_fn)
6244 demoted_node = source_node
6246 self.feedback_fn("* instance confirmed to be running on its"
6247 " primary node (%s)" % source_node)
6248 demoted_node = target_node
6250 self._EnsureSecondary(demoted_node)
6252 self._WaitUntilSync()
6253 except errors.OpExecError:
6254 # we ignore here errors, since if the device is standalone, it
6255 # won't be able to sync
6257 self._GoStandalone()
6258 self._GoReconnect(False)
6259 self._WaitUntilSync()
6261 self.feedback_fn("* done")
6263 def _RevertDiskStatus(self):
6264 """Try to revert the disk status after a failed migration.
6267 target_node = self.target_node
6269 self._EnsureSecondary(target_node)
6270 self._GoStandalone()
6271 self._GoReconnect(False)
6272 self._WaitUntilSync()
6273 except errors.OpExecError, err:
6274 self.lu.LogWarning("Migration failed and I can't reconnect the"
6275 " drives: error '%s'\n"
6276 "Please look and recover the instance status" %
6279 def _AbortMigration(self):
6280 """Call the hypervisor code to abort a started migration.
6283 instance = self.instance
6284 target_node = self.target_node
6285 migration_info = self.migration_info
6287 abort_result = self.rpc.call_finalize_migration(target_node,
6291 abort_msg = abort_result.fail_msg
6293 logging.error("Aborting migration failed on target node %s: %s",
6294 target_node, abort_msg)
6295 # Don't raise an exception here, as we still have to try to revert the
6296 # disk status, even if this step failed.
6298 def _ExecMigration(self):
6299 """Migrate an instance.
6301 The migrate is done by:
6302 - change the disks into dual-master mode
6303 - wait until disks are fully synchronized again
6304 - migrate the instance
6305 - change disks on the new secondary node (the old primary) to secondary
6306 - wait until disks are fully synchronized
6307 - change disks into single-master mode
6310 instance = self.instance
6311 target_node = self.target_node
6312 source_node = self.source_node
6314 self.feedback_fn("* checking disk consistency between source and target")
6315 for dev in instance.disks:
6316 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6317 raise errors.OpExecError("Disk %s is degraded or not fully"
6318 " synchronized on target node,"
6319 " aborting migrate." % dev.iv_name)
6321 # First get the migration information from the remote node
6322 result = self.rpc.call_migration_info(source_node, instance)
6323 msg = result.fail_msg
6325 log_err = ("Failed fetching source migration information from %s: %s" %
6327 logging.error(log_err)
6328 raise errors.OpExecError(log_err)
6330 self.migration_info = migration_info = result.payload
6332 # Then switch the disks to master/master mode
6333 self._EnsureSecondary(target_node)
6334 self._GoStandalone()
6335 self._GoReconnect(True)
6336 self._WaitUntilSync()
6338 self.feedback_fn("* preparing %s to accept the instance" % target_node)
6339 result = self.rpc.call_accept_instance(target_node,
6342 self.nodes_ip[target_node])
6344 msg = result.fail_msg
6346 logging.error("Instance pre-migration failed, trying to revert"
6347 " disk status: %s", msg)
6348 self.feedback_fn("Pre-migration failed, aborting")
6349 self._AbortMigration()
6350 self._RevertDiskStatus()
6351 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6352 (instance.name, msg))
6354 self.feedback_fn("* migrating instance to %s" % target_node)
6356 result = self.rpc.call_instance_migrate(source_node, instance,
6357 self.nodes_ip[target_node],
6359 msg = result.fail_msg
6361 logging.error("Instance migration failed, trying to revert"
6362 " disk status: %s", msg)
6363 self.feedback_fn("Migration failed, aborting")
6364 self._AbortMigration()
6365 self._RevertDiskStatus()
6366 raise errors.OpExecError("Could not migrate instance %s: %s" %
6367 (instance.name, msg))
6370 instance.primary_node = target_node
6371 # distribute new instance config to the other nodes
6372 self.cfg.Update(instance, self.feedback_fn)
6374 result = self.rpc.call_finalize_migration(target_node,
6378 msg = result.fail_msg
6380 logging.error("Instance migration succeeded, but finalization failed:"
6382 raise errors.OpExecError("Could not finalize instance migration: %s" %
6385 self._EnsureSecondary(source_node)
6386 self._WaitUntilSync()
6387 self._GoStandalone()
6388 self._GoReconnect(False)
6389 self._WaitUntilSync()
6391 self.feedback_fn("* done")
6393 def Exec(self, feedback_fn):
6394 """Perform the migration.
6397 feedback_fn("Migrating instance %s" % self.instance.name)
6399 self.feedback_fn = feedback_fn
6401 self.source_node = self.instance.primary_node
6402 self.target_node = self.instance.secondary_nodes[0]
6403 self.all_nodes = [self.source_node, self.target_node]
6405 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
6406 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
6410 return self._ExecCleanup()
6412 return self._ExecMigration()
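# In Exec above, nodes_ip maps each of the two nodes to its secondary IP,
# i.e. the address on the replication network; the DRBD reconfiguration and
# the migration RPCs are directed at those addresses rather than at the
# primary ones.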
6415 def _CreateBlockDev(lu, node, instance, device, force_create,
6417 """Create a tree of block devices on a given node.
6419 If this device type has to be created on secondaries, create it and all its children.
6422 If not, just recurse to children keeping the same 'force' value.
6424 @param lu: the lu on whose behalf we execute
6425 @param node: the node on which to create the device
6426 @type instance: L{objects.Instance}
6427 @param instance: the instance which owns the device
6428 @type device: L{objects.Disk}
6429 @param device: the device to create
6430 @type force_create: boolean
6431 @param force_create: whether to force creation of this device; this
6432 will be changed to True whenever we find a device which has the
6433 CreateOnSecondary() attribute
6434 @param info: the extra 'metadata' we should attach to the device
6435 (this will be represented as a LVM tag)
6436 @type force_open: boolean
6437 @param force_open: this parameter will be passed to the
6438 L{backend.BlockdevCreate} function where it specifies
6439 whether we run on primary or not, and it affects both
6440 the child assembly and the device's own Open() execution
6443 if device.CreateOnSecondary():
6447 for child in device.children:
6448 _CreateBlockDev(lu, node, instance, child, force_create,
6451 if not force_create:
6454 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
6457 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
6458 """Create a single block device on a given node.
6460 This will not recurse over children of the device, so they must be created in advance.
6463 @param lu: the lu on whose behalf we execute
6464 @param node: the node on which to create the device
6465 @type instance: L{objects.Instance}
6466 @param instance: the instance which owns the device
6467 @type device: L{objects.Disk}
6468 @param device: the device to create
6469 @param info: the extra 'metadata' we should attach to the device
6470 (this will be represented as a LVM tag)
6471 @type force_open: boolean
6472 @param force_open: this parameter will be passed to the
6473 L{backend.BlockdevCreate} function where it specifies
6474 whether we run on primary or not, and it affects both
6475 the child assembly and the device's own Open() execution
6478 lu.cfg.SetDiskID(device, node)
6479 result = lu.rpc.call_blockdev_create(node, device, device.size,
6480 instance.name, force_open, info)
6481 result.Raise("Can't create block device %s on"
6482 " node %s for instance %s" % (device, node, instance.name))
6483 if device.physical_id is None:
6484 device.physical_id = result.payload
6487 def _GenerateUniqueNames(lu, exts):
6488 """Generate a suitable LV name.
6490 This will generate a logical volume name for the given instance.
6495 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
6496 results.append("%s%s" % (new_id, val))
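# _GenerateUniqueNames returns one name per requested extension, each built
# from a freshly reserved unique ID plus the extension; purely as an
# illustration, exts of [".disk0", ".disk1"] would yield something like
# ["<uuid>.disk0", "<uuid>.disk1"].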
6500 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
6502 """Generate a drbd8 device complete with its children.
6505 port = lu.cfg.AllocatePort()
6506 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
6507 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
6508 logical_id=(vgname, names[0]))
6509 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6510 logical_id=(vgname, names[1]))
6511 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
6512 logical_id=(primary, secondary, port,
6515 children=[dev_data, dev_meta],
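# The DRBD8 branch built above is a small device tree: an LD_DRBD8 device of
# the requested size whose children are two logical volumes, the data LV
# (full size, names[0]) and a 128 MiB metadata LV (names[1]); its drbd
# logical_id ties together the two nodes and the allocated port (plus, in
# the elided part of the tuple, the per-node minors and the shared secret).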
6520 def _GenerateDiskTemplate(lu, template_name,
6521 instance_name, primary_node,
6522 secondary_nodes, disk_info,
6523 file_storage_dir, file_driver,
6524 base_index, feedback_fn):
6525 """Generate the entire disk layout for a given template type.
6528 #TODO: compute space requirements
6530 vgname = lu.cfg.GetVGName()
6531 disk_count = len(disk_info)
6533 if template_name == constants.DT_DISKLESS:
6535 elif template_name == constants.DT_PLAIN:
6536 if len(secondary_nodes) != 0:
6537 raise errors.ProgrammerError("Wrong template configuration")
6539 names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6540 for i in range(disk_count)])
6541 for idx, disk in enumerate(disk_info):
6542 disk_index = idx + base_index
6543 vg = disk.get("vg", vgname)
6544 feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
6545 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
6546 logical_id=(vg, names[idx]),
6547 iv_name="disk/%d" % disk_index,
6549 disks.append(disk_dev)
6550 elif template_name == constants.DT_DRBD8:
6551 if len(secondary_nodes) != 1:
6552 raise errors.ProgrammerError("Wrong template configuration")
6553 remote_node = secondary_nodes[0]
6554 minors = lu.cfg.AllocateDRBDMinor(
6555 [primary_node, remote_node] * len(disk_info), instance_name)
6558 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6559 for i in range(disk_count)]):
6560 names.append(lv_prefix + "_data")
6561 names.append(lv_prefix + "_meta")
6562 for idx, disk in enumerate(disk_info):
6563 disk_index = idx + base_index
6564 vg = disk.get("vg", vgname)
6565 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
6566 disk["size"], vg, names[idx*2:idx*2+2],
6567 "disk/%d" % disk_index,
6568 minors[idx*2], minors[idx*2+1])
6569 disk_dev.mode = disk["mode"]
6570 disks.append(disk_dev)
6571 elif template_name == constants.DT_FILE:
6572 if len(secondary_nodes) != 0:
6573 raise errors.ProgrammerError("Wrong template configuration")
6575 opcodes.RequireFileStorage()
6577 for idx, disk in enumerate(disk_info):
6578 disk_index = idx + base_index
6579 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6580 iv_name="disk/%d" % disk_index,
6581 logical_id=(file_driver,
6582 "%s/disk%d" % (file_storage_dir,
6585 disks.append(disk_dev)
6587 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
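# Naming scheme used above: plain LVs get one "<uuid>.diskN" name each,
# drbd8 disks get a "_data"/"_meta" LV pair per disk plus two DRBD minors
# (one per node), and file-based disks are simply laid out as
# "<file_storage_dir>/diskN" with the chosen file driver.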
6591 def _GetInstanceInfoText(instance):
6592 Compute the text that should be added to the disk's metadata.
6595 return "originstname+%s" % instance.name
6598 def _CalcEta(time_taken, written, total_size):
6599 """Calculates the ETA based on size written and total size.
6601 @param time_taken: The time taken so far
6602 @param written: amount written so far
6603 @param total_size: The total size of data to be written
6604 @return: The remaining time in seconds
6607 avg_time = time_taken / float(written)
6608 return (total_size - written) * avg_time
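# A quick worked example of the ETA formula above: after 60 seconds with
# 512 MiB of 2048 MiB written, avg_time is 60/512 seconds per MiB, so the
# remaining (2048 - 512) MiB give an ETA of 180 seconds.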
6611 def _WipeDisks(lu, instance):
6612 """Wipes instance disks.
6614 @type lu: L{LogicalUnit}
6615 @param lu: the logical unit on whose behalf we execute
6616 @type instance: L{objects.Instance}
6617 @param instance: the instance whose disks we should wipe
6618 @return: the success of the wipe
6621 node = instance.primary_node
6622 logging.info("Pause sync of instance %s disks", instance.name)
6623 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
6625 for idx, success in enumerate(result.payload):
6627 logging.warn("pause-sync of instance %s for disk %d failed",
6631 for idx, device in enumerate(instance.disks):
6632 lu.LogInfo("* Wiping disk %d", idx)
6633 logging.info("Wiping disk %d for instance %s", idx, instance.name)
6635 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
6636 # MAX_WIPE_CHUNK at max
6637 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
6638 constants.MIN_WIPE_CHUNK_PERCENT)
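# Illustration of the chunking above (assuming the shipped constants of
# MAX_WIPE_CHUNK = 1024 MiB and MIN_WIPE_CHUNK_PERCENT = 10): a 4 GiB disk
# is wiped in ~410 MiB chunks, while anything of 10 GiB or more is capped
# at 1 GiB per call_blockdev_wipe request.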
6643 start_time = time.time()
6645 while offset < size:
6646 wipe_size = min(wipe_chunk_size, size - offset)
6647 result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
6648 result.Raise("Could not wipe disk %d at offset %d for size %d" %
6649 (idx, offset, wipe_size))
6652 if now - last_output >= 60:
6653 eta = _CalcEta(now - start_time, offset, size)
6654 lu.LogInfo(" - done: %.1f%% ETA: %s" %
6655 (offset / float(size) * 100, utils.FormatSeconds(eta)))
6658 logging.info("Resume sync of instance %s disks", instance.name)
6660 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
6662 for idx, success in enumerate(result.payload):
6664 lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
6665 " look at the status and troubleshoot the issue.", idx)
6666 logging.warn("resume-sync of instance %s for disk %d failed",
6670 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
6671 """Create all disks for an instance.
6673 This abstracts away some work from AddInstance.
6675 @type lu: L{LogicalUnit}
6676 @param lu: the logical unit on whose behalf we execute
6677 @type instance: L{objects.Instance}
6678 @param instance: the instance whose disks we should create
6680 @param to_skip: list of indices to skip
6681 @type target_node: string
6682 @param target_node: if passed, overrides the target node for creation
6684 @return: the success of the creation
6687 info = _GetInstanceInfoText(instance)
6688 if target_node is None:
6689 pnode = instance.primary_node
6690 all_nodes = instance.all_nodes
6695 if instance.disk_template == constants.DT_FILE:
6696 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6697 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
6699 result.Raise("Failed to create directory '%s' on"
6700 " node %s" % (file_storage_dir, pnode))
6702 # Note: this needs to be kept in sync with adding of disks in
6703 # LUInstanceSetParams
6704 for idx, device in enumerate(instance.disks):
6705 if to_skip and idx in to_skip:
6707 logging.info("Creating volume %s for instance %s",
6708 device.iv_name, instance.name)
6710 for node in all_nodes:
6711 f_create = node == pnode
6712 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
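# Note on the loop above: f_create is True only on the primary node, so both
# force_create and force_open are set there, while on the other nodes
# _CreateBlockDev only descends into children that declare
# CreateOnSecondary().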
6715 def _RemoveDisks(lu, instance, target_node=None):
6716 """Remove all disks for an instance.
6718 This abstracts away some work from `AddInstance()` and
6719 `RemoveInstance()`. Note that in case some of the devices couldn't
6720 be removed, the removal will continue with the other ones (compare
6721 with `_CreateDisks()`).
6723 @type lu: L{LogicalUnit}
6724 @param lu: the logical unit on whose behalf we execute
6725 @type instance: L{objects.Instance}
6726 @param instance: the instance whose disks we should remove
6727 @type target_node: string
6728 @param target_node: used to override the node on which to remove the disks
6730 @return: the success of the removal
6733 logging.info("Removing block devices for instance %s", instance.name)
6736 for device in instance.disks:
6738 edata = [(target_node, device)]
6740 edata = device.ComputeNodeTree(instance.primary_node)
6741 for node, disk in edata:
6742 lu.cfg.SetDiskID(disk, node)
6743 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
6745 lu.LogWarning("Could not remove block device %s on node %s,"
6746 " continuing anyway: %s", device.iv_name, node, msg)
6749 if instance.disk_template == constants.DT_FILE:
6750 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6754 tgt = instance.primary_node
6755 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
6757 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
6758 file_storage_dir, instance.primary_node, result.fail_msg)
6764 def _ComputeDiskSizePerVG(disk_template, disks):
6765 """Compute disk size requirements in the volume group
6768 def _compute(disks, payload):
6769 """Universal algorithm
6774 vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload
6778 # Required free disk space as a function of disk and swap space
6780 constants.DT_DISKLESS: {},
6781 constants.DT_PLAIN: _compute(disks, 0),
6782 # 128 MB are added for drbd metadata for each disk
6783 constants.DT_DRBD8: _compute(disks, 128),
6784 constants.DT_FILE: {},
6787 if disk_template not in req_size_dict:
6788 raise errors.ProgrammerError("Disk template '%s' size requirement"
6789 " is unknown" % disk_template)
6791 return req_size_dict[disk_template]
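# Worked example for the per-VG computation above: for DT_DRBD8 with
# disks = [{"size": 10240, "vg": "xenvg"}, {"size": 5120, "vg": "xenvg"}]
# the result is {"xenvg": (10240 + 128) + (5120 + 128)} = {"xenvg": 15616},
# i.e. each disk accounts for its size plus the 128 MiB of DRBD metadata.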
6794 def _ComputeDiskSize(disk_template, disks):
6795 """Compute disk size requirements in the volume group
6798 # Required free disk space as a function of disk and swap space
6800 constants.DT_DISKLESS: None,
6801 constants.DT_PLAIN: sum(d["size"] for d in disks),
6802 # 128 MB are added for drbd metadata for each disk
6803 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
6804 constants.DT_FILE: None,
6807 if disk_template not in req_size_dict:
6808 raise errors.ProgrammerError("Disk template '%s' size requirement"
6809 " is unknown" % disk_template)
6811 return req_size_dict[disk_template]
6814 def _CheckHVParams(lu, nodenames, hvname, hvparams):
6815 """Hypervisor parameter validation.
6817 This function abstracts the hypervisor parameter validation to be
6818 used in both instance create and instance modify.
6820 @type lu: L{LogicalUnit}
6821 @param lu: the logical unit for which we check
6822 @type nodenames: list
6823 @param nodenames: the list of nodes on which we should check
6824 @type hvname: string
6825 @param hvname: the name of the hypervisor we should use
6826 @type hvparams: dict
6827 @param hvparams: the parameters which we need to check
6828 @raise errors.OpPrereqError: if the parameters are not valid
6831 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6834 for node in nodenames:
6838 info.Raise("Hypervisor parameter validation failed on node %s" % node)
6841 def _CheckOSParams(lu, required, nodenames, osname, osparams):
6842 """OS parameters validation.
6844 @type lu: L{LogicalUnit}
6845 @param lu: the logical unit for which we check
6846 @type required: boolean
6847 @param required: whether the validation should fail if the OS is not found
6849 @type nodenames: list
6850 @param nodenames: the list of nodes on which we should check
6851 @type osname: string
6852 @param osname: the name of the OS we should use
6853 @type osparams: dict
6854 @param osparams: the parameters which we need to check
6855 @raise errors.OpPrereqError: if the parameters are not valid
6858 result = lu.rpc.call_os_validate(required, nodenames, osname,
6859 [constants.OS_VALIDATE_PARAMETERS],
6861 for node, nres in result.items():
6862 # we don't check for offline cases since this should be run only
6863 # against the master node and/or an instance's nodes
6864 nres.Raise("OS Parameters validation failed on node %s" % node)
6865 if not nres.payload:
6866 lu.LogInfo("OS %s not found on node %s, validation skipped",
6870 class LUInstanceCreate(LogicalUnit):
6871 """Create an instance.
6874 HPATH = "instance-add"
6875 HTYPE = constants.HTYPE_INSTANCE
6878 def CheckArguments(self):
6882 # do not require name_check to ease forward/backward compatibility
6884 if self.op.no_install and self.op.start:
6885 self.LogInfo("No-installation mode selected, disabling startup")
6886 self.op.start = False
6887 # validate/normalize the instance name
6888 self.op.instance_name = \
6889 netutils.Hostname.GetNormalizedName(self.op.instance_name)
6891 if self.op.ip_check and not self.op.name_check:
6892 # TODO: make the ip check more flexible and not depend on the name check
6893 raise errors.OpPrereqError("Cannot do ip check without a name check",
6896 # check nics' parameter names
6897 for nic in self.op.nics:
6898 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
6900 # check disks. parameter names and consistent adopt/no-adopt strategy
6901 has_adopt = has_no_adopt = False
6902 for disk in self.op.disks:
6903 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
6908 if has_adopt and has_no_adopt:
6909 raise errors.OpPrereqError("Either all disks are adopted or none is",
6912 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
6913 raise errors.OpPrereqError("Disk adoption is not supported for the"
6914 " '%s' disk template" %
6915 self.op.disk_template,
6917 if self.op.iallocator is not None:
6918 raise errors.OpPrereqError("Disk adoption not allowed with an"
6919 " iallocator script", errors.ECODE_INVAL)
6920 if self.op.mode == constants.INSTANCE_IMPORT:
6921 raise errors.OpPrereqError("Disk adoption not allowed for"
6922 " instance import", errors.ECODE_INVAL)
6924 self.adopt_disks = has_adopt
6926 # instance name verification
6927 if self.op.name_check:
6928 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
6929 self.op.instance_name = self.hostname1.name
6930 # used in CheckPrereq for ip ping check
6931 self.check_ip = self.hostname1.ip
6933 self.check_ip = None
6935 # file storage checks
6936 if (self.op.file_driver and
6937 not self.op.file_driver in constants.FILE_DRIVER):
6938 raise errors.OpPrereqError("Invalid file driver name '%s'" %
6939 self.op.file_driver, errors.ECODE_INVAL)
6941 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6942 raise errors.OpPrereqError("File storage directory path not absolute",
6945 ### Node/iallocator related checks
6946 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
6948 if self.op.pnode is not None:
6949 if self.op.disk_template in constants.DTS_NET_MIRROR:
6950 if self.op.snode is None:
6951 raise errors.OpPrereqError("The networked disk templates need"
6952 " a mirror node", errors.ECODE_INVAL)
6954 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
6956 self.op.snode = None
6958 self._cds = _GetClusterDomainSecret()
6960 if self.op.mode == constants.INSTANCE_IMPORT:
6961 # On import force_variant must be True, because if we forced it at
6962 # initial install, our only chance when importing it back is that it
6964 self.op.force_variant = True
6966 if self.op.no_install:
6967 self.LogInfo("No-installation mode has no effect during import")
6969 elif self.op.mode == constants.INSTANCE_CREATE:
6970 if self.op.os_type is None:
6971 raise errors.OpPrereqError("No guest OS specified",
6973 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
6974 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
6975 " installation" % self.op.os_type,
6977 if self.op.disk_template is None:
6978 raise errors.OpPrereqError("No disk template specified",
6981 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
6982 # Check handshake to ensure both clusters have the same domain secret
6983 src_handshake = self.op.source_handshake
6984 if not src_handshake:
6985 raise errors.OpPrereqError("Missing source handshake",
6988 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
6991 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
6994 # Load and check source CA
6995 self.source_x509_ca_pem = self.op.source_x509_ca
6996 if not self.source_x509_ca_pem:
6997 raise errors.OpPrereqError("Missing source X509 CA",
7001 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7003 except OpenSSL.crypto.Error, err:
7004 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7005 (err, ), errors.ECODE_INVAL)
7007 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7008 if errcode is not None:
7009 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7012 self.source_x509_ca = cert
7014 src_instance_name = self.op.source_instance_name
7015 if not src_instance_name:
7016 raise errors.OpPrereqError("Missing source instance name",
7019 self.source_instance_name = \
7020 netutils.GetHostname(name=src_instance_name).name
7023 raise errors.OpPrereqError("Invalid instance creation mode %r" %
7024 self.op.mode, errors.ECODE_INVAL)
7026 def ExpandNames(self):
7027 """ExpandNames for CreateInstance.
7029 Figure out the right locks for instance creation.
7032 self.needed_locks = {}
7034 instance_name = self.op.instance_name
7035 # this is just a preventive check, but someone might still add this
7036 # instance in the meantime, and creation will fail at lock-add time
7037 if instance_name in self.cfg.GetInstanceList():
7038 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7039 instance_name, errors.ECODE_EXISTS)
7041 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7043 if self.op.iallocator:
7044 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7046 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7047 nodelist = [self.op.pnode]
7048 if self.op.snode is not None:
7049 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7050 nodelist.append(self.op.snode)
7051 self.needed_locks[locking.LEVEL_NODE] = nodelist
7053 # in case of import lock the source node too
7054 if self.op.mode == constants.INSTANCE_IMPORT:
7055 src_node = self.op.src_node
7056 src_path = self.op.src_path
7058 if src_path is None:
7059 self.op.src_path = src_path = self.op.instance_name
7061 if src_node is None:
7062 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7063 self.op.src_node = None
7064 if os.path.isabs(src_path):
7065 raise errors.OpPrereqError("Importing an instance from an absolute"
7066 " path requires a source node option.",
7069 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7070 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7071 self.needed_locks[locking.LEVEL_NODE].append(src_node)
7072 if not os.path.isabs(src_path):
7073 self.op.src_path = src_path = \
7074 utils.PathJoin(constants.EXPORT_DIR, src_path)
7076 def _RunAllocator(self):
7077 """Run the allocator based on input opcode.
7080 nics = [n.ToDict() for n in self.nics]
7081 ial = IAllocator(self.cfg, self.rpc,
7082 mode=constants.IALLOCATOR_MODE_ALLOC,
7083 name=self.op.instance_name,
7084 disk_template=self.op.disk_template,
7087 vcpus=self.be_full[constants.BE_VCPUS],
7088 mem_size=self.be_full[constants.BE_MEMORY],
7091 hypervisor=self.op.hypervisor,
7094 ial.Run(self.op.iallocator)
7097 raise errors.OpPrereqError("Can't compute nodes using"
7098 " iallocator '%s': %s" %
7099 (self.op.iallocator, ial.info),
7101 if len(ial.result) != ial.required_nodes:
7102 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7103 " of nodes (%s), required %s" %
7104 (self.op.iallocator, len(ial.result),
7105 ial.required_nodes), errors.ECODE_FAULT)
7106 self.op.pnode = ial.result[0]
7107 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7108 self.op.instance_name, self.op.iallocator,
7109 utils.CommaJoin(ial.result))
7110 if ial.required_nodes == 2:
7111 self.op.snode = ial.result[1]
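# The allocator result above is positional: result[0] becomes the primary
# node and, when the selected disk template needs two nodes, result[1]
# becomes the secondary; a result of the wrong length is treated as an
# allocator fault.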
7113 def BuildHooksEnv(self):
7116 This runs on master, primary and secondary nodes of the instance.
7120 "ADD_MODE": self.op.mode,
7122 if self.op.mode == constants.INSTANCE_IMPORT:
7123 env["SRC_NODE"] = self.op.src_node
7124 env["SRC_PATH"] = self.op.src_path
7125 env["SRC_IMAGES"] = self.src_images
7127 env.update(_BuildInstanceHookEnv(
7128 name=self.op.instance_name,
7129 primary_node=self.op.pnode,
7130 secondary_nodes=self.secondaries,
7131 status=self.op.start,
7132 os_type=self.op.os_type,
7133 memory=self.be_full[constants.BE_MEMORY],
7134 vcpus=self.be_full[constants.BE_VCPUS],
7135 nics=_NICListToTuple(self, self.nics),
7136 disk_template=self.op.disk_template,
7137 disks=[(d["size"], d["mode"]) for d in self.disks],
7140 hypervisor_name=self.op.hypervisor,
7143 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
7147 def _ReadExportInfo(self):
7148 """Reads the export information from disk.
7150 It will override the opcode source node and path with the actual
7151 information, if these two were not specified before.
7153 @return: the export information
7156 assert self.op.mode == constants.INSTANCE_IMPORT
7158 src_node = self.op.src_node
7159 src_path = self.op.src_path
7161 if src_node is None:
7162 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7163 exp_list = self.rpc.call_export_list(locked_nodes)
7165 for node in exp_list:
7166 if exp_list[node].fail_msg:
7168 if src_path in exp_list[node].payload:
7170 self.op.src_node = src_node = node
7171 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7175 raise errors.OpPrereqError("No export found for relative path %s" %
7176 src_path, errors.ECODE_INVAL)
7178 _CheckNodeOnline(self, src_node)
7179 result = self.rpc.call_export_info(src_node, src_path)
7180 result.Raise("No export or invalid export found in dir %s" % src_path)
7182 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7183 if not export_info.has_section(constants.INISECT_EXP):
7184 raise errors.ProgrammerError("Corrupted export config",
7185 errors.ECODE_ENVIRON)
7187 ei_version = export_info.get(constants.INISECT_EXP, "version")
7188 if (int(ei_version) != constants.EXPORT_VERSION):
7189 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7190 (ei_version, constants.EXPORT_VERSION),
7191 errors.ECODE_ENVIRON)
7194 def _ReadExportParams(self, einfo):
7195 """Use export parameters as defaults.
7197 In case the opcode doesn't specify (as in override) some instance
7198 parameters, then try to use them from the export information, if
7202 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7204 if self.op.disk_template is None:
7205 if einfo.has_option(constants.INISECT_INS, "disk_template"):
7206 self.op.disk_template = einfo.get(constants.INISECT_INS,
7209 raise errors.OpPrereqError("No disk template specified and the export"
7210 " is missing the disk_template information",
7213 if not self.op.disks:
7214 if einfo.has_option(constants.INISECT_INS, "disk_count"):
7216 # TODO: import the disk iv_name too
7217 for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7218 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7219 disks.append({"size": disk_sz})
7220 self.op.disks = disks
7222 raise errors.OpPrereqError("No disk info specified and the export"
7223 " is missing the disk information",
7226 if (not self.op.nics and
7227 einfo.has_option(constants.INISECT_INS, "nic_count")):
7229 for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7231 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7232 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7237 if (self.op.hypervisor is None and
7238 einfo.has_option(constants.INISECT_INS, "hypervisor")):
7239 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7240 if einfo.has_section(constants.INISECT_HYP):
7241 # use the export parameters but do not override the ones
7242 # specified by the user
7243 for name, value in einfo.items(constants.INISECT_HYP):
7244 if name not in self.op.hvparams:
7245 self.op.hvparams[name] = value
7247 if einfo.has_section(constants.INISECT_BEP):
7248 # use the parameters, without overriding
7249 for name, value in einfo.items(constants.INISECT_BEP):
7250 if name not in self.op.beparams:
7251 self.op.beparams[name] = value
7253 # try to read the parameters old style, from the main section
7254 for name in constants.BES_PARAMETERS:
7255 if (name not in self.op.beparams and
7256 einfo.has_option(constants.INISECT_INS, name)):
7257 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7259 if einfo.has_section(constants.INISECT_OSP):
7260 # use the parameters, without overriding
7261 for name, value in einfo.items(constants.INISECT_OSP):
7262 if name not in self.op.osparams:
7263 self.op.osparams[name] = value
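# Precedence in _ReadExportParams above: values given explicitly in the
# opcode always win; the export file only fills in parameters (disk
# template, disks, NICs, hypervisor, HV/BE/OS params) that the caller left
# unspecified.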
7265 def _RevertToDefaults(self, cluster):
7266 """Revert the instance parameters to the default values.
7270 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7271 for name in self.op.hvparams.keys():
7272 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7273 del self.op.hvparams[name]
7275 be_defs = cluster.SimpleFillBE({})
7276 for name in self.op.beparams.keys():
7277 if name in be_defs and be_defs[name] == self.op.beparams[name]:
7278 del self.op.beparams[name]
7280 nic_defs = cluster.SimpleFillNIC({})
7281 for nic in self.op.nics:
7282 for name in constants.NICS_PARAMETERS:
7283 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
7286 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7287 for name in self.op.osparams.keys():
7288 if name in os_defs and os_defs[name] == self.op.osparams[name]:
7289 del self.op.osparams[name]
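# _RevertToDefaults drops every parameter whose value equals the current
# cluster default, so such instances keep tracking the cluster-level
# defaults instead of freezing today's values into their own configuration
# (used when the opcode sets identify_defaults).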
7291 def CheckPrereq(self):
7292 """Check prerequisites.
7295 if self.op.mode == constants.INSTANCE_IMPORT:
7296 export_info = self._ReadExportInfo()
7297 self._ReadExportParams(export_info)
7299 if (not self.cfg.GetVGName() and
7300 self.op.disk_template not in constants.DTS_NOT_LVM):
7301 raise errors.OpPrereqError("Cluster does not support lvm-based"
7302 " instances", errors.ECODE_STATE)
7304 if self.op.hypervisor is None:
7305 self.op.hypervisor = self.cfg.GetHypervisorType()
7307 cluster = self.cfg.GetClusterInfo()
7308 enabled_hvs = cluster.enabled_hypervisors
7309 if self.op.hypervisor not in enabled_hvs:
7310 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7311 " cluster (%s)" % (self.op.hypervisor,
7312 ",".join(enabled_hvs)),
7315 # check hypervisor parameter syntax (locally)
7316 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7317 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
7319 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7320 hv_type.CheckParameterSyntax(filled_hvp)
7321 self.hv_full = filled_hvp
7322 # check that we don't specify global parameters on an instance
7323 _CheckGlobalHvParams(self.op.hvparams)
7325 # fill and remember the beparams dict
7326 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7327 self.be_full = cluster.SimpleFillBE(self.op.beparams)
7329 # build os parameters
7330 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7332 # now that hvp/bep are in final format, let's reset to defaults,
7334 if self.op.identify_defaults:
7335 self._RevertToDefaults(cluster)
7339 for idx, nic in enumerate(self.op.nics):
7340 nic_mode_req = nic.get("mode", None)
7341 nic_mode = nic_mode_req
7342 if nic_mode is None:
7343 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7345 # in routed mode, for the first nic, the default ip is 'auto'
7346 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7347 default_ip_mode = constants.VALUE_AUTO
7349 default_ip_mode = constants.VALUE_NONE
7351 # ip validity checks
7352 ip = nic.get("ip", default_ip_mode)
7353 if ip is None or ip.lower() == constants.VALUE_NONE:
7355 elif ip.lower() == constants.VALUE_AUTO:
7356 if not self.op.name_check:
7357 raise errors.OpPrereqError("IP address set to auto but name checks"
7358 " have been skipped",
7360 nic_ip = self.hostname1.ip
7362 if not netutils.IPAddress.IsValid(ip):
7363 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
7367 # TODO: check the ip address for uniqueness
7368 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7369 raise errors.OpPrereqError("Routed nic mode requires an ip address",
7372 # MAC address verification
7373 mac = nic.get("mac", constants.VALUE_AUTO)
7374 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7375 mac = utils.NormalizeAndValidateMac(mac)
7378 self.cfg.ReserveMAC(mac, self.proc.GetECId())
7379 except errors.ReservationError:
7380 raise errors.OpPrereqError("MAC address %s already in use"
7381 " in cluster" % mac,
7382 errors.ECODE_NOTUNIQUE)
7384 # bridge verification
7385 bridge = nic.get("bridge", None)
7386 link = nic.get("link", None)
7388 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7389 " at the same time", errors.ECODE_INVAL)
7390 elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
7391 raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
7398 nicparams[constants.NIC_MODE] = nic_mode_req
7400 nicparams[constants.NIC_LINK] = link
7402 check_params = cluster.SimpleFillNIC(nicparams)
7403 objects.NIC.CheckParameterSyntax(check_params)
7404 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7406 # disk checks/pre-build
7408 for disk in self.op.disks:
7409 mode = disk.get("mode", constants.DISK_RDWR)
7410 if mode not in constants.DISK_ACCESS_SET:
7411 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7412 mode, errors.ECODE_INVAL)
7413 size = disk.get("size", None)
7415 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7418 except (TypeError, ValueError):
7419 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7421 vg = disk.get("vg", self.cfg.GetVGName())
7422 new_disk = {"size": size, "mode": mode, "vg": vg}
7424 new_disk["adopt"] = disk["adopt"]
7425 self.disks.append(new_disk)
7427 if self.op.mode == constants.INSTANCE_IMPORT:
7429 # Check that the new instance doesn't have less disks than the export
7430 instance_disks = len(self.disks)
7431 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7432 if instance_disks < export_disks:
7433 raise errors.OpPrereqError("Not enough disks to import."
7434 " (instance: %d, export: %d)" %
7435 (instance_disks, export_disks),
7439 for idx in range(export_disks):
7440 option = 'disk%d_dump' % idx
7441 if export_info.has_option(constants.INISECT_INS, option):
7442 # FIXME: are the old os-es, disk sizes, etc. useful?
7443 export_name = export_info.get(constants.INISECT_INS, option)
7444 image = utils.PathJoin(self.op.src_path, export_name)
7445 disk_images.append(image)
7447 disk_images.append(False)
7449 self.src_images = disk_images
7451 old_name = export_info.get(constants.INISECT_INS, 'name')
7453 exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7454 except (TypeError, ValueError), err:
7455 raise errors.OpPrereqError("Invalid export file, nic_count is not"
7456 " an integer: %s" % str(err),
7458 if self.op.instance_name == old_name:
7459 for idx, nic in enumerate(self.nics):
7460 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7461 nic_mac_ini = 'nic%d_mac' % idx
7462 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7464 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7466 # ip ping checks (we use the same ip that was resolved in ExpandNames)
7467 if self.op.ip_check:
7468 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7469 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7470 (self.check_ip, self.op.instance_name),
7471 errors.ECODE_NOTUNIQUE)
7473 #### mac address generation
7474 # By generating here the mac address both the allocator and the hooks get
7475 # the real final mac address rather than the 'auto' or 'generate' value.
7476 # There is a race condition between the generation and the instance object
7477 # creation, which means that we know the mac is valid now, but we're not
7478 # sure it will be when we actually add the instance. If things go bad
7479 # adding the instance will abort because of a duplicate mac, and the
7480 # creation job will fail.
7481 for nic in self.nics:
7482 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7483 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7487 if self.op.iallocator is not None:
7488 self._RunAllocator()
7490 #### node related checks
7492 # check primary node
7493 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7494 assert self.pnode is not None, \
7495 "Cannot retrieve locked node %s" % self.op.pnode
7496 if pnode.offline:
7497 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7498 pnode.name, errors.ECODE_STATE)
7499 if pnode.drained:
7500 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7501 pnode.name, errors.ECODE_STATE)
7502 if not pnode.vm_capable:
7503 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
7504 " '%s'" % pnode.name, errors.ECODE_STATE)
7506 self.secondaries = []
7508 # mirror node verification
7509 if self.op.disk_template in constants.DTS_NET_MIRROR:
7510 if self.op.snode == pnode.name:
7511 raise errors.OpPrereqError("The secondary node cannot be the"
7512 " primary node.", errors.ECODE_INVAL)
7513 _CheckNodeOnline(self, self.op.snode)
7514 _CheckNodeNotDrained(self, self.op.snode)
7515 _CheckNodeVmCapable(self, self.op.snode)
7516 self.secondaries.append(self.op.snode)
7518 nodenames = [pnode.name] + self.secondaries
7520 if not self.adopt_disks:
7521 # Check lv size requirements, if not adopting
7522 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
7523 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
7525 else: # instead, we must check the adoption data
7526 all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
7527 if len(all_lvs) != len(self.disks):
7528 raise errors.OpPrereqError("Duplicate volume names given for adoption",
7529 errors.ECODE_INVAL)
7530 for lv_name in all_lvs:
7531 try:
7532 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
7533 # to ReserveLV uses the same syntax
7534 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7535 except errors.ReservationError:
7536 raise errors.OpPrereqError("LV named %s used by another instance" %
7537 lv_name, errors.ECODE_NOTUNIQUE)
7539 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
7540 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
7542 node_lvs = self.rpc.call_lv_list([pnode.name],
7543 vg_names.payload.keys())[pnode.name]
7544 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7545 node_lvs = node_lvs.payload
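# node_lvs maps "vg/lv" names to the attributes reported by lv_list; as used
# below, index 0 holds the size in MiB and index 2 the online/in-use flag.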
7547 delta = all_lvs.difference(node_lvs.keys())
7548 if delta:
7549 raise errors.OpPrereqError("Missing logical volume(s): %s" %
7550 utils.CommaJoin(delta),
7551 errors.ECODE_INVAL)
7552 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7553 if online_lvs:
7554 raise errors.OpPrereqError("Online logical volumes found, cannot"
7555 " adopt: %s" % utils.CommaJoin(online_lvs),
7556 errors.ECODE_STATE)
7557 # update the size of disk based on what is found
7558 for dsk in self.disks:
7559 dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
7561 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7563 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7564 # check OS parameters (remotely)
7565 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7567 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7569 # memory check on primary node
7570 if self.op.start:
7571 _CheckNodeFreeMemory(self, self.pnode.name,
7572 "creating instance %s" % self.op.instance_name,
7573 self.be_full[constants.BE_MEMORY],
7574 self.op.hypervisor)
7576 self.dry_run_result = list(nodenames)
7578 def Exec(self, feedback_fn):
7579 """Create and add the instance to the cluster.
7581 """
7582 instance = self.op.instance_name
7583 pnode_name = self.pnode.name
7585 ht_kind = self.op.hypervisor
7586 if ht_kind in constants.HTS_REQ_PORT:
7587 network_port = self.cfg.AllocatePort()
7588 else:
7589 network_port = None
7591 if constants.ENABLE_FILE_STORAGE:
7592 # this is needed because os.path.join does not accept None arguments
7593 if self.op.file_storage_dir is None:
7594 string_file_storage_dir = ""
7595 else:
7596 string_file_storage_dir = self.op.file_storage_dir
7598 # build the full file storage dir path
7599 file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
7600 string_file_storage_dir, instance)
7601 else:
7602 file_storage_dir = ""
7604 disks = _GenerateDiskTemplate(self,
7605 self.op.disk_template,
7606 instance, pnode_name,
7607 self.secondaries,
7608 self.disks,
7609 file_storage_dir,
7610 self.op.file_driver,
7611 0,
7612 feedback_fn)
7614 iobj = objects.Instance(name=instance, os=self.op.os_type,
7615 primary_node=pnode_name,
7616 nics=self.nics, disks=disks,
7617 disk_template=self.op.disk_template,
7618 admin_up=False,
7619 network_port=network_port,
7620 beparams=self.op.beparams,
7621 hvparams=self.op.hvparams,
7622 hypervisor=self.op.hypervisor,
7623 osparams=self.op.osparams,
7624 )
7626 if self.adopt_disks:
7627 # rename LVs to the newly-generated names; we need to construct
7628 # 'fake' LV disks with the old data, plus the new unique_id
7629 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7630 rename_to = []
7631 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
7632 rename_to.append(t_dsk.logical_id)
7633 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7634 self.cfg.SetDiskID(t_dsk, pnode_name)
7635 result = self.rpc.call_blockdev_rename(pnode_name,
7636 zip(tmp_disks, rename_to))
7637 result.Raise("Failed to rename adopted LVs")
7639 feedback_fn("* creating instance disks...")
7640 try:
7641 _CreateDisks(self, iobj)
7642 except errors.OpExecError:
7643 self.LogWarning("Device creation failed, reverting...")
7644 try:
7645 _RemoveDisks(self, iobj)
7646 finally:
7647 self.cfg.ReleaseDRBDMinors(instance)
7648 raise
7650 if self.cfg.GetClusterInfo().prealloc_wipe_disks:
7651 feedback_fn("* wiping instance disks...")
7652 try:
7653 _WipeDisks(self, iobj)
7654 except errors.OpExecError:
7655 self.LogWarning("Device wiping failed, reverting...")
7656 try:
7657 _RemoveDisks(self, iobj)
7658 finally:
7659 self.cfg.ReleaseDRBDMinors(instance)
7660 raise
7662 feedback_fn("adding instance %s to cluster config" % instance)
7664 self.cfg.AddInstance(iobj, self.proc.GetECId())
7666 # Declare that we don't want to remove the instance lock anymore, as we've
7667 # added the instance to the config
7668 del self.remove_locks[locking.LEVEL_INSTANCE]
7669 # Unlock all the nodes
7670 if self.op.mode == constants.INSTANCE_IMPORT:
7671 nodes_keep = [self.op.src_node]
7672 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7673 if node != self.op.src_node]
7674 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7675 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7676 else:
7677 self.context.glm.release(locking.LEVEL_NODE)
7678 del self.acquired_locks[locking.LEVEL_NODE]
7680 if self.op.wait_for_sync:
7681 disk_abort = not _WaitForSync(self, iobj)
7682 elif iobj.disk_template in constants.DTS_NET_MIRROR:
7683 # make sure the disks are not degraded (still sync-ing is ok)
7684 time.sleep(15)
7685 feedback_fn("* checking mirrors status")
7686 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
7687 else:
7688 disk_abort = False
7690 if disk_abort:
7691 _RemoveDisks(self, iobj)
7692 self.cfg.RemoveInstance(iobj.name)
7693 # Make sure the instance lock gets removed
7694 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
7695 raise errors.OpExecError("There are some degraded disks for"
7696 " this instance")
7698 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
7699 if self.op.mode == constants.INSTANCE_CREATE:
7700 if not self.op.no_install:
7701 feedback_fn("* running the instance OS create scripts...")
7702 # FIXME: pass debug option from opcode to backend
7703 result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
7704 self.op.debug_level)
7705 result.Raise("Could not add os for instance %s"
7706 " on node %s" % (instance, pnode_name))
7708 elif self.op.mode == constants.INSTANCE_IMPORT:
7709 feedback_fn("* running the instance OS import scripts...")
7711 transfers = []
7713 for idx, image in enumerate(self.src_images):
7714 if not image:
7715 continue
7717 # FIXME: pass debug option from opcode to backend
7718 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
7719 constants.IEIO_FILE, (image, ),
7720 constants.IEIO_SCRIPT,
7721 (iobj.disks[idx], idx),
7722 None)
7723 transfers.append(dt)
7725 import_result = \
7726 masterd.instance.TransferInstanceData(self, feedback_fn,
7727 self.op.src_node, pnode_name,
7728 self.pnode.secondary_ip,
7729 iobj, transfers)
7730 if not compat.all(import_result):
7731 self.LogWarning("Some disks for instance %s on node %s were not"
7732 " imported successfully" % (instance, pnode_name))
7734 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7735 feedback_fn("* preparing remote import...")
7736 # The source cluster will stop the instance before attempting to make a
7737 # connection. In some cases stopping an instance can take a long time,
7738 # hence the shutdown timeout is added to the connection timeout.
7739 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
7740 self.op.source_shutdown_timeout)
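# For example, with a 60-second RIE connect timeout and a two-minute shutdown
# timeout the source cluster gets roughly three minutes to stop the instance
# and connect (illustrative numbers only).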
7741 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
7743 assert iobj.primary_node == self.pnode.name
7744 disk_results = \
7745 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
7746 self.source_x509_ca,
7747 self._cds, timeouts)
7748 if not compat.all(disk_results):
7749 # TODO: Should the instance still be started, even if some disks
7750 # failed to import (valid for local imports, too)?
7751 self.LogWarning("Some disks for instance %s on node %s were not"
7752 " imported successfully" % (instance, pnode_name))
7754 # Run rename script on newly imported instance
7755 assert iobj.name == instance
7756 feedback_fn("Running rename script for %s" % instance)
7757 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
7758 self.source_instance_name,
7759 self.op.debug_level)
7760 if result.fail_msg:
7761 self.LogWarning("Failed to run rename script for %s on node"
7762 " %s: %s" % (instance, pnode_name, result.fail_msg))
7764 else:
7765 # also checked in the prereq part
7766 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
7767 % self.op.mode)
7769 if self.op.start:
7770 iobj.admin_up = True
7771 self.cfg.Update(iobj, feedback_fn)
7772 logging.info("Starting instance %s on node %s", instance, pnode_name)
7773 feedback_fn("* starting instance...")
7774 result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
7775 result.Raise("Could not start instance")
7777 return list(iobj.all_nodes)
7780 class LUInstanceConsole(NoHooksLU):
7781 """Connect to an instance's console.
7783 This is somewhat special in that it returns the command line that
7784 you need to run on the master node in order to connect to the
7785 console.
7787 """
7788 REQ_BGL = False
7790 def ExpandNames(self):
7791 self._ExpandAndLockInstance()
7793 def CheckPrereq(self):
7794 """Check prerequisites.
7796 This checks that the instance is in the cluster.
7798 """
7799 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7800 assert self.instance is not None, \
7801 "Cannot retrieve locked instance %s" % self.op.instance_name
7802 _CheckNodeOnline(self, self.instance.primary_node)
7804 def Exec(self, feedback_fn):
7805 """Connect to the console of an instance
7807 """
7808 instance = self.instance
7809 node = instance.primary_node
7811 node_insts = self.rpc.call_instance_list([node],
7812 [instance.hypervisor])[node]
7813 node_insts.Raise("Can't get node information from %s" % node)
7815 if instance.name not in node_insts.payload:
7816 if instance.admin_up:
7817 state = "ERROR_down"
7818 else:
7819 state = "ADMIN_down"
7820 raise errors.OpExecError("Instance %s is not running (state %s)" %
7821 (instance.name, state))
7823 logging.debug("Connecting to console of %s on %s", instance.name, node)
7825 hyper = hypervisor.GetHypervisor(instance.hypervisor)
7826 cluster = self.cfg.GetClusterInfo()
7827 # beparams and hvparams are passed separately, to avoid editing the
7828 # instance and then saving the defaults in the instance itself.
7829 hvparams = cluster.FillHV(instance)
7830 beparams = cluster.FillBE(instance)
7831 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
7833 assert console.instance == instance.name
7834 assert console.Validate()
7836 return console.ToDict()
7839 class LUInstanceReplaceDisks(LogicalUnit):
7840 """Replace the disks of an instance.
7842 """
7843 HPATH = "mirrors-replace"
7844 HTYPE = constants.HTYPE_INSTANCE
7845 REQ_BGL = False
7847 def CheckArguments(self):
7848 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
7849 self.op.iallocator)
7851 def ExpandNames(self):
7852 self._ExpandAndLockInstance()
7854 if self.op.iallocator is not None:
7855 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7857 elif self.op.remote_node is not None:
7858 remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7859 self.op.remote_node = remote_node
7861 # Warning: do not remove the locking of the new secondary here
7862 # unless DRBD8.AddChildren is changed to work in parallel;
7863 # currently it doesn't since parallel invocations of
7864 # FindUnusedMinor will conflict
7865 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
7866 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7868 else:
7869 self.needed_locks[locking.LEVEL_NODE] = []
7870 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7872 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
7873 self.op.iallocator, self.op.remote_node,
7874 self.op.disks, False, self.op.early_release)
7876 self.tasklets = [self.replacer]
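# With self.tasklets set, the processor runs the tasklet's CheckPrereq and
# Exec instead of this LU's own methods, so the real work lives in
# TLReplaceDisks below.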
7878 def DeclareLocks(self, level):
7879 # If we're not already locking all nodes in the set we have to declare the
7880 # instance's primary/secondary nodes.
7881 if (level == locking.LEVEL_NODE and
7882 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7883 self._LockInstancesNodes()
7885 def BuildHooksEnv(self):
7886 """Build hooks env.
7888 This runs on the master, the primary and all the secondaries.
7890 """
7891 instance = self.replacer.instance
7892 env = {
7893 "MODE": self.op.mode,
7894 "NEW_SECONDARY": self.op.remote_node,
7895 "OLD_SECONDARY": instance.secondary_nodes[0],
7896 }
7897 env.update(_BuildInstanceHookEnvByObject(self, instance))
7898 nl = [
7899 self.cfg.GetMasterNode(),
7900 instance.primary_node,
7901 ]
7902 if self.op.remote_node is not None:
7903 nl.append(self.op.remote_node)
7905 return env, nl, nl
7907 class TLReplaceDisks(Tasklet):
7908 """Replaces disks for an instance.
7910 Note: Locking is not within the scope of this class.
7912 """
7913 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7914 disks, delay_iallocator, early_release):
7915 """Initializes this class.
7917 """
7918 Tasklet.__init__(self, lu)
7920 # Parameters
7921 self.instance_name = instance_name
7922 self.mode = mode
7923 self.iallocator_name = iallocator_name
7924 self.remote_node = remote_node
7925 self.disks = disks
7926 self.delay_iallocator = delay_iallocator
7927 self.early_release = early_release
7929 # Runtime data
7930 self.instance = None
7931 self.new_node = None
7932 self.target_node = None
7933 self.other_node = None
7934 self.remote_node_info = None
7935 self.node_secondary_ip = None
7937 @staticmethod
7938 def CheckArguments(mode, remote_node, iallocator):
7939 """Helper function for users of this class.
7941 """
7942 # check for valid parameter combination
7943 if mode == constants.REPLACE_DISK_CHG:
7944 if remote_node is None and iallocator is None:
7945 raise errors.OpPrereqError("When changing the secondary either an"
7946 " iallocator script must be used or the"
7947 " new node given", errors.ECODE_INVAL)
7949 if remote_node is not None and iallocator is not None:
7950 raise errors.OpPrereqError("Give either the iallocator or the new"
7951 " secondary, not both", errors.ECODE_INVAL)
7953 elif remote_node is not None or iallocator is not None:
7954 # Not replacing the secondary
7955 raise errors.OpPrereqError("The iallocator and new node options can"
7956 " only be used when changing the"
7957 " secondary node", errors.ECODE_INVAL)
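# Summarised: REPLACE_DISK_CHG needs exactly one of remote_node/iallocator,
# while every other mode forbids both.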
7959 @staticmethod
7960 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
7961 """Compute a new secondary node using an IAllocator.
7963 """
7964 ial = IAllocator(lu.cfg, lu.rpc,
7965 mode=constants.IALLOCATOR_MODE_RELOC,
7966 name=instance_name,
7967 relocate_from=relocate_from)
7969 ial.Run(iallocator_name)
7971 if not ial.success:
7972 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7973 " %s" % (iallocator_name, ial.info),
7974 errors.ECODE_NORES)
7976 if len(ial.result) != ial.required_nodes:
7977 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7978 " of nodes (%s), required %s" %
7979 (iallocator_name,
7980 len(ial.result), ial.required_nodes),
7981 errors.ECODE_FAULT)
7983 remote_node_name = ial.result[0]
7985 lu.LogInfo("Selected new secondary for instance '%s': %s",
7986 instance_name, remote_node_name)
7988 return remote_node_name
7990 def _FindFaultyDisks(self, node_name):
7991 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
7992 node_name, True)
7994 def CheckPrereq(self):
7995 """Check prerequisites.
7997 This checks that the instance is in the cluster.
7999 """
8000 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
8001 assert instance is not None, \
8002 "Cannot retrieve locked instance %s" % self.instance_name
8004 if instance.disk_template != constants.DT_DRBD8:
8005 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
8006 " instances", errors.ECODE_INVAL)
8008 if len(instance.secondary_nodes) != 1:
8009 raise errors.OpPrereqError("The instance has a strange layout,"
8010 " expected one secondary but found %d" %
8011 len(instance.secondary_nodes),
8012 errors.ECODE_FAULT)
8014 if not self.delay_iallocator:
8015 self._CheckPrereq2()
8017 def _CheckPrereq2(self):
8018 """Check prerequisites, second part.
8020 This function should always be part of CheckPrereq. It was separated and is
8021 now called from Exec because during node evacuation iallocator was only
8022 called with an unmodified cluster model, not taking planned changes into
8023 account.
8025 """
8026 instance = self.instance
8027 secondary_node = instance.secondary_nodes[0]
8029 if self.iallocator_name is None:
8030 remote_node = self.remote_node
8031 else:
8032 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
8033 instance.name, instance.secondary_nodes)
8035 if remote_node is not None:
8036 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
8037 assert self.remote_node_info is not None, \
8038 "Cannot retrieve locked node %s" % remote_node
8039 else:
8040 self.remote_node_info = None
8042 if remote_node == self.instance.primary_node:
8043 raise errors.OpPrereqError("The specified node is the primary node of"
8044 " the instance.", errors.ECODE_INVAL)
8046 if remote_node == secondary_node:
8047 raise errors.OpPrereqError("The specified node is already the"
8048 " secondary node of the instance.",
8049 errors.ECODE_INVAL)
8051 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
8052 constants.REPLACE_DISK_CHG):
8053 raise errors.OpPrereqError("Cannot specify disks to be replaced",
8054 errors.ECODE_INVAL)
8056 if self.mode == constants.REPLACE_DISK_AUTO:
8057 faulty_primary = self._FindFaultyDisks(instance.primary_node)
8058 faulty_secondary = self._FindFaultyDisks(secondary_node)
8060 if faulty_primary and faulty_secondary:
8061 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
8062 " one node and can not be repaired"
8063 " automatically" % self.instance_name,
8064 errors.ECODE_STATE)
8066 if faulty_primary:
8067 self.disks = faulty_primary
8068 self.target_node = instance.primary_node
8069 self.other_node = secondary_node
8070 check_nodes = [self.target_node, self.other_node]
8071 elif faulty_secondary:
8072 self.disks = faulty_secondary
8073 self.target_node = secondary_node
8074 self.other_node = instance.primary_node
8075 check_nodes = [self.target_node, self.other_node]
8076 else:
8077 self.disks = []
8078 check_nodes = []
8080 else:
8081 # Non-automatic modes
8082 if self.mode == constants.REPLACE_DISK_PRI:
8083 self.target_node = instance.primary_node
8084 self.other_node = secondary_node
8085 check_nodes = [self.target_node, self.other_node]
8087 elif self.mode == constants.REPLACE_DISK_SEC:
8088 self.target_node = secondary_node
8089 self.other_node = instance.primary_node
8090 check_nodes = [self.target_node, self.other_node]
8092 elif self.mode == constants.REPLACE_DISK_CHG:
8093 self.new_node = remote_node
8094 self.other_node = instance.primary_node
8095 self.target_node = secondary_node
8096 check_nodes = [self.new_node, self.other_node]
8098 _CheckNodeNotDrained(self.lu, remote_node)
8099 _CheckNodeVmCapable(self.lu, remote_node)
8101 old_node_info = self.cfg.GetNodeInfo(secondary_node)
8102 assert old_node_info is not None
8103 if old_node_info.offline and not self.early_release:
8104 # doesn't make sense to delay the release
8105 self.early_release = True
8106 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
8107 " early-release mode", secondary_node)
8109 else:
8110 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
8111 self.mode)
8113 # If not specified all disks should be replaced
8114 if not self.disks:
8115 self.disks = range(len(self.instance.disks))
8117 for node in check_nodes:
8118 _CheckNodeOnline(self.lu, node)
8120 # Check whether disks are valid
8121 for disk_idx in self.disks:
8122 instance.FindDisk(disk_idx)
8124 # Get secondary node IP addresses
8125 node_2nd_ip = {}
8127 for node_name in [self.target_node, self.other_node, self.new_node]:
8128 if node_name is not None:
8129 node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
8131 self.node_secondary_ip = node_2nd_ip
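# node_secondary_ip maps node name -> secondary IP; the replacement code uses
# these addresses when (re)establishing the DRBD network connections.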
8133 def Exec(self, feedback_fn):
8134 """Execute disk replacement.
8136 This dispatches the disk replacement to the appropriate handler.
8138 """
8139 if self.delay_iallocator:
8140 self._CheckPrereq2()
8142 if not self.disks:
8143 feedback_fn("No disks need replacement")
8144 return
8146 feedback_fn("Replacing disk(s) %s for %s" %
8147 (utils.CommaJoin(self.disks), self.instance.name))
8149 activate_disks = (not self.instance.admin_up)
8151 # Activate the instance disks if we're replacing them on a down instance
8152 if activate_disks:
8153 _StartInstanceDisks(self.lu, self.instance, True)
8155 try:
8156 # Should we replace the secondary node?
8157 if self.new_node is not None:
8158 fn = self._ExecDrbd8Secondary
8159 else:
8160 fn = self._ExecDrbd8DiskOnly
8162 return fn(feedback_fn)
8164 finally:
8165 # Deactivate the instance disks if we're replacing them on a
8166 # down instance
8167 if activate_disks:
8168 _SafeShutdownInstanceDisks(self.lu, self.instance)
8170 def _CheckVolumeGroup(self, nodes):
8171 self.lu.LogInfo("Checking volume groups")
8173 vgname = self.cfg.GetVGName()
8175 # Make sure volume group exists on all involved nodes
8176 results = self.rpc.call_vg_list(nodes)
8177 if not results:
8178 raise errors.OpExecError("Can't list volume groups on the nodes")
8180 for node in nodes:
8181 res = results[node]
8182 res.Raise("Error checking node %s" % node)
8183 if vgname not in res.payload:
8184 raise errors.OpExecError("Volume group '%s' not found on node %s" %
8185 (vgname, node))
8187 def _CheckDisksExistence(self, nodes):
8188 # Check disk existence
8189 for idx, dev in enumerate(self.instance.disks):
8190 if idx not in self.disks:
8191 continue
8193 for node in nodes:
8194 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
8195 self.cfg.SetDiskID(dev, node)
8197 result = self.rpc.call_blockdev_find(node, dev)
8199 msg = result.fail_msg
8200 if msg or not result.payload:
8201 if not msg:
8202 msg = "disk not found"
8203 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
8204 (idx, node, msg))
8206 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
8207 for idx, dev in enumerate(self.instance.disks):
8208 if idx not in self.disks:
8209 continue
8211 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
8212 (idx, node_name))
8214 if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
8215 ldisk=ldisk):
8216 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
8217 " replace disks for instance %s" %
8218 (node_name, self.instance.name))
8220 def _CreateNewStorage(self, node_name):
8221 vgname = self.cfg.GetVGName()
8222 iv_names = {}
8224 for idx, dev in enumerate(self.instance.disks):
8225 if idx not in self.disks:
8226 continue
8228 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
8230 self.cfg.SetDiskID(dev, node_name)
8232 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
8233 names = _GenerateUniqueNames(self.lu, lv_names)
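# _GenerateUniqueNames prefixes each extension with a cluster-unique id, so
# the resulting LV names look roughly like "<unique-id>.disk0_data" and
# "<unique-id>.disk0_meta" (illustrative; the exact prefix comes from the
# configuration).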
8235 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
8236 logical_id=(vgname, names[0]))
8237 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
8238 logical_id=(vgname, names[1]))
8240 new_lvs = [lv_data, lv_meta]
8241 old_lvs = dev.children
8242 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
8244 # we pass force_create=True to force the LVM creation
8245 for new_lv in new_lvs:
8246 _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
8247 _GetInstanceInfoText(self.instance), False)
8249 return iv_names
8251 def _CheckDevices(self, node_name, iv_names):
8252 for name, (dev, _, _) in iv_names.iteritems():
8253 self.cfg.SetDiskID(dev, node_name)
8255 result = self.rpc.call_blockdev_find(node_name, dev)
8257 msg = result.fail_msg
8258 if msg or not result.payload:
8259 if not msg:
8260 msg = "disk not found"
8261 raise errors.OpExecError("Can't find DRBD device %s: %s" %
8262 (name, msg))
8264 if result.payload.is_degraded:
8265 raise errors.OpExecError("DRBD device %s is degraded!" % name)
8267 def _RemoveOldStorage(self, node_name, iv_names):
8268 for name, (_, old_lvs, _) in iv_names.iteritems():
8269 self.lu.LogInfo("Remove logical volumes for %s" % name)
8271 for lv in old_lvs:
8272 self.cfg.SetDiskID(lv, node_name)
8274 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
8275 if msg:
8276 self.lu.LogWarning("Can't remove old LV: %s" % msg,
8277 hint="remove unused LVs manually")
8279 def _ReleaseNodeLock(self, node_name):
8280 """Releases the lock for a given node."""
8281 self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
8283 def _ExecDrbd8DiskOnly(self, feedback_fn):
8284 """Replace a disk on the primary or secondary for DRBD 8.
8286 The algorithm for replace is quite complicated:
8288 1. for each disk to be replaced:
8290 1. create new LVs on the target node with unique names
8291 1. detach old LVs from the drbd device
8292 1. rename old LVs to name_replaced.<time_t>
8293 1. rename new LVs to old LVs
8294 1. attach the new LVs (with the old names now) to the drbd device
8296 1. wait for sync across all devices
8298 1. for each modified disk:
8300 1. remove old LVs (which have the name name_replaced.<time_t>)
8302 Failures are not very well handled.
8304 """
8305 steps_total = 6
8307 # Step: check device activation
8308 self.lu.LogStep(1, steps_total, "Check device existence")
8309 self._CheckDisksExistence([self.other_node, self.target_node])
8310 self._CheckVolumeGroup([self.target_node, self.other_node])
8312 # Step: check other node consistency
8313 self.lu.LogStep(2, steps_total, "Check peer consistency")
8314 self._CheckDisksConsistency(self.other_node,
8315 self.other_node == self.instance.primary_node,
8316 False)
8318 # Step: create new storage
8319 self.lu.LogStep(3, steps_total, "Allocate new storage")
8320 iv_names = self._CreateNewStorage(self.target_node)
8322 # Step: for each lv, detach+rename*2+attach
8323 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8324 for dev, old_lvs, new_lvs in iv_names.itervalues():
8325 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
8327 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
8328 old_lvs)
8329 result.Raise("Can't detach drbd from local storage on node"
8330 " %s for device %s" % (self.target_node, dev.iv_name))
8332 #cfg.Update(instance)
8334 # ok, we created the new LVs, so now we know we have the needed
8335 # storage; as such, we proceed on the target node to rename
8336 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
8337 # using the assumption that logical_id == physical_id (which in
8338 # turn is the unique_id on that node)
8340 # FIXME(iustin): use a better name for the replaced LVs
8341 temp_suffix = int(time.time())
8342 ren_fn = lambda d, suff: (d.physical_id[0],
8343 d.physical_id[1] + "_replaced-%s" % suff)
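# Illustrative example: a physical_id of ("xenvg", "<id>.disk0_data") would
# be renamed to ("xenvg", "<id>.disk0_data_replaced-<timestamp>").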
8345 # Build the rename list based on what LVs exist on the node
8346 rename_old_to_new = []
8347 for to_ren in old_lvs:
8348 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
8349 if not result.fail_msg and result.payload:
8350 # device exists
8351 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
8353 self.lu.LogInfo("Renaming the old LVs on the target node")
8354 result = self.rpc.call_blockdev_rename(self.target_node,
8355 rename_old_to_new)
8356 result.Raise("Can't rename old LVs on node %s" % self.target_node)
8358 # Now we rename the new LVs to the old LVs
8359 self.lu.LogInfo("Renaming the new LVs on the target node")
8360 rename_new_to_old = [(new, old.physical_id)
8361 for old, new in zip(old_lvs, new_lvs)]
8362 result = self.rpc.call_blockdev_rename(self.target_node,
8363 rename_new_to_old)
8364 result.Raise("Can't rename new LVs on node %s" % self.target_node)
8366 for old, new in zip(old_lvs, new_lvs):
8367 new.logical_id = old.logical_id
8368 self.cfg.SetDiskID(new, self.target_node)
8370 for disk in old_lvs:
8371 disk.logical_id = ren_fn(disk, temp_suffix)
8372 self.cfg.SetDiskID(disk, self.target_node)
8374 # Now that the new lvs have the old name, we can add them to the device
8375 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
8376 result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
8377 new_lvs)
8378 msg = result.fail_msg
8379 if msg:
8380 for new_lv in new_lvs:
8381 msg2 = self.rpc.call_blockdev_remove(self.target_node,
8382 new_lv).fail_msg
8383 if msg2:
8384 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
8385 hint=("cleanup manually the unused logical"
8386 " volumes"))
8387 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
8389 dev.children = new_lvs
8391 self.cfg.Update(self.instance, feedback_fn)
8393 cstep = 5
8394 if self.early_release:
8395 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8396 cstep += 1
8397 self._RemoveOldStorage(self.target_node, iv_names)
8398 # WARNING: we release both node locks here, do not do other RPCs
8399 # than WaitForSync to the primary node
8400 self._ReleaseNodeLock([self.target_node, self.other_node])
8402 # Wait for sync
8403 # This can fail as the old devices are degraded and _WaitForSync
8404 # does a combined result over all disks, so we don't check its return value
8405 self.lu.LogStep(cstep, steps_total, "Sync devices")
8406 cstep += 1
8407 _WaitForSync(self.lu, self.instance)
8409 # Check all devices manually
8410 self._CheckDevices(self.instance.primary_node, iv_names)
8412 # Step: remove old storage
8413 if not self.early_release:
8414 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8415 cstep += 1
8416 self._RemoveOldStorage(self.target_node, iv_names)
8418 def _ExecDrbd8Secondary(self, feedback_fn):
8419 """Replace the secondary node for DRBD 8.
8421 The algorithm for replace is quite complicated:
8422 - for all disks of the instance:
8423 - create new LVs on the new node with same names
8424 - shutdown the drbd device on the old secondary
8425 - disconnect the drbd network on the primary
8426 - create the drbd device on the new secondary
8427 - network attach the drbd on the primary, using an artifice:
8428 the drbd code for Attach() will connect to the network if it
8429 finds a device which is connected to the good local disks but
8430 not network enabled
8431 - wait for sync across all devices
8432 - remove all disks from the old secondary
8434 Failures are not very well handled.
8436 """
8437 steps_total = 6
8439 # Step: check device activation
8440 self.lu.LogStep(1, steps_total, "Check device existence")
8441 self._CheckDisksExistence([self.instance.primary_node])
8442 self._CheckVolumeGroup([self.instance.primary_node])
8444 # Step: check other node consistency
8445 self.lu.LogStep(2, steps_total, "Check peer consistency")
8446 self._CheckDisksConsistency(self.instance.primary_node, True, True)
8448 # Step: create new storage
8449 self.lu.LogStep(3, steps_total, "Allocate new storage")
8450 for idx, dev in enumerate(self.instance.disks):
8451 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8452 (self.new_node, idx))
8453 # we pass force_create=True to force LVM creation
8454 for new_lv in dev.children:
8455 _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8456 _GetInstanceInfoText(self.instance), False)
8458 # Step 4: dbrd minors and drbd setups changes
8459 # after this, we must manually remove the drbd minors on both the
8460 # error and the success paths
8461 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8462 minors = self.cfg.AllocateDRBDMinor([self.new_node
8463 for dev in self.instance.disks],
8464 self.instance.name)
8465 logging.debug("Allocated minors %r", minors)
8467 iv_names = {}
8468 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8469 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
8470 (self.new_node, idx))
8471 # create new devices on new_node; note that we create two IDs:
8472 # one without port, so the drbd will be activated without
8473 # networking information on the new node at this stage, and one
8474 # with network, for the latter activation in step 4
8475 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8476 if self.instance.primary_node == o_node1:
8477 p_minor = o_minor1
8478 else:
8479 assert self.instance.primary_node == o_node2, "Three-node instance?"
8480 p_minor = o_minor2
8482 new_alone_id = (self.instance.primary_node, self.new_node, None,
8483 p_minor, new_minor, o_secret)
8484 new_net_id = (self.instance.primary_node, self.new_node, o_port,
8485 p_minor, new_minor, o_secret)
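# For DRBD8 disks the logical_id tuple is (nodeA, nodeB, port, minorA,
# minorB, secret), as unpacked above; new_alone_id simply leaves the port
# unset so the device is first brought up without networking.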
8487 iv_names[idx] = (dev, dev.children, new_net_id)
8488 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8489 new_net_id)
8490 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8491 logical_id=new_alone_id,
8492 children=dev.children,
8493 size=dev.size)
8494 try:
8495 _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8496 _GetInstanceInfoText(self.instance), False)
8497 except errors.GenericError:
8498 self.cfg.ReleaseDRBDMinors(self.instance.name)
8499 raise
8501 # We have new devices, shutdown the drbd on the old secondary
8502 for idx, dev in enumerate(self.instance.disks):
8503 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8504 self.cfg.SetDiskID(dev, self.target_node)
8505 msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8506 if msg:
8507 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
8508 " node: %s" % (idx, msg),
8509 hint=("Please cleanup this device manually as"
8510 " soon as possible"))
8512 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8513 result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8514 self.node_secondary_ip,
8515 self.instance.disks)\
8516 [self.instance.primary_node]
8518 msg = result.fail_msg
8519 if msg:
8520 # detaches didn't succeed (unlikely)
8521 self.cfg.ReleaseDRBDMinors(self.instance.name)
8522 raise errors.OpExecError("Can't detach the disks from the network on"
8523 " old node: %s" % (msg,))
8525 # if we managed to detach at least one, we update all the disks of
8526 # the instance to point to the new secondary
8527 self.lu.LogInfo("Updating instance configuration")
8528 for dev, _, new_logical_id in iv_names.itervalues():
8529 dev.logical_id = new_logical_id
8530 self.cfg.SetDiskID(dev, self.instance.primary_node)
8532 self.cfg.Update(self.instance, feedback_fn)
8534 # and now perform the drbd attach
8535 self.lu.LogInfo("Attaching primary drbds to new secondary"
8536 " (standalone => connected)")
8537 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8538 self.new_node],
8539 self.node_secondary_ip,
8540 self.instance.disks,
8541 self.instance.name,
8542 False)
8543 for to_node, to_result in result.items():
8544 msg = to_result.fail_msg
8545 if msg:
8546 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8547 to_node, msg,
8548 hint=("please do a gnt-instance info to see the"
8549 " status of disks"))
8550 cstep = 5
8551 if self.early_release:
8552 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8553 cstep += 1
8554 self._RemoveOldStorage(self.target_node, iv_names)
8555 # WARNING: we release all node locks here, do not do other RPCs
8556 # than WaitForSync to the primary node
8557 self._ReleaseNodeLock([self.instance.primary_node,
8558 self.target_node,
8559 self.new_node])
8561 # Wait for sync
8562 # This can fail as the old devices are degraded and _WaitForSync
8563 # does a combined result over all disks, so we don't check its return value
8564 self.lu.LogStep(cstep, steps_total, "Sync devices")
8565 cstep += 1
8566 _WaitForSync(self.lu, self.instance)
8568 # Check all devices manually
8569 self._CheckDevices(self.instance.primary_node, iv_names)
8571 # Step: remove old storage
8572 if not self.early_release:
8573 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8574 self._RemoveOldStorage(self.target_node, iv_names)
8577 class LURepairNodeStorage(NoHooksLU):
8578 """Repairs the volume group on a node.
8580 """
8581 REQ_BGL = False
8583 def CheckArguments(self):
8584 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8586 storage_type = self.op.storage_type
8588 if (constants.SO_FIX_CONSISTENCY not in
8589 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
8590 raise errors.OpPrereqError("Storage units of type '%s' can not be"
8591 " repaired" % storage_type,
8592 errors.ECODE_INVAL)
8594 def ExpandNames(self):
8595 self.needed_locks = {
8596 locking.LEVEL_NODE: [self.op.node_name],
8597 }
8599 def _CheckFaultyDisks(self, instance, node_name):
8600 """Ensure faulty disks abort the opcode or at least warn."""
8601 try:
8602 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
8603 node_name, True):
8604 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
8605 " node '%s'" % (instance.name, node_name),
8606 errors.ECODE_STATE)
8607 except errors.OpPrereqError, err:
8608 if self.op.ignore_consistency:
8609 self.proc.LogWarning(str(err.args[0]))
8610 else:
8611 raise
8613 def CheckPrereq(self):
8614 """Check prerequisites.
8616 """
8617 # Check whether any instance on this node has faulty disks
8618 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
8619 if not inst.admin_up:
8620 continue
8621 check_nodes = set(inst.all_nodes)
8622 check_nodes.discard(self.op.node_name)
8623 for inst_node_name in check_nodes:
8624 self._CheckFaultyDisks(inst, inst_node_name)
8626 def Exec(self, feedback_fn):
8627 feedback_fn("Repairing storage unit '%s' on %s ..." %
8628 (self.op.name, self.op.node_name))
8630 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
8631 result = self.rpc.call_storage_execute(self.op.node_name,
8632 self.op.storage_type, st_args,
8633 self.op.name,
8634 constants.SO_FIX_CONSISTENCY)
8635 result.Raise("Failed to repair storage unit '%s' on %s" %
8636 (self.op.name, self.op.node_name))
8639 class LUNodeEvacStrategy(NoHooksLU):
8640 """Computes the node evacuation strategy.
8642 """
8643 REQ_BGL = False
8645 def CheckArguments(self):
8646 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
8648 def ExpandNames(self):
8649 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
8650 self.needed_locks = locks = {}
8651 if self.op.remote_node is None:
8652 locks[locking.LEVEL_NODE] = locking.ALL_SET
8653 else:
8654 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8655 locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
8657 def Exec(self, feedback_fn):
8658 if self.op.remote_node is not None:
8659 instances = []
8660 for node in self.op.nodes:
8661 instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
8662 result = []
8663 for i in instances:
8664 if i.primary_node == self.op.remote_node:
8665 raise errors.OpPrereqError("Node %s is the primary node of"
8666 " instance %s, cannot use it as"
8667 " secondary" %
8668 (self.op.remote_node, i.name),
8669 errors.ECODE_INVAL)
8670 result.append([i.name, self.op.remote_node])
8671 else:
8672 ial = IAllocator(self.cfg, self.rpc,
8673 mode=constants.IALLOCATOR_MODE_MEVAC,
8674 evac_nodes=self.op.nodes)
8675 ial.Run(self.op.iallocator, validate=True)
8676 if not ial.success:
8677 raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
8678 errors.ECODE_NORES)
8679 result = ial.result
8681 return result
8683 class LUInstanceGrowDisk(LogicalUnit):
8684 """Grow a disk of an instance.
8686 """
8687 HPATH = "disk-grow"
8688 HTYPE = constants.HTYPE_INSTANCE
8689 REQ_BGL = False
8691 def ExpandNames(self):
8692 self._ExpandAndLockInstance()
8693 self.needed_locks[locking.LEVEL_NODE] = []
8694 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8696 def DeclareLocks(self, level):
8697 if level == locking.LEVEL_NODE:
8698 self._LockInstancesNodes()
8700 def BuildHooksEnv(self):
8701 """Build hooks env.
8703 This runs on the master, the primary and all the secondaries.
8705 """
8706 env = {
8707 "DISK": self.op.disk,
8708 "AMOUNT": self.op.amount,
8709 }
8710 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8711 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8712 return env, nl, nl
8714 def CheckPrereq(self):
8715 """Check prerequisites.
8717 This checks that the instance is in the cluster.
8719 """
8720 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8721 assert instance is not None, \
8722 "Cannot retrieve locked instance %s" % self.op.instance_name
8723 nodenames = list(instance.all_nodes)
8724 for node in nodenames:
8725 _CheckNodeOnline(self, node)
8727 self.instance = instance
8729 if instance.disk_template not in constants.DTS_GROWABLE:
8730 raise errors.OpPrereqError("Instance's disk layout does not support"
8731 " growing.", errors.ECODE_INVAL)
8733 self.disk = instance.FindDisk(self.op.disk)
8735 if instance.disk_template != constants.DT_FILE:
8736 # TODO: check the free disk space for file, when that feature
8737 # will be supported
8738 _CheckNodesFreeDiskPerVG(self, nodenames,
8739 self.disk.ComputeGrowth(self.op.amount))
8741 def Exec(self, feedback_fn):
8742 """Execute disk grow.
8744 """
8745 instance = self.instance
8746 disk = self.disk
8748 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
8749 if not disks_ok:
8750 raise errors.OpExecError("Cannot activate block device to grow")
8752 for node in instance.all_nodes:
8753 self.cfg.SetDiskID(disk, node)
8754 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
8755 result.Raise("Grow request failed to node %s" % node)
8757 # TODO: Rewrite code to work properly
8758 # DRBD goes into sync mode for a short amount of time after executing the
8759 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
8760 # calling "resize" in sync mode fails. Sleeping for a short amount of
8761 # time is a work-around.
8762 time.sleep(5)
8764 disk.RecordGrow(self.op.amount)
8765 self.cfg.Update(instance, feedback_fn)
8766 if self.op.wait_for_sync:
8767 disk_abort = not _WaitForSync(self, instance, disks=[disk])
8768 if disk_abort:
8769 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
8770 " status.\nPlease check the instance.")
8771 if not instance.admin_up:
8772 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
8773 elif not instance.admin_up:
8774 self.proc.LogWarning("Not shutting down the disk even if the instance is"
8775 " not supposed to be running because no wait for"
8776 " sync mode was requested.")
8779 class LUInstanceQueryData(NoHooksLU):
8780 """Query runtime instance data.
8782 """
8783 REQ_BGL = False
8785 def ExpandNames(self):
8786 self.needed_locks = {}
8787 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
8789 if self.op.instances:
8790 self.wanted_names = []
8791 for name in self.op.instances:
8792 full_name = _ExpandInstanceName(self.cfg, name)
8793 self.wanted_names.append(full_name)
8794 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
8795 else:
8796 self.wanted_names = None
8797 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
8799 self.needed_locks[locking.LEVEL_NODE] = []
8800 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8802 def DeclareLocks(self, level):
8803 if level == locking.LEVEL_NODE:
8804 self._LockInstancesNodes()
8806 def CheckPrereq(self):
8807 """Check prerequisites.
8809 This only checks the optional instance list against the existing names.
8811 """
8812 if self.wanted_names is None:
8813 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
8815 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
8816 in self.wanted_names]
8818 def _ComputeBlockdevStatus(self, node, instance_name, dev):
8819 """Returns the status of a block device
8821 """
8822 if self.op.static or not node:
8823 return None
8825 self.cfg.SetDiskID(dev, node)
8827 result = self.rpc.call_blockdev_find(node, dev)
8828 if result.offline:
8829 return None
8831 result.Raise("Can't compute disk status for %s" % instance_name)
8833 status = result.payload
8834 if status is None:
8835 return None
8837 return (status.dev_path, status.major, status.minor,
8838 status.sync_percent, status.estimated_time,
8839 status.is_degraded, status.ldisk_status)
8841 def _ComputeDiskStatus(self, instance, snode, dev):
8842 """Compute block device status.
8844 """
8845 if dev.dev_type in constants.LDS_DRBD:
8846 # we change the snode then (otherwise we use the one passed in)
8847 if dev.logical_id[0] == instance.primary_node:
8848 snode = dev.logical_id[1]
8849 else:
8850 snode = dev.logical_id[0]
8852 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
8853 instance.name, dev)
8854 dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
8856 if dev.children:
8857 dev_children = [self._ComputeDiskStatus(instance, snode, child)
8858 for child in dev.children]
8859 else:
8860 dev_children = []
8862 data = {
8863 "iv_name": dev.iv_name,
8864 "dev_type": dev.dev_type,
8865 "logical_id": dev.logical_id,
8866 "physical_id": dev.physical_id,
8867 "pstatus": dev_pstatus,
8868 "sstatus": dev_sstatus,
8869 "children": dev_children,
8870 "mode": dev.mode,
8871 "size": dev.size,
8872 }
8874 return data
8876 def Exec(self, feedback_fn):
8877 """Gather and return data"""
8878 result = {}
8880 cluster = self.cfg.GetClusterInfo()
8882 for instance in self.wanted_instances:
8883 if not self.op.static:
8884 remote_info = self.rpc.call_instance_info(instance.primary_node,
8885 instance.name,
8886 instance.hypervisor)
8887 remote_info.Raise("Error checking node %s" % instance.primary_node)
8888 remote_info = remote_info.payload
8889 if remote_info and "state" in remote_info:
8890 remote_state = "up"
8891 else:
8892 remote_state = "down"
8893 else:
8894 remote_state = None
8895 if instance.admin_up:
8896 config_state = "up"
8897 else:
8898 config_state = "down"
8900 disks = [self._ComputeDiskStatus(instance, None, device)
8901 for device in instance.disks]
8903 idict = {
8904 "name": instance.name,
8905 "config_state": config_state,
8906 "run_state": remote_state,
8907 "pnode": instance.primary_node,
8908 "snodes": instance.secondary_nodes,
8909 "os": instance.os,
8910 # this happens to be the same format used for hooks
8911 "nics": _NICListToTuple(self, instance.nics),
8912 "disk_template": instance.disk_template,
8913 "disks": disks,
8914 "hypervisor": instance.hypervisor,
8915 "network_port": instance.network_port,
8916 "hv_instance": instance.hvparams,
8917 "hv_actual": cluster.FillHV(instance, skip_globals=True),
8918 "be_instance": instance.beparams,
8919 "be_actual": cluster.FillBE(instance),
8920 "os_instance": instance.osparams,
8921 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
8922 "serial_no": instance.serial_no,
8923 "mtime": instance.mtime,
8924 "ctime": instance.ctime,
8925 "uuid": instance.uuid,
8926 }
8928 result[instance.name] = idict
8930 return result
8933 class LUInstanceSetParams(LogicalUnit):
8934 """Modifies an instance's parameters.
8936 """
8937 HPATH = "instance-modify"
8938 HTYPE = constants.HTYPE_INSTANCE
8939 REQ_BGL = False
8941 def CheckArguments(self):
8942 if not (self.op.nics or self.op.disks or self.op.disk_template or
8943 self.op.hvparams or self.op.beparams or self.op.os_name):
8944 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
8946 if self.op.hvparams:
8947 _CheckGlobalHvParams(self.op.hvparams)
8949 # Disk validation
8950 disk_addremove = 0
8951 for disk_op, disk_dict in self.op.disks:
8952 utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
8953 if disk_op == constants.DDM_REMOVE:
8954 disk_addremove += 1
8955 continue
8956 elif disk_op == constants.DDM_ADD:
8957 disk_addremove += 1
8958 else:
8959 if not isinstance(disk_op, int):
8960 raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
8961 if not isinstance(disk_dict, dict):
8962 msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
8963 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8965 if disk_op == constants.DDM_ADD:
8966 mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
8967 if mode not in constants.DISK_ACCESS_SET:
8968 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
8969 errors.ECODE_INVAL)
8970 size = disk_dict.get('size', None)
8971 if size is None:
8972 raise errors.OpPrereqError("Required disk parameter size missing",
8973 errors.ECODE_INVAL)
8974 try:
8975 size = int(size)
8976 except (TypeError, ValueError), err:
8977 raise errors.OpPrereqError("Invalid disk size parameter: %s" %
8978 str(err), errors.ECODE_INVAL)
8979 disk_dict['size'] = size
8980 else:
8981 # modification of disk
8982 if 'size' in disk_dict:
8983 raise errors.OpPrereqError("Disk size change not possible, use"
8984 " grow-disk", errors.ECODE_INVAL)
8986 if disk_addremove > 1:
8987 raise errors.OpPrereqError("Only one disk add or remove operation"
8988 " supported at a time", errors.ECODE_INVAL)
8990 if self.op.disks and self.op.disk_template is not None:
8991 raise errors.OpPrereqError("Disk template conversion and other disk"
8992 " changes not supported at the same time",
8993 errors.ECODE_INVAL)
8995 if (self.op.disk_template and
8996 self.op.disk_template in constants.DTS_NET_MIRROR and
8997 self.op.remote_node is None):
8998 raise errors.OpPrereqError("Changing the disk template to a mirrored"
8999 " one requires specifying a secondary node",
9000 errors.ECODE_INVAL)
9002 # NIC validation
9003 nic_addremove = 0
9004 for nic_op, nic_dict in self.op.nics:
9005 utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9006 if nic_op == constants.DDM_REMOVE:
9007 nic_addremove += 1
9008 continue
9009 elif nic_op == constants.DDM_ADD:
9010 nic_addremove += 1
9011 else:
9012 if not isinstance(nic_op, int):
9013 raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9014 if not isinstance(nic_dict, dict):
9015 msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9016 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9018 # nic_dict should be a dict
9019 nic_ip = nic_dict.get('ip', None)
9020 if nic_ip is not None:
9021 if nic_ip.lower() == constants.VALUE_NONE:
9022 nic_dict['ip'] = None
9023 else:
9024 if not netutils.IPAddress.IsValid(nic_ip):
9025 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9026 errors.ECODE_INVAL)
9028 nic_bridge = nic_dict.get('bridge', None)
9029 nic_link = nic_dict.get('link', None)
9030 if nic_bridge and nic_link:
9031 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9032 " at the same time", errors.ECODE_INVAL)
9033 elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9034 nic_dict['bridge'] = None
9035 elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9036 nic_dict['link'] = None
9038 if nic_op == constants.DDM_ADD:
9039 nic_mac = nic_dict.get('mac', None)
9040 if nic_mac is None:
9041 nic_dict['mac'] = constants.VALUE_AUTO
9043 if 'mac' in nic_dict:
9044 nic_mac = nic_dict['mac']
9045 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9046 nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9048 if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9049 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9050 " modifying an existing nic",
9051 errors.ECODE_INVAL)
9053 if nic_addremove > 1:
9054 raise errors.OpPrereqError("Only one NIC add or remove operation"
9055 " supported at a time", errors.ECODE_INVAL)
9057 def ExpandNames(self):
9058 self._ExpandAndLockInstance()
9059 self.needed_locks[locking.LEVEL_NODE] = []
9060 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9062 def DeclareLocks(self, level):
9063 if level == locking.LEVEL_NODE:
9064 self._LockInstancesNodes()
9065 if self.op.disk_template and self.op.remote_node:
9066 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9067 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9069 def BuildHooksEnv(self):
9070 """Build hooks env.
9072 This runs on the master, primary and secondaries.
9074 """
9075 args = dict()
9076 if constants.BE_MEMORY in self.be_new:
9077 args['memory'] = self.be_new[constants.BE_MEMORY]
9078 if constants.BE_VCPUS in self.be_new:
9079 args['vcpus'] = self.be_new[constants.BE_VCPUS]
9080 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9081 # information at all.
9082 if self.op.nics:
9083 args['nics'] = []
9084 nic_override = dict(self.op.nics)
9085 for idx, nic in enumerate(self.instance.nics):
9086 if idx in nic_override:
9087 this_nic_override = nic_override[idx]
9088 else:
9089 this_nic_override = {}
9090 if 'ip' in this_nic_override:
9091 ip = this_nic_override['ip']
9092 else:
9093 ip = nic.ip
9094 if 'mac' in this_nic_override:
9095 mac = this_nic_override['mac']
9096 else:
9097 mac = nic.mac
9098 if idx in self.nic_pnew:
9099 nicparams = self.nic_pnew[idx]
9100 else:
9101 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
9102 mode = nicparams[constants.NIC_MODE]
9103 link = nicparams[constants.NIC_LINK]
9104 args['nics'].append((ip, mac, mode, link))
9105 if constants.DDM_ADD in nic_override:
9106 ip = nic_override[constants.DDM_ADD].get('ip', None)
9107 mac = nic_override[constants.DDM_ADD]['mac']
9108 nicparams = self.nic_pnew[constants.DDM_ADD]
9109 mode = nicparams[constants.NIC_MODE]
9110 link = nicparams[constants.NIC_LINK]
9111 args['nics'].append((ip, mac, mode, link))
9112 elif constants.DDM_REMOVE in nic_override:
9113 del args['nics'][-1]
9115 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
9116 if self.op.disk_template:
9117 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
9118 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9119 return env, nl, nl
9121 def CheckPrereq(self):
9122 """Check prerequisites.
9124 This only checks the instance list against the existing names.
9126 """
9127 # checking the new params on the primary/secondary nodes
9129 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9130 cluster = self.cluster = self.cfg.GetClusterInfo()
9131 assert self.instance is not None, \
9132 "Cannot retrieve locked instance %s" % self.op.instance_name
9133 pnode = instance.primary_node
9134 nodelist = list(instance.all_nodes)
9137 if self.op.os_name and not self.op.force:
9138 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
9139 self.op.force_variant)
9140 instance_os = self.op.os_name
9141 else:
9142 instance_os = instance.os
9144 if self.op.disk_template:
9145 if instance.disk_template == self.op.disk_template:
9146 raise errors.OpPrereqError("Instance already has disk template %s" %
9147 instance.disk_template, errors.ECODE_INVAL)
9149 if (instance.disk_template,
9150 self.op.disk_template) not in self._DISK_CONVERSIONS:
9151 raise errors.OpPrereqError("Unsupported disk template conversion from"
9152 " %s to %s" % (instance.disk_template,
9153 self.op.disk_template),
9154 errors.ECODE_INVAL)
9155 _CheckInstanceDown(self, instance, "cannot change disk template")
9156 if self.op.disk_template in constants.DTS_NET_MIRROR:
9157 if self.op.remote_node == pnode:
9158 raise errors.OpPrereqError("Given new secondary node %s is the same"
9159 " as the primary node of the instance" %
9160 self.op.remote_node, errors.ECODE_STATE)
9161 _CheckNodeOnline(self, self.op.remote_node)
9162 _CheckNodeNotDrained(self, self.op.remote_node)
9163 # FIXME: here we assume that the old instance type is DT_PLAIN
9164 assert instance.disk_template == constants.DT_PLAIN
9165 disks = [{"size": d.size, "vg": d.logical_id[0]}
9166 for d in instance.disks]
9167 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
9168 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
9170 # hvparams processing
9171 if self.op.hvparams:
9172 hv_type = instance.hypervisor
9173 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
9174 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
9175 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
9178 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
9179 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
9180 self.hv_new = hv_new # the new actual values
9181 self.hv_inst = i_hvdict # the new dict (without defaults)
9182 else:
9183 self.hv_new = self.hv_inst = {}
9185 # beparams processing
9186 if self.op.beparams:
9187 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
9188 use_none=True)
9189 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
9190 be_new = cluster.SimpleFillBE(i_bedict)
9191 self.be_new = be_new # the new actual values
9192 self.be_inst = i_bedict # the new dict (without defaults)
9193 else:
9194 self.be_new = self.be_inst = {}
9196 # osparams processing
9197 if self.op.osparams:
9198 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
9199 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
9200 self.os_inst = i_osdict # the new dict (without defaults)
9201 else:
9202 self.os_inst = {}
9204 self.warn = []
9206 if constants.BE_MEMORY in self.op.beparams and not self.op.force:
9207 mem_check_list = [pnode]
9208 if be_new[constants.BE_AUTO_BALANCE]:
9209 # either we changed auto_balance to yes or it was from before
9210 mem_check_list.extend(instance.secondary_nodes)
9211 instance_info = self.rpc.call_instance_info(pnode, instance.name,
9212 instance.hypervisor)
9213 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
9214 instance.hypervisor)
9215 pninfo = nodeinfo[pnode]
9216 msg = pninfo.fail_msg
9217 if msg:
9218 # Assume the primary node is unreachable and go ahead
9219 self.warn.append("Can't get info from primary node %s: %s" %
9220 (pnode, msg))
9221 elif not isinstance(pninfo.payload.get('memory_free', None), int):
9222 self.warn.append("Node data from primary node %s doesn't contain"
9223 " free memory information" % pnode)
9224 elif instance_info.fail_msg:
9225 self.warn.append("Can't get instance runtime information: %s" %
9226 instance_info.fail_msg)
9227 else:
9228 if instance_info.payload:
9229 current_mem = int(instance_info.payload['memory'])
9230 else:
9231 # Assume instance not running
9232 # (there is a slight race condition here, but it's not very probable,
9233 # and we have no other way to check)
9234 current_mem = 0
9235 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
9236 pninfo.payload['memory_free'])
9237 if miss_mem > 0:
9238 raise errors.OpPrereqError("This change will prevent the instance"
9239 " from starting, due to %d MB of memory"
9240 " missing on its primary node" % miss_mem,
9241 errors.ECODE_NORES)
9243 if be_new[constants.BE_AUTO_BALANCE]:
9244 for node, nres in nodeinfo.items():
9245 if node not in instance.secondary_nodes:
9246 continue
9247 msg = nres.fail_msg
9248 if msg:
9249 self.warn.append("Can't get info from secondary node %s: %s" %
9250 (node, msg))
9251 elif not isinstance(nres.payload.get('memory_free', None), int):
9252 self.warn.append("Secondary node %s didn't return free"
9253 " memory information" % node)
9254 elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
9255 self.warn.append("Not enough memory to failover instance to"
9256 " secondary node %s" % node)
9258 # NIC processing
9259 self.nic_pnew = {}
9260 self.nic_pinst = {}
9261 for nic_op, nic_dict in self.op.nics:
9262 if nic_op == constants.DDM_REMOVE:
9263 if not instance.nics:
9264 raise errors.OpPrereqError("Instance has no NICs, cannot remove",
9265 errors.ECODE_INVAL)
9266 continue
9267 if nic_op != constants.DDM_ADD:
9268 # an existing nic
9269 if not instance.nics:
9270 raise errors.OpPrereqError("Invalid NIC index %s, instance has"
9271 " no NICs" % nic_op,
9272 errors.ECODE_INVAL)
9273 if nic_op < 0 or nic_op >= len(instance.nics):
9274 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
9275 " are 0 to %d" %
9276 (nic_op, len(instance.nics) - 1),
9277 errors.ECODE_INVAL)
9278 old_nic_params = instance.nics[nic_op].nicparams
9279 old_nic_ip = instance.nics[nic_op].ip
9280 else:
9281 old_nic_params = {}
9282 old_nic_ip = None
9284 update_params_dict = dict([(key, nic_dict[key])
9285 for key in constants.NICS_PARAMETERS
9286 if key in nic_dict])
9288 if 'bridge' in nic_dict:
9289 update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
9291 new_nic_params = _GetUpdatedParams(old_nic_params,
9293 utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
9294 new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
9295 objects.NIC.CheckParameterSyntax(new_filled_nic_params)
9296 self.nic_pinst[nic_op] = new_nic_params
9297 self.nic_pnew[nic_op] = new_filled_nic_params
9298 new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
9300 if new_nic_mode == constants.NIC_MODE_BRIDGED:
9301 nic_bridge = new_filled_nic_params[constants.NIC_LINK]
9302 msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
9304 msg = "Error checking bridges on node %s: %s" % (pnode, msg)
9306 self.warn.append(msg)
9308 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
9309 if new_nic_mode == constants.NIC_MODE_ROUTED:
9310 if 'ip' in nic_dict:
9311 nic_ip = nic_dict['ip']
9315 raise errors.OpPrereqError('Cannot set the nic ip to None'
9316 ' on a routed nic', errors.ECODE_INVAL)
9317 if 'mac' in nic_dict:
9318 nic_mac = nic_dict['mac']
9320 raise errors.OpPrereqError('Cannot set the nic mac to None',
9322 elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9323 # otherwise generate the mac
9324 nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
9326 # or validate/reserve the current one
9328 self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
9329 except errors.ReservationError:
9330 raise errors.OpPrereqError("MAC address %s already in use"
9331 " in cluster" % nic_mac,
9332 errors.ECODE_NOTUNIQUE)
9335 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
9336 raise errors.OpPrereqError("Disk operations not supported for"
9337 " diskless instances",
9339 for disk_op, _ in self.op.disks:
9340 if disk_op == constants.DDM_REMOVE:
9341 if len(instance.disks) == 1:
9342 raise errors.OpPrereqError("Cannot remove the last disk of"
9343 " an instance", errors.ECODE_INVAL)
9344 _CheckInstanceDown(self, instance, "cannot remove disks")
9346 if (disk_op == constants.DDM_ADD and
9347 len(instance.disks) >= constants.MAX_DISKS):
9348 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
9349 " add more" % constants.MAX_DISKS,
9351 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
9353 if disk_op < 0 or disk_op >= len(instance.disks):
9354 raise errors.OpPrereqError("Invalid disk index %s, valid values"
9356 (disk_op, len(instance.disks)),
9361 def _ConvertPlainToDrbd(self, feedback_fn):
9362 """Converts an instance from plain to drbd.
9365 feedback_fn("Converting template to drbd")
9366 instance = self.instance
9367 pnode = instance.primary_node
9368 snode = self.op.remote_node
9370 # create a fake disk info for _GenerateDiskTemplate
9371 disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
9372 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9373 instance.name, pnode, [snode],
9374 disk_info, None, None, 0, feedback_fn)
9375 info = _GetInstanceInfoText(instance)
9376 feedback_fn("Creating additional volumes...")
9377 # first, create the missing data and meta devices
9378 for disk in new_disks:
9379 # unfortunately this is... not too nice
9380 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9382 for child in disk.children:
9383 _CreateSingleBlockDev(self, snode, instance, child, info, True)
9384 # at this stage, all new LVs have been created, we can rename the
9386 feedback_fn("Renaming original volumes...")
9387 rename_list = [(o, n.children[0].logical_id)
9388 for (o, n) in zip(instance.disks, new_disks)]
9389 result = self.rpc.call_blockdev_rename(pnode, rename_list)
9390 result.Raise("Failed to rename original LVs")
9392 feedback_fn("Initializing DRBD devices...")
9393 # all child devices are in place, we can now create the DRBD devices
9394 for disk in new_disks:
9395 for node in [pnode, snode]:
9396 f_create = node == pnode
9397 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9399 # at this point, the instance has been modified
9400 instance.disk_template = constants.DT_DRBD8
9401 instance.disks = new_disks
9402 self.cfg.Update(instance, feedback_fn)
9404 # disks are created, waiting for sync
9405 disk_abort = not _WaitForSync(self, instance)
9407 raise errors.OpExecError("There are some degraded disks for"
9408 " this instance, please cleanup manually")
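# Sketch of the disk tree assumed by the conversion above (derived from how the
# children indices are used here; the details live in _GenerateDiskTemplate):
#   new DRBD8 disk
#     - children[0]: data LV (the original plain LV is renamed onto its id)
#     - children[1]: meta LV (created from scratch)
# On the secondary node both children are created anew before the DRBD device
# itself is assembled on top of them.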
9410 def _ConvertDrbdToPlain(self, feedback_fn):
9411 """Converts an instance from drbd to plain.
9414 instance = self.instance
9415 assert len(instance.secondary_nodes) == 1
9416 pnode = instance.primary_node
9417 snode = instance.secondary_nodes[0]
9418 feedback_fn("Converting template to plain")
9420 old_disks = instance.disks
9421 new_disks = [d.children[0] for d in old_disks]
9423 # copy over size and mode
9424 for parent, child in zip(old_disks, new_disks):
9425 child.size = parent.size
9426 child.mode = parent.mode
9428 # update instance structure
9429 instance.disks = new_disks
9430 instance.disk_template = constants.DT_PLAIN
9431 self.cfg.Update(instance, feedback_fn)
9433 feedback_fn("Removing volumes on the secondary node...")
9434 for disk in old_disks:
9435 self.cfg.SetDiskID(disk, snode)
9436 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9438 self.LogWarning("Could not remove block device %s on node %s,"
9439 " continuing anyway: %s", disk.iv_name, snode, msg)
9441 feedback_fn("Removing unneeded volumes on the primary node...")
9442 for idx, disk in enumerate(old_disks):
9443 meta = disk.children[1]
9444 self.cfg.SetDiskID(meta, pnode)
9445 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9447 self.LogWarning("Could not remove metadata for disk %d on node %s,"
9448 " continuing anyway: %s", idx, pnode, msg)
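# The two converters above are dispatched through the _DISK_CONVERSIONS map
# defined at the end of this class, keyed by
# (current_template, requested_template), e.g.
#   (constants.DT_PLAIN, constants.DT_DRBD8) -> _ConvertPlainToDrbd
# Only the pairs present in that mapping are supported.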
9450 def Exec(self, feedback_fn):
9451 """Modifies an instance.
9453 All parameters take effect only at the next restart of the instance.
9456 # Process here the warnings from CheckPrereq, as we don't have a
9457 # feedback_fn there.
9458 for warn in self.warn:
9459 feedback_fn("WARNING: %s" % warn)
9462 instance = self.instance
9464 for disk_op, disk_dict in self.op.disks:
9465 if disk_op == constants.DDM_REMOVE:
9466 # remove the last disk
9467 device = instance.disks.pop()
9468 device_idx = len(instance.disks)
9469 for node, disk in device.ComputeNodeTree(instance.primary_node):
9470 self.cfg.SetDiskID(disk, node)
9471 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9473 self.LogWarning("Could not remove disk/%d on node %s: %s,"
9474 " continuing anyway", device_idx, node, msg)
9475 result.append(("disk/%d" % device_idx, "remove"))
9476 elif disk_op == constants.DDM_ADD:
9478 if instance.disk_template == constants.DT_FILE:
9479 file_driver, file_path = instance.disks[0].logical_id
9480 file_path = os.path.dirname(file_path)
9482 file_driver = file_path = None
9483 disk_idx_base = len(instance.disks)
9484 new_disk = _GenerateDiskTemplate(self,
9485 instance.disk_template,
9486 instance.name, instance.primary_node,
9487 instance.secondary_nodes,
9491 disk_idx_base, feedback_fn)[0]
9492 instance.disks.append(new_disk)
9493 info = _GetInstanceInfoText(instance)
9495 logging.info("Creating volume %s for instance %s",
9496 new_disk.iv_name, instance.name)
9497 # Note: this needs to be kept in sync with _CreateDisks
9499 for node in instance.all_nodes:
9500 f_create = node == instance.primary_node
9502 _CreateBlockDev(self, node, instance, new_disk,
9503 f_create, info, f_create)
9504 except errors.OpExecError, err:
9505 self.LogWarning("Failed to create volume %s (%s) on"
9507 new_disk.iv_name, new_disk, node, err)
9508 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9509 (new_disk.size, new_disk.mode)))
9511 # change a given disk
9512 instance.disks[disk_op].mode = disk_dict['mode']
9513 result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9515 if self.op.disk_template:
9516 r_shut = _ShutdownInstanceDisks(self, instance)
9518 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
9519 " proceed with disk template conversion")
9520 mode = (instance.disk_template, self.op.disk_template)
9522 self._DISK_CONVERSIONS[mode](self, feedback_fn)
9524 self.cfg.ReleaseDRBDMinors(instance.name)
9526 result.append(("disk_template", self.op.disk_template))
9529 for nic_op, nic_dict in self.op.nics:
9530 if nic_op == constants.DDM_REMOVE:
9531 # remove the last nic
9532 del instance.nics[-1]
9533 result.append(("nic.%d" % len(instance.nics), "remove"))
9534 elif nic_op == constants.DDM_ADD:
9535 # mac and bridge should be set by now
9536 mac = nic_dict['mac']
9537 ip = nic_dict.get('ip', None)
9538 nicparams = self.nic_pinst[constants.DDM_ADD]
9539 new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9540 instance.nics.append(new_nic)
9541 result.append(("nic.%d" % (len(instance.nics) - 1),
9542 "add:mac=%s,ip=%s,mode=%s,link=%s" %
9543 (new_nic.mac, new_nic.ip,
9544 self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9545 self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9548 for key in 'mac', 'ip':
9550 setattr(instance.nics[nic_op], key, nic_dict[key])
9551 if nic_op in self.nic_pinst:
9552 instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9553 for key, val in nic_dict.iteritems():
9554 result.append(("nic.%s/%d" % (key, nic_op), val))
9557 if self.op.hvparams:
9558 instance.hvparams = self.hv_inst
9559 for key, val in self.op.hvparams.iteritems():
9560 result.append(("hv/%s" % key, val))
9563 if self.op.beparams:
9564 instance.beparams = self.be_inst
9565 for key, val in self.op.beparams.iteritems():
9566 result.append(("be/%s" % key, val))
9570 instance.os = self.op.os_name
9573 if self.op.osparams:
9574 instance.osparams = self.os_inst
9575 for key, val in self.op.osparams.iteritems():
9576 result.append(("os/%s" % key, val))
9578 self.cfg.Update(instance, feedback_fn)
9582 _DISK_CONVERSIONS = {
9583 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
9584 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
9588 class LUBackupQuery(NoHooksLU):
9589 """Query the exports list
9594 def ExpandNames(self):
9595 self.needed_locks = {}
9596 self.share_locks[locking.LEVEL_NODE] = 1
9597 if not self.op.nodes:
9598 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9600 self.needed_locks[locking.LEVEL_NODE] = \
9601 _GetWantedNodes(self, self.op.nodes)
9603 def Exec(self, feedback_fn):
9604 """Compute the list of all the exported system images.
9607 @return: a dictionary with the structure node->(export-list)
9608 where export-list is a list of the instances exported on that node.
9612 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9613 rpcresult = self.rpc.call_export_list(self.nodes)
9615 for node in rpcresult:
9616 if rpcresult[node].fail_msg:
9617 result[node] = False
9619 result[node] = rpcresult[node].payload
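# Illustrative return value (hypothetical node and instance names):
#   {"node1.example.com": ["inst1.example.com", "inst2.example.com"],
#    "node2.example.com": False}
# A value of False marks a node that could not be queried.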
9624 class LUBackupPrepare(NoHooksLU):
9625 """Prepares an instance for an export and returns useful information.
9630 def ExpandNames(self):
9631 self._ExpandAndLockInstance()
9633 def CheckPrereq(self):
9634 """Check prerequisites.
9637 instance_name = self.op.instance_name
9639 self.instance = self.cfg.GetInstanceInfo(instance_name)
9640 assert self.instance is not None, \
9641 "Cannot retrieve locked instance %s" % self.op.instance_name
9642 _CheckNodeOnline(self, self.instance.primary_node)
9644 self._cds = _GetClusterDomainSecret()
9646 def Exec(self, feedback_fn):
9647 """Prepares an instance for an export.
9650 instance = self.instance
9652 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9653 salt = utils.GenerateSecret(8)
9655 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
9656 result = self.rpc.call_x509_cert_create(instance.primary_node,
9657 constants.RIE_CERT_VALIDITY)
9658 result.Raise("Can't create X509 key and certificate on %s" % result.node)
9660 (name, cert_pem) = result.payload
9662 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
9666 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
9667 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
9669 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
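# Rough shape of the data prepared for a remote export (placeholder values;
# the real handshake and HMAC content come from masterd.instance and utils):
#   {"handshake": <handshake tuple>,
#    "x509_key_name": (<key name>, <HMAC over the key name>, <salt>),
#    "x509_ca": <signed X509 CA, PEM>}
# For local exports there is nothing extra to prepare here.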
9675 class LUBackupExport(LogicalUnit):
9676 """Export an instance to an image in the cluster.
9679 HPATH = "instance-export"
9680 HTYPE = constants.HTYPE_INSTANCE
9683 def CheckArguments(self):
9684 """Check the arguments.
9687 self.x509_key_name = self.op.x509_key_name
9688 self.dest_x509_ca_pem = self.op.destination_x509_ca
9690 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9691 if not self.x509_key_name:
9692 raise errors.OpPrereqError("Missing X509 key name for encryption",
9695 if not self.dest_x509_ca_pem:
9696 raise errors.OpPrereqError("Missing destination X509 CA",
9699 def ExpandNames(self):
9700 self._ExpandAndLockInstance()
9702 # Lock all nodes for local exports
9703 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9704 # FIXME: lock only instance primary and destination node
9706 # Sad but true, for now we have to lock all nodes, as we don't know where
9707 # the previous export might be, and in this LU we search for it and
9708 # remove it from its current node. In the future we could fix this by:
9709 # - making a tasklet to search (share-lock all), then create the
9710 # new one, then one to remove, after
9711 # - removing the removal operation altogether
9712 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9714 def DeclareLocks(self, level):
9715 """Last minute lock declaration."""
9716 # All nodes are locked anyway, so nothing to do here.
9718 def BuildHooksEnv(self):
9721 This will run on the master, primary node and target node.
9725 "EXPORT_MODE": self.op.mode,
9726 "EXPORT_NODE": self.op.target_node,
9727 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
9728 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
9729 # TODO: Generic function for boolean env variables
9730 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
9733 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9735 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
9737 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9738 nl.append(self.op.target_node)
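# Example of the hook environment built above (hypothetical values; the
# instance variables added by _BuildInstanceHookEnvByObject are omitted):
#   EXPORT_MODE=local  EXPORT_NODE=node3.example.com
#   EXPORT_DO_SHUTDOWN=True  SHUTDOWN_TIMEOUT=120  REMOVE_INSTANCE=False
# The hooks run on the master and the instance's primary node, plus the
# target node for local exports.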
9742 def CheckPrereq(self):
9743 """Check prerequisites.
9745 This checks that the instance and node names are valid.
9748 instance_name = self.op.instance_name
9750 self.instance = self.cfg.GetInstanceInfo(instance_name)
9751 assert self.instance is not None, \
9752 "Cannot retrieve locked instance %s" % self.op.instance_name
9753 _CheckNodeOnline(self, self.instance.primary_node)
9755 if (self.op.remove_instance and self.instance.admin_up and
9756 not self.op.shutdown):
9757 raise errors.OpPrereqError("Can not remove instance without shutting it"
9760 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9761 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
9762 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
9763 assert self.dst_node is not None
9765 _CheckNodeOnline(self, self.dst_node.name)
9766 _CheckNodeNotDrained(self, self.dst_node.name)
9769 self.dest_disk_info = None
9770 self.dest_x509_ca = None
9772 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9773 self.dst_node = None
9775 if len(self.op.target_node) != len(self.instance.disks):
9776 raise errors.OpPrereqError(("Received destination information for %s"
9777 " disks, but instance %s has %s disks") %
9778 (len(self.op.target_node), instance_name,
9779 len(self.instance.disks)),
9782 cds = _GetClusterDomainSecret()
9784 # Check X509 key name
9786 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
9787 except (TypeError, ValueError), err:
9788 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
9790 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
9791 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
9794 # Load and verify CA
9796 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
9797 except OpenSSL.crypto.Error, err:
9798 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
9799 (err, ), errors.ECODE_INVAL)
9801 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9802 if errcode is not None:
9803 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
9804 (msg, ), errors.ECODE_INVAL)
9806 self.dest_x509_ca = cert
9808 # Verify target information
9810 for idx, disk_data in enumerate(self.op.target_node):
9812 (host, port, magic) = \
9813 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
9814 except errors.GenericError, err:
9815 raise errors.OpPrereqError("Target info for disk %s: %s" %
9816 (idx, err), errors.ECODE_INVAL)
9818 disk_info.append((host, port, magic))
9820 assert len(disk_info) == len(self.op.target_node)
9821 self.dest_disk_info = disk_info
9824 raise errors.ProgrammerError("Unhandled export mode %r" %
9827 # instance disk type verification
9828 # TODO: Implement export support for file-based disks
9829 for disk in self.instance.disks:
9830 if disk.dev_type == constants.LD_FILE:
9831 raise errors.OpPrereqError("Export not supported for instances with"
9832 " file-based disks", errors.ECODE_INVAL)
9834 def _CleanupExports(self, feedback_fn):
9835 """Removes exports of current instance from all other nodes.
9837 If an instance in a cluster with nodes A..D was exported to node C, its
9838 exports will be removed from the nodes A, B and D.
9841 assert self.op.mode != constants.EXPORT_MODE_REMOTE
9843 nodelist = self.cfg.GetNodeList()
9844 nodelist.remove(self.dst_node.name)
9846 # on one-node clusters nodelist will be empty after the removal
9847 # if we proceeded, the backup would be removed because OpBackupQuery
9848 # substitutes an empty list with the full cluster node list.
9849 iname = self.instance.name
9851 feedback_fn("Removing old exports for instance %s" % iname)
9852 exportlist = self.rpc.call_export_list(nodelist)
9853 for node in exportlist:
9854 if exportlist[node].fail_msg:
9856 if iname in exportlist[node].payload:
9857 msg = self.rpc.call_export_remove(node, iname).fail_msg
9859 self.LogWarning("Could not remove older export for instance %s"
9860 " on node %s: %s", iname, node, msg)
9862 def Exec(self, feedback_fn):
9863 """Export an instance to an image in the cluster.
9866 assert self.op.mode in constants.EXPORT_MODES
9868 instance = self.instance
9869 src_node = instance.primary_node
9871 if self.op.shutdown:
9872 # shutdown the instance, but not the disks
9873 feedback_fn("Shutting down instance %s" % instance.name)
9874 result = self.rpc.call_instance_shutdown(src_node, instance,
9875 self.op.shutdown_timeout)
9876 # TODO: Maybe ignore failures if ignore_remove_failures is set
9877 result.Raise("Could not shutdown instance %s on"
9878 " node %s" % (instance.name, src_node))
9880 # set the disks ID correctly since call_instance_start needs the
9881 # correct drbd minor to create the symlinks
9882 for disk in instance.disks:
9883 self.cfg.SetDiskID(disk, src_node)
9885 activate_disks = (not instance.admin_up)
9888 # Activate the instance disks if we're exporting a stopped instance
9889 feedback_fn("Activating disks for %s" % instance.name)
9890 _StartInstanceDisks(self, instance, None)
9893 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
9896 helper.CreateSnapshots()
9898 if (self.op.shutdown and instance.admin_up and
9899 not self.op.remove_instance):
9900 assert not activate_disks
9901 feedback_fn("Starting instance %s" % instance.name)
9902 result = self.rpc.call_instance_start(src_node, instance, None, None)
9903 msg = result.fail_msg
9905 feedback_fn("Failed to start instance: %s" % msg)
9906 _ShutdownInstanceDisks(self, instance)
9907 raise errors.OpExecError("Could not start instance: %s" % msg)
9909 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9910 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
9911 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9912 connect_timeout = constants.RIE_CONNECT_TIMEOUT
9913 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
9915 (key_name, _, _) = self.x509_key_name
9918 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
9921 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
9922 key_name, dest_ca_pem,
9927 # Check for backwards compatibility
9928 assert len(dresults) == len(instance.disks)
9929 assert compat.all(isinstance(i, bool) for i in dresults), \
9930 "Not all results are boolean: %r" % dresults
9934 feedback_fn("Deactivating disks for %s" % instance.name)
9935 _ShutdownInstanceDisks(self, instance)
9937 if not (compat.all(dresults) and fin_resu):
9940 failures.append("export finalization")
9941 if not compat.all(dresults):
9942 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
9944 failures.append("disk export: disk(s) %s" % fdsk)
9946 raise errors.OpExecError("Export failed, errors in %s" %
9947 utils.CommaJoin(failures))
9949 # At this point, the export was successful, we can cleanup/finish
9951 # Remove instance if requested
9952 if self.op.remove_instance:
9953 feedback_fn("Removing instance %s" % instance.name)
9954 _RemoveInstance(self, feedback_fn, instance,
9955 self.op.ignore_remove_failures)
9957 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9958 self._CleanupExports(feedback_fn)
9960 return fin_resu, dresults
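# Illustrative result (hypothetical): fin_resu reports the overall export
# finalization, dresults holds one boolean per instance disk, e.g.
#   (True, [True, True])
# Any False entry (or a false fin_resu) raises OpExecError above instead of
# reaching this return.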
9963 class LUBackupRemove(NoHooksLU):
9964 """Remove exports related to the named instance.
9969 def ExpandNames(self):
9970 self.needed_locks = {}
9971 # We need all nodes to be locked in order for RemoveExport to work, but we
9972 # don't need to lock the instance itself, as nothing will happen to it (and
9973 # we can remove exports also for a removed instance)
9974 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9976 def Exec(self, feedback_fn):
9977 """Remove any export.
9980 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
9981 # If the instance was not found we'll try with the name that was passed in.
9982 # This will only work if it was an FQDN, though.
9984 if not instance_name:
9986 instance_name = self.op.instance_name
9988 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
9989 exportlist = self.rpc.call_export_list(locked_nodes)
9991 for node in exportlist:
9992 msg = exportlist[node].fail_msg
9994 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
9996 if instance_name in exportlist[node].payload:
9998 result = self.rpc.call_export_remove(node, instance_name)
9999 msg = result.fail_msg
10001 logging.error("Could not remove export for instance %s"
10002 " on node %s: %s", instance_name, node, msg)
10004 if fqdn_warn and not found:
10005 feedback_fn("Export not found. If trying to remove an export belonging"
10006 " to a deleted instance please use its Fully Qualified"
10010 class LUGroupAdd(LogicalUnit):
10011 """Logical unit for creating node groups.
10014 HPATH = "group-add"
10015 HTYPE = constants.HTYPE_GROUP
10018 def ExpandNames(self):
10019 # We need the new group's UUID here so that we can create and acquire the
10020 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
10021 # that it should not check whether the UUID exists in the configuration.
10022 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
10023 self.needed_locks = {}
10024 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10026 def CheckPrereq(self):
10027 """Check prerequisites.
10029 This checks that the given group name is not an existing node group
10034 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10035 except errors.OpPrereqError:
10038 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
10039 " node group (UUID: %s)" %
10040 (self.op.group_name, existing_uuid),
10041 errors.ECODE_EXISTS)
10043 if self.op.ndparams:
10044 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10046 def BuildHooksEnv(self):
10047 """Build hooks env.
10051 "GROUP_NAME": self.op.group_name,
10053 mn = self.cfg.GetMasterNode()
10054 return env, [mn], [mn]
10056 def Exec(self, feedback_fn):
10057 """Add the node group to the cluster.
10060 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
10061 uuid=self.group_uuid,
10062 alloc_policy=self.op.alloc_policy,
10063 ndparams=self.op.ndparams)
10065 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
10066 del self.remove_locks[locking.LEVEL_NODEGROUP]
10069 class LUGroupAssignNodes(NoHooksLU):
10070 """Logical unit for assigning nodes to groups.
10075 def ExpandNames(self):
10076 # These raise errors.OpPrereqError on their own:
10077 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10078 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
10080 # We want to lock all the affected nodes and groups. We have readily
10081 # available the list of nodes, and the *destination* group. To gather the
10082 # list of "source" groups, we need to fetch node information.
10083 self.node_data = self.cfg.GetAllNodesInfo()
10084 affected_groups = set(self.node_data[node].group for node in self.op.nodes)
10085 affected_groups.add(self.group_uuid)
10087 self.needed_locks = {
10088 locking.LEVEL_NODEGROUP: list(affected_groups),
10089 locking.LEVEL_NODE: self.op.nodes,
10092 def CheckPrereq(self):
10093 """Check prerequisites.
10096 self.group = self.cfg.GetNodeGroup(self.group_uuid)
10097 instance_data = self.cfg.GetAllInstancesInfo()
10099 if self.group is None:
10100 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10101 (self.op.group_name, self.group_uuid))
10103 (new_splits, previous_splits) = \
10104 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
10105 for node in self.op.nodes],
10106 self.node_data, instance_data)
10109 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
10111 if not self.op.force:
10112 raise errors.OpExecError("The following instances get split by this"
10113 " change and --force was not given: %s" %
10116 self.LogWarning("This operation will split the following instances: %s",
10119 if previous_splits:
10120 self.LogWarning("In addition, these already-split instances continue"
10121 " to be split across groups: %s",
10122 utils.CommaJoin(utils.NiceSort(previous_splits)))
10124 def Exec(self, feedback_fn):
10125 """Assign nodes to a new group.
10128 for node in self.op.nodes:
10129 self.node_data[node].group = self.group_uuid
10131 self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
10134 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
10135 """Check for split instances after a node assignment.
10137 This method considers a series of node assignments as an atomic operation,
10138 and returns information about split instances after applying the set of
10141 In particular, it returns information about newly split instances, and
10142 instances that were already split, and remain so after the change.
10144 Only instances whose disk template is listed in constants.DTS_NET_MIRROR are considered.
10147 @type changes: list of (node_name, new_group_uuid) pairs.
10148 @param changes: list of node assignments to consider.
10149 @param node_data: a dict with data for all nodes
10150 @param instance_data: a dict with all instances to consider
10151 @rtype: a two-tuple
10152 @return: a list of instances that were previously okay and result split as a
10153 consequence of this change, and a list of instances that were previously
10154 split and this change does not fix.
10157 changed_nodes = dict((node, group) for node, group in changes
10158 if node_data[node].group != group)
10160 all_split_instances = set()
10161 previously_split_instances = set()
10163 def InstanceNodes(instance):
10164 return [instance.primary_node] + list(instance.secondary_nodes)
10166 for inst in instance_data.values():
10167 if inst.disk_template not in constants.DTS_NET_MIRROR:
10170 instance_nodes = InstanceNodes(inst)
10172 if len(set(node_data[node].group for node in instance_nodes)) > 1:
10173 previously_split_instances.add(inst.name)
10175 if len(set(changed_nodes.get(node, node_data[node].group)
10176 for node in instance_nodes)) > 1:
10177 all_split_instances.add(inst.name)
10179 return (list(all_split_instances - previously_split_instances),
10180 list(previously_split_instances & all_split_instances))
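# Worked example (hypothetical names): instance "inst1" uses primary "n1" and
# secondary "n2", both currently in group "g1".
#   changes = [("n1", "g2")]
# After the virtual move inst1's nodes span two groups, so it appears in the
# first returned list (newly split). Had n1 and n2 already been in different
# groups before the change, inst1 would instead appear in the second list
# (still split after the change).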
10183 class _GroupQuery(_QueryBase):
10185 FIELDS = query.GROUP_FIELDS
10187 def ExpandNames(self, lu):
10188 lu.needed_locks = {}
10190 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
10191 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
10194 self.wanted = [name_to_uuid[name]
10195 for name in utils.NiceSort(name_to_uuid.keys())]
10197 # Accept names to be either names or UUIDs.
10200 all_uuid = frozenset(self._all_groups.keys())
10202 for name in self.names:
10203 if name in all_uuid:
10204 self.wanted.append(name)
10205 elif name in name_to_uuid:
10206 self.wanted.append(name_to_uuid[name])
10208 missing.append(name)
10211 raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
10212 errors.ECODE_NOENT)
10214 def DeclareLocks(self, lu, level):
10217 def _GetQueryData(self, lu):
10218 """Computes the list of node groups and their attributes.
10221 do_nodes = query.GQ_NODE in self.requested_data
10222 do_instances = query.GQ_INST in self.requested_data
10224 group_to_nodes = None
10225 group_to_instances = None
10227 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
10228 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
10229 # latter GetAllInstancesInfo() is not enough, for we have to go through
10230 # instance->node. Hence, we will need to process nodes even if we only need
10231 # instance information.
10232 if do_nodes or do_instances:
10233 all_nodes = lu.cfg.GetAllNodesInfo()
10234 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
10237 for node in all_nodes.values():
10238 if node.group in group_to_nodes:
10239 group_to_nodes[node.group].append(node.name)
10240 node_to_group[node.name] = node.group
10243 all_instances = lu.cfg.GetAllInstancesInfo()
10244 group_to_instances = dict((uuid, []) for uuid in self.wanted)
10246 for instance in all_instances.values():
10247 node = instance.primary_node
10248 if node in node_to_group:
10249 group_to_instances[node_to_group[node]].append(instance.name)
10252 # Do not pass on node information if it was not requested.
10253 group_to_nodes = None
10255 return query.GroupQueryData([self._all_groups[uuid]
10256 for uuid in self.wanted],
10257 group_to_nodes, group_to_instances)
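# Sketch of the intermediate mappings built above (hypothetical UUIDs/names):
#   group_to_nodes     = {"uuid-g1": ["node1", "node2"], "uuid-g2": ["node3"]}
#   group_to_instances = {"uuid-g1": ["inst1"], "uuid-g2": []}
# Instances are attributed to the group of their primary node; either mapping
# stays None when the corresponding data was not requested.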
10260 class LUGroupQuery(NoHooksLU):
10261 """Logical unit for querying node groups.
10266 def CheckArguments(self):
10267 self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)
10269 def ExpandNames(self):
10270 self.gq.ExpandNames(self)
10272 def Exec(self, feedback_fn):
10273 return self.gq.OldStyleQuery(self)
10276 class LUGroupSetParams(LogicalUnit):
10277 """Modifies the parameters of a node group.
10280 HPATH = "group-modify"
10281 HTYPE = constants.HTYPE_GROUP
10284 def CheckArguments(self):
10287 self.op.alloc_policy,
10290 if all_changes.count(None) == len(all_changes):
10291 raise errors.OpPrereqError("Please pass at least one modification",
10292 errors.ECODE_INVAL)
10294 def ExpandNames(self):
10295 # This raises errors.OpPrereqError on its own:
10296 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10298 self.needed_locks = {
10299 locking.LEVEL_NODEGROUP: [self.group_uuid],
10302 def CheckPrereq(self):
10303 """Check prerequisites.
10306 self.group = self.cfg.GetNodeGroup(self.group_uuid)
10308 if self.group is None:
10309 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10310 (self.op.group_name, self.group_uuid))
10312 if self.op.ndparams:
10313 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
10314 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10315 self.new_ndparams = new_ndparams
10317 def BuildHooksEnv(self):
10318 """Build hooks env.
10322 "GROUP_NAME": self.op.group_name,
10323 "NEW_ALLOC_POLICY": self.op.alloc_policy,
10325 mn = self.cfg.GetMasterNode()
10326 return env, [mn], [mn]
10328 def Exec(self, feedback_fn):
10329 """Modifies the node group.
10334 if self.op.ndparams:
10335 self.group.ndparams = self.new_ndparams
10336 result.append(("ndparams", str(self.group.ndparams)))
10338 if self.op.alloc_policy:
10339 self.group.alloc_policy = self.op.alloc_policy
10341 self.cfg.Update(self.group, feedback_fn)
10346 class LUGroupRemove(LogicalUnit):
10347 HPATH = "group-remove"
10348 HTYPE = constants.HTYPE_GROUP
10351 def ExpandNames(self):
10352 # This will raise errors.OpPrereqError on its own:
10353 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10354 self.needed_locks = {
10355 locking.LEVEL_NODEGROUP: [self.group_uuid],
10358 def CheckPrereq(self):
10359 """Check prerequisites.
10361 This checks that the given group name exists as a node group, that is
10362 empty (i.e., contains no nodes), and that it is not the last group of the cluster.
10366 # Verify that the group is empty.
10367 group_nodes = [node.name
10368 for node in self.cfg.GetAllNodesInfo().values()
10369 if node.group == self.group_uuid]
10372 raise errors.OpPrereqError("Group '%s' not empty, has the following"
10374 (self.op.group_name,
10375 utils.CommaJoin(utils.NiceSort(group_nodes))),
10376 errors.ECODE_STATE)
10378 # Verify the cluster would not be left group-less.
10379 if len(self.cfg.GetNodeGroupList()) == 1:
10380 raise errors.OpPrereqError("Group '%s' is the last group in the cluster,"
10381 " which cannot be left without at least one"
10382 " group" % self.op.group_name,
10383 errors.ECODE_STATE)
10385 def BuildHooksEnv(self):
10386 """Build hooks env.
10390 "GROUP_NAME": self.op.group_name,
10392 mn = self.cfg.GetMasterNode()
10393 return env, [mn], [mn]
10395 def Exec(self, feedback_fn):
10396 """Remove the node group.
10400 self.cfg.RemoveNodeGroup(self.group_uuid)
10401 except errors.ConfigurationError:
10402 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
10403 (self.op.group_name, self.group_uuid))
10405 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10408 class LUGroupRename(LogicalUnit):
10409 HPATH = "group-rename"
10410 HTYPE = constants.HTYPE_GROUP
10413 def ExpandNames(self):
10414 # This raises errors.OpPrereqError on its own:
10415 self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name)
10417 self.needed_locks = {
10418 locking.LEVEL_NODEGROUP: [self.group_uuid],
10421 def CheckPrereq(self):
10422 """Check prerequisites.
10424 This checks that the given old_name exists as a node group, and that new_name doesn't.
10429 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
10430 except errors.OpPrereqError:
10433 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
10434 " node group (UUID: %s)" %
10435 (self.op.new_name, new_name_uuid),
10436 errors.ECODE_EXISTS)
10438 def BuildHooksEnv(self):
10439 """Build hooks env.
10443 "OLD_NAME": self.op.old_name,
10444 "NEW_NAME": self.op.new_name,
10447 mn = self.cfg.GetMasterNode()
10448 all_nodes = self.cfg.GetAllNodesInfo()
10450 all_nodes.pop(mn, None)
10452 for node in all_nodes.values():
10453 if node.group == self.group_uuid:
10454 run_nodes.append(node.name)
10456 return env, run_nodes, run_nodes
10458 def Exec(self, feedback_fn):
10459 """Rename the node group.
10462 group = self.cfg.GetNodeGroup(self.group_uuid)
10465 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10466 (self.op.old_name, self.group_uuid))
10468 group.name = self.op.new_name
10469 self.cfg.Update(group, feedback_fn)
10471 return self.op.new_name
10474 class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
10475 """Generic tags LU.
10477 This is an abstract class which is the parent of all the other tags LUs.
10481 def ExpandNames(self):
10482 self.needed_locks = {}
10483 if self.op.kind == constants.TAG_NODE:
10484 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
10485 self.needed_locks[locking.LEVEL_NODE] = self.op.name
10486 elif self.op.kind == constants.TAG_INSTANCE:
10487 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
10488 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
10490 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
10491 # not possible to acquire the BGL based on opcode parameters)
10493 def CheckPrereq(self):
10494 """Check prerequisites.
10497 if self.op.kind == constants.TAG_CLUSTER:
10498 self.target = self.cfg.GetClusterInfo()
10499 elif self.op.kind == constants.TAG_NODE:
10500 self.target = self.cfg.GetNodeInfo(self.op.name)
10501 elif self.op.kind == constants.TAG_INSTANCE:
10502 self.target = self.cfg.GetInstanceInfo(self.op.name)
10504 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
10505 str(self.op.kind), errors.ECODE_INVAL)
10508 class LUTagsGet(TagsLU):
10509 """Returns the tags of a given object.
10514 def ExpandNames(self):
10515 TagsLU.ExpandNames(self)
10517 # Share locks as this is only a read operation
10518 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
10520 def Exec(self, feedback_fn):
10521 """Returns the tag list.
10524 return list(self.target.GetTags())
10527 class LUTagsSearch(NoHooksLU):
10528 """Searches the tags for a given pattern.
10533 def ExpandNames(self):
10534 self.needed_locks = {}
10536 def CheckPrereq(self):
10537 """Check prerequisites.
10539 This checks the pattern passed for validity by compiling it.
10543 self.re = re.compile(self.op.pattern)
10544 except re.error, err:
10545 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
10546 (self.op.pattern, err), errors.ECODE_INVAL)
10548 def Exec(self, feedback_fn):
10549 """Returns the tag list.
10553 tgts = [("/cluster", cfg.GetClusterInfo())]
10554 ilist = cfg.GetAllInstancesInfo().values()
10555 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
10556 nlist = cfg.GetAllNodesInfo().values()
10557 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
10559 for path, target in tgts:
10560 for tag in target.GetTags():
10561 if self.re.search(tag):
10562 results.append((path, tag))
10566 class LUTagsSet(TagsLU):
10567 """Sets a tag on a given object.
10572 def CheckPrereq(self):
10573 """Check prerequisites.
10575 This checks the type and length of the tag name and value.
10578 TagsLU.CheckPrereq(self)
10579 for tag in self.op.tags:
10580 objects.TaggableObject.ValidateTag(tag)
10582 def Exec(self, feedback_fn):
10587 for tag in self.op.tags:
10588 self.target.AddTag(tag)
10589 except errors.TagError, err:
10590 raise errors.OpExecError("Error while setting tag: %s" % str(err))
10591 self.cfg.Update(self.target, feedback_fn)
10594 class LUTagsDel(TagsLU):
10595 """Delete a list of tags from a given object.
10600 def CheckPrereq(self):
10601 """Check prerequisites.
10603 This checks that we have the given tag.
10606 TagsLU.CheckPrereq(self)
10607 for tag in self.op.tags:
10608 objects.TaggableObject.ValidateTag(tag)
10609 del_tags = frozenset(self.op.tags)
10610 cur_tags = self.target.GetTags()
10612 diff_tags = del_tags - cur_tags
10614 diff_names = ("'%s'" % i for i in sorted(diff_tags))
10615 raise errors.OpPrereqError("Tag(s) %s not found" %
10616 (utils.CommaJoin(diff_names), ),
10617 errors.ECODE_NOENT)
10619 def Exec(self, feedback_fn):
10620 """Remove the tag from the object.
10623 for tag in self.op.tags:
10624 self.target.RemoveTag(tag)
10625 self.cfg.Update(self.target, feedback_fn)
10628 class LUTestDelay(NoHooksLU):
10629 """Sleep for a specified amount of time.
10631 This LU sleeps on the master and/or nodes for a specified amount of time.
10637 def ExpandNames(self):
10638 """Expand names and set required locks.
10640 This expands the node list, if any.
10643 self.needed_locks = {}
10644 if self.op.on_nodes:
10645 # _GetWantedNodes can be used here, but is not always appropriate to use
10646 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
10647 # more information.
10648 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
10649 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
10651 def _TestDelay(self):
10652 """Do the actual sleep.
10655 if self.op.on_master:
10656 if not utils.TestDelay(self.op.duration):
10657 raise errors.OpExecError("Error during master delay test")
10658 if self.op.on_nodes:
10659 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
10660 for node, node_result in result.items():
10661 node_result.Raise("Failure during rpc call to node %s" % node)
10663 def Exec(self, feedback_fn):
10664 """Execute the test delay opcode, with the wanted repetitions.
10667 if self.op.repeat == 0:
10670 top_value = self.op.repeat - 1
10671 for i in range(self.op.repeat):
10672 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
10676 class LUTestJqueue(NoHooksLU):
10677 """Utility LU to test some aspects of the job queue.
10682 # Must be lower than default timeout for WaitForJobChange to see whether it
10683 # notices changed jobs
10684 _CLIENT_CONNECT_TIMEOUT = 20.0
10685 _CLIENT_CONFIRM_TIMEOUT = 60.0
10688 def _NotifyUsingSocket(cls, cb, errcls):
10689 """Opens a Unix socket and waits for another program to connect.
10692 @param cb: Callback to send socket name to client
10693 @type errcls: class
10694 @param errcls: Exception class to use for errors
10697 # Using a temporary directory as there's no easy way to create temporary
10698 # sockets without writing a custom loop around tempfile.mktemp and
10700 tmpdir = tempfile.mkdtemp()
10702 tmpsock = utils.PathJoin(tmpdir, "sock")
10704 logging.debug("Creating temporary socket at %s", tmpsock)
10705 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
10710 # Send details to client
10713 # Wait for client to connect before continuing
10714 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
10716 (conn, _) = sock.accept()
10717 except socket.error, err:
10718 raise errcls("Client didn't connect in time (%s)" % err)
10722 # Remove as soon as client is connected
10723 shutil.rmtree(tmpdir)
10725 # Wait for client to close
10728 # pylint: disable-msg=E1101
10729 # Instance of '_socketobject' has no ... member
10730 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
10732 except socket.error, err:
10733 raise errcls("Client failed to confirm notification (%s)" % err)
10737 def _SendNotification(self, test, arg, sockname):
10738 """Sends a notification to the client.
10741 @param test: Test name
10742 @param arg: Test argument (depends on test)
10743 @type sockname: string
10744 @param sockname: Socket path
10747 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
10749 def _Notify(self, prereq, test, arg):
10750 """Notifies the client of a test.
10753 @param prereq: Whether this is a prereq-phase test
10755 @param test: Test name
10756 @param arg: Test argument (depends on test)
10760 errcls = errors.OpPrereqError
10762 errcls = errors.OpExecError
10764 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
10768 def CheckArguments(self):
10769 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
10770 self.expandnames_calls = 0
10772 def ExpandNames(self):
10773 checkargs_calls = getattr(self, "checkargs_calls", 0)
10774 if checkargs_calls < 1:
10775 raise errors.ProgrammerError("CheckArguments was not called")
10777 self.expandnames_calls += 1
10779 if self.op.notify_waitlock:
10780 self._Notify(True, constants.JQT_EXPANDNAMES, None)
10782 self.LogInfo("Expanding names")
10784 # Get lock on master node (just to get a lock, not for a particular reason)
10785 self.needed_locks = {
10786 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
10789 def Exec(self, feedback_fn):
10790 if self.expandnames_calls < 1:
10791 raise errors.ProgrammerError("ExpandNames was not called")
10793 if self.op.notify_exec:
10794 self._Notify(False, constants.JQT_EXEC, None)
10796 self.LogInfo("Executing")
10798 if self.op.log_messages:
10799 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
10800 for idx, msg in enumerate(self.op.log_messages):
10801 self.LogInfo("Sending log message %s", idx + 1)
10802 feedback_fn(constants.JQT_MSGPREFIX + msg)
10803 # Report how many test messages have been sent
10804 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
10807 raise errors.OpExecError("Opcode failure was requested")
10812 class IAllocator(object):
10813 """IAllocator framework.
10815 An IAllocator instance has three sets of attributes:
10816 - cfg that is needed to query the cluster
10817 - input data (all members of the _KEYS class attribute are required)
10818 - four buffer attributes (in|out_data|text), that represent the
10819 input (to the external script) in text and data structure format,
10820 and the output from it, again in two formats
10821 - the result variables from the script (success, info, nodes) for easy usage
10825 # pylint: disable-msg=R0902
10826 # lots of instance attributes
10828 "name", "mem_size", "disks", "disk_template",
10829 "os", "tags", "nics", "vcpus", "hypervisor",
10832 "name", "relocate_from",
10838 def __init__(self, cfg, rpc, mode, **kwargs):
10841 # init buffer variables
10842 self.in_text = self.out_text = self.in_data = self.out_data = None
10843 # init all input fields so that pylint is happy
10845 self.mem_size = self.disks = self.disk_template = None
10846 self.os = self.tags = self.nics = self.vcpus = None
10847 self.hypervisor = None
10848 self.relocate_from = None
10850 self.evac_nodes = None
10852 self.required_nodes = None
10853 # init result fields
10854 self.success = self.info = self.result = None
10855 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10856 keyset = self._ALLO_KEYS
10857 fn = self._AddNewInstance
10858 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10859 keyset = self._RELO_KEYS
10860 fn = self._AddRelocateInstance
10861 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10862 keyset = self._EVAC_KEYS
10863 fn = self._AddEvacuateNodes
10865 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
10866 " IAllocator" % self.mode)
10868 if key not in keyset:
10869 raise errors.ProgrammerError("Invalid input parameter '%s' to"
10870 " IAllocator" % key)
10871 setattr(self, key, kwargs[key])
10874 if key not in kwargs:
10875 raise errors.ProgrammerError("Missing input parameter '%s' to"
10876 " IAllocator" % key)
10877 self._BuildInputData(fn)
10879 def _ComputeClusterData(self):
10880 """Compute the generic allocator input data.
10882 This is the data that is independent of the actual operation.
10886 cluster_info = cfg.GetClusterInfo()
10889 "version": constants.IALLOCATOR_VERSION,
10890 "cluster_name": cfg.GetClusterName(),
10891 "cluster_tags": list(cluster_info.GetTags()),
10892 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
10893 # we don't have job IDs
10895 ninfo = cfg.GetAllNodesInfo()
10896 iinfo = cfg.GetAllInstancesInfo().values()
10897 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
10900 node_list = [n.name for n in ninfo.values() if n.vm_capable]
10902 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10903 hypervisor_name = self.hypervisor
10904 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10905 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
10906 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10907 hypervisor_name = cluster_info.enabled_hypervisors[0]
10909 node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
10912 self.rpc.call_all_instances_info(node_list,
10913 cluster_info.enabled_hypervisors)
10915 data["nodegroups"] = self._ComputeNodeGroupData(cfg)
10917 config_ndata = self._ComputeBasicNodeData(ninfo)
10918 data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
10919 i_list, config_ndata)
10920 assert len(data["nodes"]) == len(ninfo), \
10921 "Incomplete node data computed"
10923 data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
10925 self.in_data = data
10928 def _ComputeNodeGroupData(cfg):
10929 """Compute node groups data.
10933 for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
10935 "name": gdata.name,
10936 "alloc_policy": gdata.alloc_policy,
10941 def _ComputeBasicNodeData(node_cfg):
10942 """Compute global node data.
10945 @returns: a dict of name: (node dict, node config)
10949 for ninfo in node_cfg.values():
10950 # fill in static (config-based) values
10952 "tags": list(ninfo.GetTags()),
10953 "primary_ip": ninfo.primary_ip,
10954 "secondary_ip": ninfo.secondary_ip,
10955 "offline": ninfo.offline,
10956 "drained": ninfo.drained,
10957 "master_candidate": ninfo.master_candidate,
10958 "group": ninfo.group,
10959 "master_capable": ninfo.master_capable,
10960 "vm_capable": ninfo.vm_capable,
10963 node_results[ninfo.name] = pnr
10965 return node_results
10968 def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
10970 """Compute global node data.
10972 @param node_results: the basic node structures as filled from the config
10975 # make a copy of the current dict
10976 node_results = dict(node_results)
10977 for nname, nresult in node_data.items():
10978 assert nname in node_results, "Missing basic data for node %s" % nname
10979 ninfo = node_cfg[nname]
10981 if not (ninfo.offline or ninfo.drained):
10982 nresult.Raise("Can't get data for node %s" % nname)
10983 node_iinfo[nname].Raise("Can't get node instance info from node %s" %
10985 remote_info = nresult.payload
10987 for attr in ['memory_total', 'memory_free', 'memory_dom0',
10988 'vg_size', 'vg_free', 'cpu_total']:
10989 if attr not in remote_info:
10990 raise errors.OpExecError("Node '%s' didn't return attribute"
10991 " '%s'" % (nname, attr))
10992 if not isinstance(remote_info[attr], int):
10993 raise errors.OpExecError("Node '%s' returned invalid value"
10995 (nname, attr, remote_info[attr]))
10996 # compute memory used by primary instances
10997 i_p_mem = i_p_up_mem = 0
10998 for iinfo, beinfo in i_list:
10999 if iinfo.primary_node == nname:
11000 i_p_mem += beinfo[constants.BE_MEMORY]
11001 if iinfo.name not in node_iinfo[nname].payload:
11004 i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
11005 i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
11006 remote_info['memory_free'] -= max(0, i_mem_diff)
11009 i_p_up_mem += beinfo[constants.BE_MEMORY]
11011 # compute memory used by instances
11013 "total_memory": remote_info['memory_total'],
11014 "reserved_memory": remote_info['memory_dom0'],
11015 "free_memory": remote_info['memory_free'],
11016 "total_disk": remote_info['vg_size'],
11017 "free_disk": remote_info['vg_free'],
11018 "total_cpus": remote_info['cpu_total'],
11019 "i_pri_memory": i_p_mem,
11020 "i_pri_up_memory": i_p_up_mem,
11022 pnr_dyn.update(node_results[nname])
11024 node_results[nname] = pnr_dyn
11026 return node_results
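# Example of a single entry in node_results (hypothetical numbers; the static
# keys from _ComputeBasicNodeData are merged with the dynamic ones above):
#   "node1.example.com": {
#       "total_memory": 16384, "reserved_memory": 1024, "free_memory": 6144,
#       "total_disk": 512000, "free_disk": 204800, "total_cpus": 8,
#       "i_pri_memory": 8192, "i_pri_up_memory": 6144,
#       "offline": False, "drained": False, "group": "<group uuid>", ...}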
11029 def _ComputeInstanceData(cluster_info, i_list):
11030 """Compute global instance data.
11034 for iinfo, beinfo in i_list:
11036 for nic in iinfo.nics:
11037 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
11038 nic_dict = {"mac": nic.mac,
11040 "mode": filled_params[constants.NIC_MODE],
11041 "link": filled_params[constants.NIC_LINK],
11043 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
11044 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
11045 nic_data.append(nic_dict)
11047 "tags": list(iinfo.GetTags()),
11048 "admin_up": iinfo.admin_up,
11049 "vcpus": beinfo[constants.BE_VCPUS],
11050 "memory": beinfo[constants.BE_MEMORY],
11052 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
11054 "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
11055 "disk_template": iinfo.disk_template,
11056 "hypervisor": iinfo.hypervisor,
11058 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
11060 instance_data[iinfo.name] = pir
11062 return instance_data
11064 def _AddNewInstance(self):
11065 """Add new instance data to allocator structure.
11067 This in combination with _AllocatorGetClusterData will create the
11068 correct structure needed as input for the allocator.
11070 The checks for the completeness of the opcode must have already been done.
11074 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
11076 if self.disk_template in constants.DTS_NET_MIRROR:
11077 self.required_nodes = 2
11079 self.required_nodes = 1
11082 "disk_template": self.disk_template,
11085 "vcpus": self.vcpus,
11086 "memory": self.mem_size,
11087 "disks": self.disks,
11088 "disk_space_total": disk_space,
11090 "required_nodes": self.required_nodes,
11094 def _AddRelocateInstance(self):
11095 """Add relocate instance data to allocator structure.
11097 This in combination with _IAllocatorGetClusterData will create the
11098 correct structure needed as input for the allocator.
11100 The checks for the completeness of the opcode must have already been done.
11104 instance = self.cfg.GetInstanceInfo(self.name)
11105 if instance is None:
11106 raise errors.ProgrammerError("Unknown instance '%s' passed to"
11107 " IAllocator" % self.name)
11109 if instance.disk_template not in constants.DTS_NET_MIRROR:
11110 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
11111 errors.ECODE_INVAL)
11113 if len(instance.secondary_nodes) != 1:
11114 raise errors.OpPrereqError("Instance has not exactly one secondary node",
11115 errors.ECODE_STATE)
11117 self.required_nodes = 1
11118 disk_sizes = [{'size': disk.size} for disk in instance.disks]
11119 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
11123 "disk_space_total": disk_space,
11124 "required_nodes": self.required_nodes,
11125 "relocate_from": self.relocate_from,
11129 def _AddEvacuateNodes(self):
11130 """Add evacuate nodes data to allocator structure.
11134 "evac_nodes": self.evac_nodes
11138 def _BuildInputData(self, fn):
11139 """Build input data structures.
11142 self._ComputeClusterData()
11145 request["type"] = self.mode
11146 self.in_data["request"] = request
11148 self.in_text = serializer.Dump(self.in_data)
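# Rough shape of the serialized allocator input (hypothetical values):
#   {"version": constants.IALLOCATOR_VERSION,
#    "cluster_name": "cluster.example.com",
#    "cluster_tags": [], "enabled_hypervisors": ["xen-pvm"],
#    "nodegroups": {...}, "nodes": {...}, "instances": {...},
#    "request": {"type": <mode>, ...}}
# The "request" part is produced by the mode-specific fn passed in; the rest
# comes from _ComputeClusterData.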
11150 def Run(self, name, validate=True, call_fn=None):
11151 """Run an instance allocator and return the results.
11154 if call_fn is None:
11155 call_fn = self.rpc.call_iallocator_runner
11157 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
11158 result.Raise("Failure while running the iallocator script")
11160 self.out_text = result.payload
11162 self._ValidateResult()
11164 def _ValidateResult(self):
11165 """Process the allocator results.
11167 This will process and if successful save the result in
11168 self.out_data and the other parameters.
11172 rdict = serializer.Load(self.out_text)
11173 except Exception, err:
11174 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
11176 if not isinstance(rdict, dict):
11177 raise errors.OpExecError("Can't parse iallocator results: not a dict")
11179 # TODO: remove backwards compatibility in later versions
11180 if "nodes" in rdict and "result" not in rdict:
11181 rdict["result"] = rdict["nodes"]
11184 for key in "success", "info", "result":
11185 if key not in rdict:
11186 raise errors.OpExecError("Can't parse iallocator results:"
11187 " missing key '%s'" % key)
11188 setattr(self, key, rdict[key])
11190 if not isinstance(rdict["result"], list):
11191 raise errors.OpExecError("Can't parse iallocator results: 'result' key"
11193 self.out_data = rdict
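# Minimal example of a well-formed allocator reply (hypothetical content):
#   {"success": true, "info": "allocation successful", "result": ["node2"]}
# Older scripts that return "nodes" instead of "result" are still accepted by
# the compatibility shim above; a missing key or a non-list "result" is
# reported as a parse error.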
11196 class LUTestAllocator(NoHooksLU):
11197 """Run allocator tests.
11199 This LU runs the allocator tests
11202 def CheckPrereq(self):
11203 """Check prerequisites.
11205 This checks the opcode parameters depending on the direction and mode of the test.
11208 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
11209 for attr in ["mem_size", "disks", "disk_template",
11210 "os", "tags", "nics", "vcpus"]:
11211 if not hasattr(self.op, attr):
11212 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
11213 attr, errors.ECODE_INVAL)
11214 iname = self.cfg.ExpandInstanceName(self.op.name)
11215 if iname is not None:
11216 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
11217 iname, errors.ECODE_EXISTS)
11218 if not isinstance(self.op.nics, list):
11219 raise errors.OpPrereqError("Invalid parameter 'nics'",
11220 errors.ECODE_INVAL)
11221 if not isinstance(self.op.disks, list):
11222 raise errors.OpPrereqError("Invalid parameter 'disks'",
11223 errors.ECODE_INVAL)
11224 for row in self.op.disks:
11225 if (not isinstance(row, dict) or
11226 "size" not in row or
11227 not isinstance(row["size"], int) or
11228 "mode" not in row or
11229 row["mode"] not in ['r', 'w']):
11230 raise errors.OpPrereqError("Invalid contents of the 'disks'"
11231 " parameter", errors.ECODE_INVAL)
11232 if self.op.hypervisor is None:
11233 self.op.hypervisor = self.cfg.GetHypervisorType()
11234 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
11235 fname = _ExpandInstanceName(self.cfg, self.op.name)
11236 self.op.name = fname
11237 self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
11238 elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
11239 if not hasattr(self.op, "evac_nodes"):
11240 raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
11241 " opcode input", errors.ECODE_INVAL)
11243 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
11244 self.op.mode, errors.ECODE_INVAL)
11246 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
11247 if self.op.allocator is None:
11248 raise errors.OpPrereqError("Missing allocator name",
11249 errors.ECODE_INVAL)
11250 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
11251 raise errors.OpPrereqError("Wrong allocator test '%s'" %
11252 self.op.direction, errors.ECODE_INVAL)
11254 def Exec(self, feedback_fn):
11255 """Run the allocator test.
11258 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
11259 ial = IAllocator(self.cfg, self.rpc,
11262 mem_size=self.op.mem_size,
11263 disks=self.op.disks,
11264 disk_template=self.op.disk_template,
11268 vcpus=self.op.vcpus,
11269 hypervisor=self.op.hypervisor,
11271 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
11272 ial = IAllocator(self.cfg, self.rpc,
11275 relocate_from=list(self.relocate_from),
11277 elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
11278 ial = IAllocator(self.cfg, self.rpc,
11280 evac_nodes=self.op.evac_nodes)
11282 raise errors.ProgrammerError("Uncaught mode %s in"
11283 " LUTestAllocator.Exec", self.op.mode)
11285 if self.op.direction == constants.IALLOCATOR_DIR_IN:
11286 result = ial.in_text
11288 ial.Run(self.op.allocator, validate=False)
11289 result = ial.out_text
11293 #: Query type implementations
11295 constants.QR_INSTANCE: _InstanceQuery,
11296 constants.QR_NODE: _NodeQuery,
11297 constants.QR_GROUP: _GroupQuery,
11301 def _GetQueryImplementation(name):
11302 """Returns the implementation for a query type.
11304 @param name: Query type, must be one of L{constants.QR_OP_QUERY}
11308 return _QUERY_IMPL[name]
11310 raise errors.OpPrereqError("Unknown query resource '%s'" % name,
11311 errors.ECODE_INVAL)