4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
38 import re
39 import logging
40 import copy
41 import OpenSSL
42 import itertools
44 from ganeti import ssh
45 from ganeti import utils
46 from ganeti import errors
47 from ganeti import hypervisor
48 from ganeti import locking
49 from ganeti import constants
50 from ganeti import objects
51 from ganeti import serializer
52 from ganeti import ssconf
53 from ganeti import uidpool
54 from ganeti import compat
55 from ganeti import masterd
56 from ganeti import netutils
57 from ganeti import query
58 from ganeti import qlang
59 from ganeti import opcodes
61 import ganeti.masterd.instance # pylint: disable-msg=W0611
64 def _SupportsOob(cfg, node):
65 """Tells if node supports OOB.
67 @type cfg: L{config.ConfigWriter}
68 @param cfg: The cluster configuration
69 @type node: L{objects.Node}
71 @return: The OOB script if supported or an empty string otherwise
74 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
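# Illustrative usage only (the node_name variable is hypothetical): an LU can
# look up the OOB program for a node and treat an empty result as "no
# out-of-band support":
#
#   node = lu.cfg.GetNodeInfo(node_name)
#   oob_program = _SupportsOob(lu.cfg, node)
#   if not oob_program:
#     lu.LogInfo("Node %s has no OOB program configured", node.name)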
78 class LogicalUnit(object):
79 """Logical Unit base class.
81 Subclasses must follow these rules:
82 - implement ExpandNames
83 - implement CheckPrereq (except when tasklets are used)
84 - implement Exec (except when tasklets are used)
85 - implement BuildHooksEnv
86 - redefine HPATH and HTYPE
87 - optionally redefine their run requirements:
88 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
90 Note that all commands require root permissions.
92 @ivar dry_run_result: the value (if any) that will be returned to the caller
93 in dry-run mode (signalled by opcode dry_run parameter)
100 def __init__(self, processor, op, context, rpc):
101 """Constructor for LogicalUnit.
103 This needs to be overridden in derived classes in order to check op
104 validity.
107 self.proc = processor
109 self.cfg = context.cfg
110 self.context = context
112 # Dicts used to declare locking needs to mcpu
113 self.needed_locks = None
114 self.acquired_locks = {}
115 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
117 self.remove_locks = {}
118 # Used to force good behavior when calling helper functions
119 self.recalculate_locks = {}
122 self.Log = processor.Log # pylint: disable-msg=C0103
123 self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
124 self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
125 self.LogStep = processor.LogStep # pylint: disable-msg=C0103
126 # support for dry-run
127 self.dry_run_result = None
128 # support for generic debug attribute
129 if (not hasattr(self.op, "debug_level") or
130 not isinstance(self.op.debug_level, int)):
131 self.op.debug_level = 0
136 # Validate opcode parameters and set defaults
137 self.op.Validate(True)
139 self.CheckArguments()
142 """Returns the SshRunner object
146 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
149 ssh = property(fget=__GetSSH)
151 def CheckArguments(self):
152 """Check syntactic validity for the opcode arguments.
154 This method is for doing a simple syntactic check and ensuring
155 validity of opcode parameters, without any cluster-related
156 checks. While the same can be accomplished in ExpandNames and/or
157 CheckPrereq, doing these separately is better because:
159 - ExpandNames is left as purely a lock-related function
160 - CheckPrereq is run after we have acquired locks (and possible
161 waits)
163 The function is allowed to change the self.op attribute so that
164 later methods no longer need to worry about missing parameters.
169 def ExpandNames(self):
170 """Expand names for this LU.
172 This method is called before starting to execute the opcode, and it should
173 update all the parameters of the opcode to their canonical form (e.g. a
174 short node name must be fully expanded after this method has successfully
175 completed). This way locking, hooks, logging, etc. can work correctly.
177 LUs which implement this method must also populate the self.needed_locks
178 member, as a dict with lock levels as keys, and a list of needed lock names
181 - use an empty dict if you don't need any lock
182 - if you don't need any lock at a particular level omit that level
183 - don't put anything for the BGL level
184 - if you want all locks at a level use locking.ALL_SET as a value
186 If you need to share locks (rather than acquire them exclusively) at one
187 level you can modify self.share_locks, setting a true value (usually 1) for
188 that level. By default locks are not shared.
190 This function can also define a list of tasklets, which then will be
191 executed in order instead of the usual LU-level CheckPrereq and Exec
192 functions, if those are not defined by the LU.
196 # Acquire all nodes and one instance
197 self.needed_locks = {
198 locking.LEVEL_NODE: locking.ALL_SET,
199 locking.LEVEL_INSTANCE: ['instance1.example.com'],
200 }
201 # Acquire just two nodes
202 self.needed_locks = {
203 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
204 }
206 self.needed_locks = {} # No, you can't leave it to the default value None
209 # The implementation of this method is mandatory only if the new LU is
210 # concurrent, so that old LUs don't need to be changed all at the same
211 # time.
213 self.needed_locks = {} # Exclusive LUs don't need locks.
215 raise NotImplementedError
217 def DeclareLocks(self, level):
218 """Declare LU locking needs for a level
220 While most LUs can just declare their locking needs at ExpandNames time,
221 sometimes there's the need to calculate some locks after having acquired
222 the ones before. This function is called just before acquiring locks at a
223 particular level, but after acquiring the ones at lower levels, and permits
224 such calculations. It can be used to modify self.needed_locks, and by
225 default it does nothing.
227 This function is only called if you have something already set in
228 self.needed_locks for the level.
230 @param level: Locking level which is going to be locked
231 @type level: member of ganeti.locking.LEVELS
235 def CheckPrereq(self):
236 """Check prerequisites for this LU.
238 This method should check that the prerequisites for the execution
239 of this LU are fulfilled. It can do internode communication, but
240 it should be idempotent - no cluster or system changes are
241 allowed.
243 The method should raise errors.OpPrereqError in case something is
244 not fulfilled. Its return value is ignored.
246 This method should also update all the parameters of the opcode to
247 their canonical form if it hasn't been done by ExpandNames before.
250 if self.tasklets is not None:
251 for (idx, tl) in enumerate(self.tasklets):
252 logging.debug("Checking prerequisites for tasklet %s/%s",
253 idx + 1, len(self.tasklets))
254 tl.CheckPrereq()
258 def Exec(self, feedback_fn):
261 This method should implement the actual work. It should raise
262 errors.OpExecError for failures that are somewhat dealt with in
263 code, or expected.
266 if self.tasklets is not None:
267 for (idx, tl) in enumerate(self.tasklets):
268 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
269 tl.Exec(feedback_fn)
270 else:
271 raise NotImplementedError
273 def BuildHooksEnv(self):
274 """Build hooks environment for this LU.
276 This method should return a three-element tuple consisting of: a dict
277 containing the environment that will be used for running the
278 specific hook for this LU, a list of node names on which the hook
279 should run before the execution, and a list of node names on which
280 the hook should run after the execution.
282 The keys of the dict must not have 'GANETI_' prefixed as this will
283 be handled in the hooks runner. Also note additional keys will be
284 added by the hooks runner. If the LU doesn't define any
285 environment, an empty dict (and not None) should be returned.
287 No nodes should be returned as an empty list (and not None).
289 Note that if the HPATH for a LU class is None, this function will
290 not be called.
293 raise NotImplementedError
295 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
296 """Notify the LU about the results of its hooks.
298 This method is called every time a hooks phase is executed, and notifies
299 the Logical Unit about the hooks' result. The LU can then use it to alter
300 its result based on the hooks. By default the method does nothing and the
301 previous result is passed back unchanged but any LU can define it if it
302 wants to use the local cluster hook-scripts somehow.
304 @param phase: one of L{constants.HOOKS_PHASE_POST} or
305 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
306 @param hook_results: the results of the multi-node hooks rpc call
307 @param feedback_fn: function used to send feedback back to the caller
308 @param lu_result: the previous Exec result this LU had, or None
310 @return: the new Exec result, based on the previous result
314 # API must be kept, thus we ignore the unused argument and "could
315 # be a function" warnings
316 # pylint: disable-msg=W0613,R0201
319 def _ExpandAndLockInstance(self):
320 """Helper function to expand and lock an instance.
322 Many LUs that work on an instance take its name in self.op.instance_name
323 and need to expand it and then declare the expanded name for locking. This
324 function does it, and then updates self.op.instance_name to the expanded
325 name. It also initializes needed_locks as a dict, if this hasn't been done
326 before.
329 if self.needed_locks is None:
330 self.needed_locks = {}
332 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
333 "_ExpandAndLockInstance called with instance-level locks set"
334 self.op.instance_name = _ExpandInstanceName(self.cfg,
335 self.op.instance_name)
336 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
338 def _LockInstancesNodes(self, primary_only=False):
339 """Helper function to declare instances' nodes for locking.
341 This function should be called after locking one or more instances to lock
342 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
343 with all primary or secondary nodes for instances already locked and
344 present in self.needed_locks[locking.LEVEL_INSTANCE].
346 It should be called from DeclareLocks, and for safety only works if
347 self.recalculate_locks[locking.LEVEL_NODE] is set.
349 In the future it may grow parameters to just lock some instance's nodes, or
350 to just lock primaries or secondary nodes, if needed.
352 It should be called from DeclareLocks in a way similar to::
354 if level == locking.LEVEL_NODE:
355 self._LockInstancesNodes()
357 @type primary_only: boolean
358 @param primary_only: only lock primary nodes of locked instances
361 assert locking.LEVEL_NODE in self.recalculate_locks, \
362 "_LockInstancesNodes helper function called with no nodes to recalculate"
364 # TODO: check if we've really been called with the instance locks held
366 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
367 # future we might want to have different behaviors depending on the value
368 # of self.recalculate_locks[locking.LEVEL_NODE]
369 wanted_nodes = []
370 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
371 instance = self.context.cfg.GetInstanceInfo(instance_name)
372 wanted_nodes.append(instance.primary_node)
373 if not primary_only:
374 wanted_nodes.extend(instance.secondary_nodes)
376 if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
377 self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
378 elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
379 self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
381 del self.recalculate_locks[locking.LEVEL_NODE]
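# A hedged sketch (not part of the original module) of how an instance-level LU
# typically combines the helpers above; the LUInstanceExample name is
# hypothetical:
#
#   class LUInstanceExample(LogicalUnit):
#     def ExpandNames(self):
#       self._ExpandAndLockInstance()
#       self.needed_locks[locking.LEVEL_NODE] = []
#       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#     def DeclareLocks(self, level):
#       if level == locking.LEVEL_NODE:
#         self._LockInstancesNodes()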
384 class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
385 """Simple LU which runs no hooks.
387 This LU is intended as a parent for other LogicalUnits which will
388 run no hooks, in order to reduce duplicate code.
394 def BuildHooksEnv(self):
395 """Empty BuildHooksEnv for NoHooksLU.
397 This just raises an error.
400 assert False, "BuildHooksEnv called for NoHooksLUs"
404 """Tasklet base class.
406 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
407 they can mix legacy code with tasklets. Locking needs to be done in the LU,
408 tasklets know nothing about locks.
410 Subclasses must follow these rules:
411 - Implement CheckPrereq
415 def __init__(self, lu):
422 def CheckPrereq(self):
423 """Check prerequisites for this tasklet.
425 This method should check whether the prerequisites for the execution of
426 this tasklet are fulfilled. It can do internode communication, but it
427 should be idempotent - no cluster or system changes are allowed.
429 The method should raise errors.OpPrereqError in case something is not
430 fulfilled. Its return value is ignored.
432 This method should also update all parameters to their canonical form if it
433 hasn't been done before.
438 def Exec(self, feedback_fn):
439 """Execute the tasklet.
441 This method should implement the actual work. It should raise
442 errors.OpExecError for failures that are somewhat dealt with in code, or
443 expected.
446 raise NotImplementedError
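# A minimal Tasklet sketch (illustrative only, _NoopTasklet is hypothetical):
# the owning LU does all the locking, the tasklet just checks and executes:
#
#   class _NoopTasklet(Tasklet):
#     def CheckPrereq(self):
#       pass  # nothing to verify in this toy example
#
#     def Exec(self, feedback_fn):
#       feedback_fn("Nothing to do")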
450 """Base for query utility classes.
453 #: Attribute holding field definitions
456 def __init__(self, names, fields, use_locking):
457 """Initializes this class.
461 self.use_locking = use_locking
463 self.query = query.Query(self.FIELDS, fields)
464 self.requested_data = self.query.RequestedData()
466 self.do_locking = None
469 def _GetNames(self, lu, all_names, lock_level):
470 """Helper function to determine names asked for in the query.
474 names = lu.acquired_locks[lock_level]
478 if self.wanted == locking.ALL_SET:
479 assert not self.names
480 # caller didn't specify names, so ordering is not important
481 return utils.NiceSort(names)
483 # caller specified names and we must keep the same order
485 assert not self.do_locking or lu.acquired_locks[lock_level]
487 missing = set(self.wanted).difference(names)
488 if missing:
489 raise errors.OpExecError("Some items were removed before retrieving"
490 " their data: %s" % missing)
492 # Return expanded names
493 return self.wanted
495 @classmethod
496 def FieldsQuery(cls, fields):
497 """Returns list of available fields.
499 @return: List of L{objects.QueryFieldDefinition}
502 return query.QueryFields(cls.FIELDS, fields)
504 def ExpandNames(self, lu):
505 """Expand names for this query.
507 See L{LogicalUnit.ExpandNames}.
510 raise NotImplementedError()
512 def DeclareLocks(self, lu, level):
513 """Declare locks for this query.
515 See L{LogicalUnit.DeclareLocks}.
518 raise NotImplementedError()
520 def _GetQueryData(self, lu):
521 """Collects all data for this query.
523 @return: Query data object
526 raise NotImplementedError()
528 def NewStyleQuery(self, lu):
529 """Collect data and execute query.
532 return query.GetQueryResponse(self.query, self._GetQueryData(lu))
534 def OldStyleQuery(self, lu):
535 """Collect data and execute query.
538 return self.query.OldStyleQuery(self._GetQueryData(lu))
541 def _GetWantedNodes(lu, nodes):
542 """Returns list of checked and expanded node names.
544 @type lu: L{LogicalUnit}
545 @param lu: the logical unit on whose behalf we execute
547 @param nodes: list of node names or None for all nodes
549 @return: the list of nodes, sorted
550 @raise errors.ProgrammerError: if the nodes parameter is wrong type
553 if nodes:
554 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
556 return utils.NiceSort(lu.cfg.GetNodeList())
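# Illustrative behaviour (node names are hypothetical): an explicit list is
# expanded name by name, while a false value selects every node in the cluster:
#
#   _GetWantedNodes(lu, ["node1"])  # -> ["node1.example.com"]
#   _GetWantedNodes(lu, None)       # -> all cluster nodes, nicely sorted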
559 def _GetWantedInstances(lu, instances):
560 """Returns list of checked and expanded instance names.
562 @type lu: L{LogicalUnit}
563 @param lu: the logical unit on whose behalf we execute
564 @type instances: list
565 @param instances: list of instance names or None for all instances
567 @return: the list of instances, sorted
568 @raise errors.OpPrereqError: if the instances parameter is wrong type
569 @raise errors.OpPrereqError: if any of the passed instances is not found
572 if instances:
573 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
574 else:
575 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
576 return wanted
579 def _GetUpdatedParams(old_params, update_dict,
580 use_default=True, use_none=False):
581 """Return the new version of a parameter dictionary.
583 @type old_params: dict
584 @param old_params: old parameters
585 @type update_dict: dict
586 @param update_dict: dict containing new parameter values, or
587 constants.VALUE_DEFAULT to reset the parameter to its default
589 @type use_default: boolean
590 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
591 values as 'to be deleted' values
592 @type use_none: boolean
593 @param use_none: whether to recognise C{None} values as 'to be
594 deleted' values
596 @return: the new parameter dictionary
599 params_copy = copy.deepcopy(old_params)
600 for key, val in update_dict.iteritems():
601 if ((use_default and val == constants.VALUE_DEFAULT) or
602 (use_none and val is None)):
603 try:
604 del params_copy[key]
605 except KeyError:
606 pass
607 else:
608 params_copy[key] = val
610 return params_copy
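# Worked example of the merge semantics above (values are hypothetical): with
# use_default=True, entries set to constants.VALUE_DEFAULT are removed and all
# other entries override the old values:
#
#   old = {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/vda1"}
#   upd = {"kernel_path": constants.VALUE_DEFAULT, "root_path": "/dev/vda2"}
#   _GetUpdatedParams(old, upd)  # -> {"root_path": "/dev/vda2"}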
612 def _CheckOutputFields(static, dynamic, selected):
613 """Checks whether all selected fields are valid.
615 @type static: L{utils.FieldSet}
616 @param static: static fields set
617 @type dynamic: L{utils.FieldSet}
618 @param dynamic: dynamic fields set
625 delta = f.NonMatching(selected)
626 if delta:
627 raise errors.OpPrereqError("Unknown output fields selected: %s"
628 % ",".join(delta), errors.ECODE_INVAL)
631 def _CheckGlobalHvParams(params):
632 """Validates that given hypervisor params are not global ones.
634 This will ensure that instances don't get customised versions of
635 global parameters.
638 used_globals = constants.HVC_GLOBALS.intersection(params)
639 if used_globals:
640 msg = ("The following hypervisor parameters are global and cannot"
641 " be customized at instance level, please modify them at"
642 " cluster level: %s" % utils.CommaJoin(used_globals))
643 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
646 def _CheckNodeOnline(lu, node, msg=None):
647 """Ensure that a given node is online.
649 @param lu: the LU on behalf of which we make the check
650 @param node: the node to check
651 @param msg: if passed, should be a message to replace the default one
652 @raise errors.OpPrereqError: if the node is offline
655 if msg is None:
656 msg = "Can't use offline node"
657 if lu.cfg.GetNodeInfo(node).offline:
658 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
661 def _CheckNodeNotDrained(lu, node):
662 """Ensure that a given node is not drained.
664 @param lu: the LU on behalf of which we make the check
665 @param node: the node to check
666 @raise errors.OpPrereqError: if the node is drained
669 if lu.cfg.GetNodeInfo(node).drained:
670 raise errors.OpPrereqError("Can't use drained node %s" % node,
671 errors.ECODE_STATE)
674 def _CheckNodeVmCapable(lu, node):
675 """Ensure that a given node is vm capable.
677 @param lu: the LU on behalf of which we make the check
678 @param node: the node to check
679 @raise errors.OpPrereqError: if the node is not vm capable
682 if not lu.cfg.GetNodeInfo(node).vm_capable:
683 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
684 errors.ECODE_STATE)
687 def _CheckNodeHasOS(lu, node, os_name, force_variant):
688 """Ensure that a node supports a given OS.
690 @param lu: the LU on behalf of which we make the check
691 @param node: the node to check
692 @param os_name: the OS to query about
693 @param force_variant: whether to ignore variant errors
694 @raise errors.OpPrereqError: if the node is not supporting the OS
697 result = lu.rpc.call_os_get(node, os_name)
698 result.Raise("OS '%s' not in supported OS list for node %s" %
699 (os_name, node),
700 prereq=True, ecode=errors.ECODE_INVAL)
701 if not force_variant:
702 _CheckOSVariant(result.payload, os_name)
705 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
706 """Ensure that a node has the given secondary ip.
708 @type lu: L{LogicalUnit}
709 @param lu: the LU on behalf of which we make the check
711 @param node: the node to check
712 @type secondary_ip: string
713 @param secondary_ip: the ip to check
714 @type prereq: boolean
715 @param prereq: whether to throw a prerequisite or an execute error
716 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
717 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
720 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
721 result.Raise("Failure checking secondary ip on node %s" % node,
722 prereq=prereq, ecode=errors.ECODE_ENVIRON)
723 if not result.payload:
724 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
725 " please fix and re-run this command" % secondary_ip)
726 if prereq:
727 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
728 else:
729 raise errors.OpExecError(msg)
732 def _GetClusterDomainSecret():
733 """Reads the cluster domain secret.
736 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
737 strict=True)
740 def _CheckInstanceDown(lu, instance, reason):
741 """Ensure that an instance is not running."""
742 if instance.admin_up:
743 raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
744 (instance.name, reason), errors.ECODE_STATE)
746 pnode = instance.primary_node
747 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
748 ins_l.Raise("Can't contact node %s for instance information" % pnode,
749 prereq=True, ecode=errors.ECODE_ENVIRON)
751 if instance.name in ins_l.payload:
752 raise errors.OpPrereqError("Instance %s is running, %s" %
753 (instance.name, reason), errors.ECODE_STATE)
756 def _ExpandItemName(fn, name, kind):
757 """Expand an item name.
759 @param fn: the function to use for expansion
760 @param name: requested item name
761 @param kind: text description ('Node' or 'Instance')
762 @return: the resolved (full) name
763 @raise errors.OpPrereqError: if the item is not found
766 full_name = fn(name)
767 if full_name is None:
768 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
769 errors.ECODE_NOENT)
770 return full_name
773 def _ExpandNodeName(cfg, name):
774 """Wrapper over L{_ExpandItemName} for nodes."""
775 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
778 def _ExpandInstanceName(cfg, name):
779 """Wrapper over L{_ExpandItemName} for instance."""
780 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
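# Illustrative only (names are hypothetical): both wrappers turn a short name
# into the fully-qualified one known to the configuration, or raise
# errors.OpPrereqError if the item does not exist:
#
#   _ExpandNodeName(cfg, "node1")        # -> "node1.example.com"
#   _ExpandInstanceName(cfg, "missing")  # -> raises errors.OpPrereqError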
783 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
784 memory, vcpus, nics, disk_template, disks,
785 bep, hvp, hypervisor_name):
786 """Builds instance related env variables for hooks
788 This builds the hook environment from individual variables.
791 @param name: the name of the instance
792 @type primary_node: string
793 @param primary_node: the name of the instance's primary node
794 @type secondary_nodes: list
795 @param secondary_nodes: list of secondary nodes as strings
796 @type os_type: string
797 @param os_type: the name of the instance's OS
798 @type status: boolean
799 @param status: the should_run status of the instance
801 @param memory: the memory size of the instance
803 @param vcpus: the count of VCPUs the instance has
805 @param nics: list of tuples (ip, mac, mode, link) representing
806 the NICs the instance has
807 @type disk_template: string
808 @param disk_template: the disk template of the instance
810 @param disks: the list of (size, mode) pairs
812 @param bep: the backend parameters for the instance
814 @param hvp: the hypervisor parameters for the instance
815 @type hypervisor_name: string
816 @param hypervisor_name: the hypervisor for the instance
818 @return: the hook environment for this instance
826 env = {
827 "INSTANCE_NAME": name,
828 "INSTANCE_PRIMARY": primary_node,
829 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
830 "INSTANCE_OS_TYPE": os_type,
831 "INSTANCE_STATUS": str_status,
832 "INSTANCE_MEMORY": memory,
833 "INSTANCE_VCPUS": vcpus,
834 "INSTANCE_DISK_TEMPLATE": disk_template,
835 "INSTANCE_HYPERVISOR": hypervisor_name,
836 }
838 if nics:
839 nic_count = len(nics)
840 for idx, (ip, mac, mode, link) in enumerate(nics):
843 env["INSTANCE_NIC%d_IP" % idx] = ip
844 env["INSTANCE_NIC%d_MAC" % idx] = mac
845 env["INSTANCE_NIC%d_MODE" % idx] = mode
846 env["INSTANCE_NIC%d_LINK" % idx] = link
847 if mode == constants.NIC_MODE_BRIDGED:
848 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
849 else:
850 nic_count = 0
852 env["INSTANCE_NIC_COUNT"] = nic_count
854 if disks:
855 disk_count = len(disks)
856 for idx, (size, mode) in enumerate(disks):
857 env["INSTANCE_DISK%d_SIZE" % idx] = size
858 env["INSTANCE_DISK%d_MODE" % idx] = mode
859 else:
860 disk_count = 0
862 env["INSTANCE_DISK_COUNT"] = disk_count
864 for source, kind in [(bep, "BE"), (hvp, "HV")]:
865 for key, value in source.items():
866 env["INSTANCE_%s_%s" % (kind, key)] = value
868 return env
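# Sketch of the resulting hooks environment for a hypothetical single-NIC,
# single-disk instance (all values invented for illustration):
#
#   {
#     "INSTANCE_NAME": "inst1.example.com",
#     "INSTANCE_PRIMARY": "node1.example.com",
#     "INSTANCE_SECONDARIES": "node2.example.com",
#     "INSTANCE_NIC_COUNT": 1,
#     "INSTANCE_NIC0_MODE": constants.NIC_MODE_BRIDGED,
#     "INSTANCE_DISK_COUNT": 1,
#     "INSTANCE_DISK0_SIZE": 10240,
#     "INSTANCE_BE_memory": 512,
#     ...
#   }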
871 def _NICListToTuple(lu, nics):
872 """Build a list of nic information tuples.
874 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
875 value in LUInstanceQueryData.
877 @type lu: L{LogicalUnit}
878 @param lu: the logical unit on whose behalf we execute
879 @type nics: list of L{objects.NIC}
880 @param nics: list of nics to convert to hooks tuples
884 cluster = lu.cfg.GetClusterInfo()
888 filled_params = cluster.SimpleFillNIC(nic.nicparams)
889 mode = filled_params[constants.NIC_MODE]
890 link = filled_params[constants.NIC_LINK]
891 hooks_nics.append((ip, mac, mode, link))
895 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
896 """Builds instance related env variables for hooks from an object.
898 @type lu: L{LogicalUnit}
899 @param lu: the logical unit on whose behalf we execute
900 @type instance: L{objects.Instance}
901 @param instance: the instance for which we should build the
904 @param override: dictionary with key/values that will override
907 @return: the hook environment dictionary
910 cluster = lu.cfg.GetClusterInfo()
911 bep = cluster.FillBE(instance)
912 hvp = cluster.FillHV(instance)
913 args = {
914 'name': instance.name,
915 'primary_node': instance.primary_node,
916 'secondary_nodes': instance.secondary_nodes,
917 'os_type': instance.os,
918 'status': instance.admin_up,
919 'memory': bep[constants.BE_MEMORY],
920 'vcpus': bep[constants.BE_VCPUS],
921 'nics': _NICListToTuple(lu, instance.nics),
922 'disk_template': instance.disk_template,
923 'disks': [(disk.size, disk.mode) for disk in instance.disks],
926 'hypervisor_name': instance.hypervisor,
927 }
928 if override:
929 args.update(override)
930 return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
933 def _AdjustCandidatePool(lu, exceptions):
934 """Adjust the candidate pool after node operations.
937 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
938 if mod_list:
939 lu.LogInfo("Promoted nodes to master candidate role: %s",
940 utils.CommaJoin(node.name for node in mod_list))
941 for name in mod_list:
942 lu.context.ReaddNode(name)
943 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
944 if mc_now > mc_max:
945 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
946 (mc_now, mc_max))
949 def _DecideSelfPromotion(lu, exceptions=None):
950 """Decide whether I should promote myself as a master candidate.
953 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
954 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
955 # the new node will increase mc_max with one, so:
956 mc_should = min(mc_should + 1, cp_size)
957 return mc_now < mc_should
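# Worked example of the promotion arithmetic above (numbers are hypothetical):
# with candidate_pool_size=10, mc_now=3 and mc_should=3, the node about to join
# yields mc_should = min(3 + 1, 10) = 4, and since 3 < 4 the function returns
# True, i.e. "promote myself".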
960 def _CheckNicsBridgesExist(lu, target_nics, target_node):
961 """Check that the bridges needed by a list of nics exist.
964 cluster = lu.cfg.GetClusterInfo()
965 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
966 brlist = [params[constants.NIC_LINK] for params in paramslist
967 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
969 result = lu.rpc.call_bridges_exist(target_node, brlist)
970 result.Raise("Error checking bridges on destination node '%s'" %
971 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
974 def _CheckInstanceBridgesExist(lu, instance, node=None):
975 """Check that the bridges needed by an instance exist.
978 if node is None:
979 node = instance.primary_node
980 _CheckNicsBridgesExist(lu, instance.nics, node)
983 def _CheckOSVariant(os_obj, name):
984 """Check whether an OS name conforms to the os variants specification.
986 @type os_obj: L{objects.OS}
987 @param os_obj: OS object to check
989 @param name: OS name passed by the user, to check for validity
992 if not os_obj.supported_variants:
993 return
994 variant = objects.OS.GetVariant(name)
995 if not variant:
996 raise errors.OpPrereqError("OS name must include a variant",
997 errors.ECODE_INVAL)
999 if variant not in os_obj.supported_variants:
1000 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
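# Illustrative naming (the OS names are hypothetical): a variant-aware OS is
# referred to as "<os>+<variant>", e.g. "debootstrap+default"; _CheckOSVariant
# accepts that form only if "default" appears in os_obj.supported_variants and
# rejects a bare "debootstrap".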
1003 def _GetNodeInstancesInner(cfg, fn):
1004 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1007 def _GetNodeInstances(cfg, node_name):
1008 """Returns a list of all primary and secondary instances on a node.
1012 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1015 def _GetNodePrimaryInstances(cfg, node_name):
1016 """Returns primary instances on a node.
1019 return _GetNodeInstancesInner(cfg,
1020 lambda inst: node_name == inst.primary_node)
1023 def _GetNodeSecondaryInstances(cfg, node_name):
1024 """Returns secondary instances on a node.
1027 return _GetNodeInstancesInner(cfg,
1028 lambda inst: node_name in inst.secondary_nodes)
1031 def _GetStorageTypeArgs(cfg, storage_type):
1032 """Returns the arguments for a storage type.
1035 # Special case for file storage
1036 if storage_type == constants.ST_FILE:
1037 # storage.FileStorage wants a list of storage directories
1038 return [[cfg.GetFileStorageDir()]]
1043 def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1046 for dev in instance.disks:
1047 cfg.SetDiskID(dev, node_name)
1049 result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1050 result.Raise("Failed to get disk status from node %s" % node_name,
1051 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1053 for idx, bdev_status in enumerate(result.payload):
1054 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1060 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1061 """Check the sanity of iallocator and node arguments and use the
1062 cluster-wide iallocator if appropriate.
1064 Check that at most one of (iallocator, node) is specified. If none is
1065 specified, then the LU's opcode's iallocator slot is filled with the
1066 cluster-wide default iallocator.
1068 @type iallocator_slot: string
1069 @param iallocator_slot: the name of the opcode iallocator slot
1070 @type node_slot: string
1071 @param node_slot: the name of the opcode target node slot
1074 node = getattr(lu.op, node_slot, None)
1075 iallocator = getattr(lu.op, iallocator_slot, None)
1077 if node is not None and iallocator is not None:
1078 raise errors.OpPrereqError("Do not specify both an iallocator and a node.",
1079 errors.ECODE_INVAL)
1080 elif node is None and iallocator is None:
1081 default_iallocator = lu.cfg.GetDefaultIAllocator()
1082 if default_iallocator:
1083 setattr(lu.op, iallocator_slot, default_iallocator)
1084 else:
1085 raise errors.OpPrereqError("No iallocator or node given and no"
1086 " cluster-wide default iallocator found."
1087 " Please specify either an iallocator or a"
1088 " node, or set a cluster-wide default"
1089 " iallocator.")
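# A hedged usage sketch (slot names are hypothetical): an LU's CheckArguments
# would typically call this helper with the names of its own opcode slots:
#
#   def CheckArguments(self):
#     _CheckIAllocatorOrNode(self, "iallocator", "pnode")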
1092 class LUClusterPostInit(LogicalUnit):
1093 """Logical unit for running hooks after cluster initialization.
1096 HPATH = "cluster-init"
1097 HTYPE = constants.HTYPE_CLUSTER
1099 def BuildHooksEnv(self):
1103 env = {"OP_TARGET": self.cfg.GetClusterName()}
1104 mn = self.cfg.GetMasterNode()
1105 return env, [], [mn]
1107 def Exec(self, feedback_fn):
1114 class LUClusterDestroy(LogicalUnit):
1115 """Logical unit for destroying the cluster.
1118 HPATH = "cluster-destroy"
1119 HTYPE = constants.HTYPE_CLUSTER
1121 def BuildHooksEnv(self):
1125 env = {"OP_TARGET": self.cfg.GetClusterName()}
1126 return env, [], []
1128 def CheckPrereq(self):
1129 """Check prerequisites.
1131 This checks whether the cluster is empty.
1133 Any errors are signaled by raising errors.OpPrereqError.
1136 master = self.cfg.GetMasterNode()
1138 nodelist = self.cfg.GetNodeList()
1139 if len(nodelist) != 1 or nodelist[0] != master:
1140 raise errors.OpPrereqError("There are still %d node(s) in"
1141 " this cluster." % (len(nodelist) - 1),
1142 errors.ECODE_INVAL)
1143 instancelist = self.cfg.GetInstanceList()
1144 if instancelist:
1145 raise errors.OpPrereqError("There are still %d instance(s) in"
1146 " this cluster." % len(instancelist),
1147 errors.ECODE_INVAL)
1149 def Exec(self, feedback_fn):
1150 """Destroys the cluster.
1153 master = self.cfg.GetMasterNode()
1155 # Run post hooks on master node before it's removed
1156 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1157 try:
1158 hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1159 except:
1160 # pylint: disable-msg=W0702
1161 self.LogWarning("Errors occurred running hooks on %s" % master)
1163 result = self.rpc.call_node_stop_master(master, False)
1164 result.Raise("Could not disable the master role")
1169 def _VerifyCertificate(filename):
1170 """Verifies a certificate for LUClusterVerify.
1172 @type filename: string
1173 @param filename: Path to PEM file
1177 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1178 utils.ReadFile(filename))
1179 except Exception, err: # pylint: disable-msg=W0703
1180 return (LUClusterVerify.ETYPE_ERROR,
1181 "Failed to load X509 certificate %s: %s" % (filename, err))
1184 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1185 constants.SSL_CERT_EXPIRATION_ERROR)
1187 if msg:
1188 fnamemsg = "While verifying %s: %s" % (filename, msg)
1189 else:
1190 fnamemsg = None
1192 if errcode is None:
1193 return (None, fnamemsg)
1194 elif errcode == utils.CERT_WARNING:
1195 return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
1196 elif errcode == utils.CERT_ERROR:
1197 return (LUClusterVerify.ETYPE_ERROR, fnamemsg)
1198 else:
1199 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
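# Return convention, illustrated (paths and messages are hypothetical): a
# healthy certificate yields (None, None), one close to expiry yields
# (LUClusterVerify.ETYPE_WARNING, "While verifying /path/cert.pem: ..."), and an
# unreadable or expired one yields the corresponding ETYPE_ERROR tuple.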
1202 class LUClusterVerify(LogicalUnit):
1203 """Verifies the cluster status.
1206 HPATH = "cluster-verify"
1207 HTYPE = constants.HTYPE_CLUSTER
1210 TCLUSTER = "cluster"
1211 TNODE = "node"
1212 TINSTANCE = "instance"
1214 ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1215 ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1216 EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1217 EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1218 EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1219 EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1220 EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1221 EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1222 EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1223 ENODEDRBD = (TNODE, "ENODEDRBD")
1224 ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1225 ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1226 ENODEHOOKS = (TNODE, "ENODEHOOKS")
1227 ENODEHV = (TNODE, "ENODEHV")
1228 ENODELVM = (TNODE, "ENODELVM")
1229 ENODEN1 = (TNODE, "ENODEN1")
1230 ENODENET = (TNODE, "ENODENET")
1231 ENODEOS = (TNODE, "ENODEOS")
1232 ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1233 ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1234 ENODERPC = (TNODE, "ENODERPC")
1235 ENODESSH = (TNODE, "ENODESSH")
1236 ENODEVERSION = (TNODE, "ENODEVERSION")
1237 ENODESETUP = (TNODE, "ENODESETUP")
1238 ENODETIME = (TNODE, "ENODETIME")
1239 ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1241 ETYPE_FIELD = "code"
1242 ETYPE_ERROR = "ERROR"
1243 ETYPE_WARNING = "WARNING"
1245 _HOOKS_INDENT_RE = re.compile("^", re.M)
1247 class NodeImage(object):
1248 """A class representing the logical and physical status of a node.
1251 @ivar name: the node name to which this object refers
1252 @ivar volumes: a structure as returned from
1253 L{ganeti.backend.GetVolumeList} (runtime)
1254 @ivar instances: a list of running instances (runtime)
1255 @ivar pinst: list of configured primary instances (config)
1256 @ivar sinst: list of configured secondary instances (config)
1257 @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
1258 of this node (config)
1259 @ivar mfree: free memory, as reported by hypervisor (runtime)
1260 @ivar dfree: free disk, as reported by the node (runtime)
1261 @ivar offline: the offline status (config)
1262 @type rpc_fail: boolean
1263 @ivar rpc_fail: whether the RPC verify call failed (overall,
1264 not whether the individual keys were correct) (runtime)
1265 @type lvm_fail: boolean
1266 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1267 @type hyp_fail: boolean
1268 @ivar hyp_fail: whether the RPC call didn't return the instance list
1269 @type ghost: boolean
1270 @ivar ghost: whether this is a known node or not (config)
1271 @type os_fail: boolean
1272 @ivar os_fail: whether the RPC call didn't return valid OS data
1274 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1275 @type vm_capable: boolean
1276 @ivar vm_capable: whether the node can host instances
1279 def __init__(self, offline=False, name=None, vm_capable=True):
1288 self.offline = offline
1289 self.vm_capable = vm_capable
1290 self.rpc_fail = False
1291 self.lvm_fail = False
1292 self.hyp_fail = False
1294 self.os_fail = False
1297 def ExpandNames(self):
1298 self.needed_locks = {
1299 locking.LEVEL_NODE: locking.ALL_SET,
1300 locking.LEVEL_INSTANCE: locking.ALL_SET,
1302 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1304 def _Error(self, ecode, item, msg, *args, **kwargs):
1305 """Format an error message.
1307 Based on the opcode's error_codes parameter, either format a
1308 parseable error code, or a simpler error string.
1310 This must be called only from Exec and functions called from Exec.
1313 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1315 # first complete the msg
1318 # then format the whole message
1319 if self.op.error_codes:
1320 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1326 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1327 # and finally report it via the feedback_fn
1328 self._feedback_fn(" - %s" % msg)
1330 def _ErrorIf(self, cond, *args, **kwargs):
1331 """Log an error message if the passed condition is True.
1334 cond = bool(cond) or self.op.debug_simulate_errors
1335 if cond:
1336 self._Error(*args, **kwargs)
1337 # do not mark the operation as failed for WARN cases only
1338 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1339 self.bad = self.bad or cond
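# Usage sketch (the node name is hypothetical): the verification helpers below
# call this as
#
#   _ErrorIf(test, self.ENODERPC, "node1.example.com",
#            "unable to verify node: no data returned")
#
# which, with op.error_codes set, is emitted as a parseable line roughly like
# "ERROR:ENODERPC:node:node1.example.com:unable to verify node: ..." and
# otherwise as a plain human-readable "ERROR: node node1.example.com: ..." line.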
1341 def _VerifyNode(self, ninfo, nresult):
1342 """Perform some basic validation on data returned from a node.
1344 - check the result data structure is well formed and has all the
1345 mandatory fields
1346 - check ganeti version
1348 @type ninfo: L{objects.Node}
1349 @param ninfo: the node to check
1350 @param nresult: the results from the node
1352 @return: whether overall this call was successful (and we can expect
1353 reasonable values in the response)
1357 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1359 # main result, nresult should be a non-empty dict
1360 test = not nresult or not isinstance(nresult, dict)
1361 _ErrorIf(test, self.ENODERPC, node,
1362 "unable to verify node: no data returned")
1366 # compares ganeti version
1367 local_version = constants.PROTOCOL_VERSION
1368 remote_version = nresult.get("version", None)
1369 test = not (remote_version and
1370 isinstance(remote_version, (list, tuple)) and
1371 len(remote_version) == 2)
1372 _ErrorIf(test, self.ENODERPC, node,
1373 "connection to node returned invalid data")
1377 test = local_version != remote_version[0]
1378 _ErrorIf(test, self.ENODEVERSION, node,
1379 "incompatible protocol versions: master %s,"
1380 " node %s", local_version, remote_version[0])
1384 # node seems compatible, we can actually try to look into its results
1386 # full package version
1387 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1388 self.ENODEVERSION, node,
1389 "software version mismatch: master %s, node %s",
1390 constants.RELEASE_VERSION, remote_version[1],
1391 code=self.ETYPE_WARNING)
1393 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1394 if ninfo.vm_capable and isinstance(hyp_result, dict):
1395 for hv_name, hv_result in hyp_result.iteritems():
1396 test = hv_result is not None
1397 _ErrorIf(test, self.ENODEHV, node,
1398 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1400 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1401 if ninfo.vm_capable and isinstance(hvp_result, list):
1402 for item, hv_name, hv_result in hvp_result:
1403 _ErrorIf(True, self.ENODEHV, node,
1404 "hypervisor %s parameter verify failure (source %s): %s",
1405 hv_name, item, hv_result)
1407 test = nresult.get(constants.NV_NODESETUP,
1408 ["Missing NODESETUP results"])
1409 _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1410 "; ".join(test))
1412 return True
1414 def _VerifyNodeTime(self, ninfo, nresult,
1415 nvinfo_starttime, nvinfo_endtime):
1416 """Check the node time.
1418 @type ninfo: L{objects.Node}
1419 @param ninfo: the node to check
1420 @param nresult: the remote results for the node
1421 @param nvinfo_starttime: the start time of the RPC call
1422 @param nvinfo_endtime: the end time of the RPC call
1426 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1428 ntime = nresult.get(constants.NV_TIME, None)
1430 ntime_merged = utils.MergeTime(ntime)
1431 except (ValueError, TypeError):
1432 _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1433 return
1435 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1436 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1437 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1438 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1439 else:
1440 ntime_diff = None
1442 _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1443 "Node time diverges by at least %s from master node time",
1444 ntime_diff)
1446 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1447 """Check the node LVM results.
1449 @type ninfo: L{objects.Node}
1450 @param ninfo: the node to check
1451 @param nresult: the remote results for the node
1452 @param vg_name: the configured VG name
1459 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1461 # checks vg existence and size > 20G
1462 vglist = nresult.get(constants.NV_VGLIST, None)
1463 test = vglist is None
1464 _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1465 if not test:
1466 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1467 constants.MIN_VG_SIZE)
1468 _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1471 pvlist = nresult.get(constants.NV_PVLIST, None)
1472 test = pvlist is None
1473 _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1475 # check that ':' is not present in PV names, since it's a
1476 # special character for lvcreate (denotes the range of PEs to
1478 for _, pvname, owner_vg in pvlist:
1479 test = ":" in pvname
1480 _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1481 " '%s' of VG '%s'", pvname, owner_vg)
1483 def _VerifyNodeNetwork(self, ninfo, nresult):
1484 """Check the node network connectivity.
1486 @type ninfo: L{objects.Node}
1487 @param ninfo: the node to check
1488 @param nresult: the remote results for the node
1492 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1494 test = constants.NV_NODELIST not in nresult
1495 _ErrorIf(test, self.ENODESSH, node,
1496 "node hasn't returned node ssh connectivity data")
1497 if not test:
1498 if nresult[constants.NV_NODELIST]:
1499 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1500 _ErrorIf(True, self.ENODESSH, node,
1501 "ssh communication with node '%s': %s", a_node, a_msg)
1503 test = constants.NV_NODENETTEST not in nresult
1504 _ErrorIf(test, self.ENODENET, node,
1505 "node hasn't returned node tcp connectivity data")
1506 if not test:
1507 if nresult[constants.NV_NODENETTEST]:
1508 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1509 for anode in nlist:
1510 _ErrorIf(True, self.ENODENET, node,
1511 "tcp communication with node '%s': %s",
1512 anode, nresult[constants.NV_NODENETTEST][anode])
1514 test = constants.NV_MASTERIP not in nresult
1515 _ErrorIf(test, self.ENODENET, node,
1516 "node hasn't returned node master IP reachability data")
1517 if not test:
1518 if not nresult[constants.NV_MASTERIP]:
1519 if node == self.master_node:
1520 msg = "the master node cannot reach the master IP (not configured?)"
1522 msg = "cannot reach the master IP"
1523 _ErrorIf(True, self.ENODENET, node, msg)
1525 def _VerifyInstance(self, instance, instanceconfig, node_image,
1526 diskstatus):
1527 """Verify an instance.
1529 This function checks to see if the required block devices are
1530 available on the instance's node.
1533 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1534 node_current = instanceconfig.primary_node
1536 node_vol_should = {}
1537 instanceconfig.MapLVsByNode(node_vol_should)
1539 for node in node_vol_should:
1540 n_img = node_image[node]
1541 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1542 # ignore missing volumes on offline or broken nodes
1544 for volume in node_vol_should[node]:
1545 test = volume not in n_img.volumes
1546 _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1547 "volume %s missing on node %s", volume, node)
1549 if instanceconfig.admin_up:
1550 pri_img = node_image[node_current]
1551 test = instance not in pri_img.instances and not pri_img.offline
1552 _ErrorIf(test, self.EINSTANCEDOWN, instance,
1553 "instance not running on its primary node %s",
1554 node_current)
1556 for node, n_img in node_image.items():
1557 if node != node_current:
1558 test = instance in n_img.instances
1559 _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1560 "instance should not run on node %s", node)
1562 diskdata = [(nname, success, status, idx)
1563 for (nname, disks) in diskstatus.items()
1564 for idx, (success, status) in enumerate(disks)]
1566 for nname, success, bdev_status, idx in diskdata:
1567 # the 'ghost node' construction in Exec() ensures that we have a
1569 snode = node_image[nname]
1570 bad_snode = snode.ghost or snode.offline
1571 _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
1572 self.EINSTANCEFAULTYDISK, instance,
1573 "couldn't retrieve status for disk/%s on %s: %s",
1574 idx, nname, bdev_status)
1575 _ErrorIf((instanceconfig.admin_up and success and
1576 bdev_status.ldisk_status == constants.LDS_FAULTY),
1577 self.EINSTANCEFAULTYDISK, instance,
1578 "disk/%s on %s is faulty", idx, nname)
1580 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1581 """Verify if there are any unknown volumes in the cluster.
1583 The .os, .swap and backup volumes are ignored. All other volumes are
1584 reported as unknown.
1586 @type reserved: L{ganeti.utils.FieldSet}
1587 @param reserved: a FieldSet of reserved volume names
1590 for node, n_img in node_image.items():
1591 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1592 # skip non-healthy nodes
1594 for volume in n_img.volumes:
1595 test = ((node not in node_vol_should or
1596 volume not in node_vol_should[node]) and
1597 not reserved.Matches(volume))
1598 self._ErrorIf(test, self.ENODEORPHANLV, node,
1599 "volume %s is unknown", volume)
1601 def _VerifyOrphanInstances(self, instancelist, node_image):
1602 """Verify the list of running instances.
1604 This checks what instances are running but unknown to the cluster.
1607 for node, n_img in node_image.items():
1608 for o_inst in n_img.instances:
1609 test = o_inst not in instancelist
1610 self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1611 "instance %s on node %s should not exist", o_inst, node)
1613 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1614 """Verify N+1 Memory Resilience.
1616 Check that if one single node dies we can still start all the
1617 instances it was primary for.
1620 for node, n_img in node_image.items():
1621 # This code checks that every node which is now listed as
1622 # secondary has enough memory to host all instances it is
1623 # supposed to, should a single other node in the cluster fail.
1624 # FIXME: not ready for failover to an arbitrary node
1625 # FIXME: does not support file-backed instances
1626 # WARNING: we currently take into account down instances as well
1627 # as up ones, considering that even if they're down someone
1628 # might want to start them even in the event of a node failure.
1630 # we're skipping offline nodes from the N+1 warning, since
1631 # most likely we don't have good memory information from them;
1632 # we already list instances living on such nodes, and that's
1635 for prinode, instances in n_img.sbp.items():
1636 needed_mem = 0
1637 for instance in instances:
1638 bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1639 if bep[constants.BE_AUTO_BALANCE]:
1640 needed_mem += bep[constants.BE_MEMORY]
1641 test = n_img.mfree < needed_mem
1642 self._ErrorIf(test, self.ENODEN1, node,
1643 "not enough memory to accommodate instance failovers"
1644 " should node %s fail", prinode)
1646 def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1647 master_files):
1648 """Verifies and computes the node required file checksums.
1650 @type ninfo: L{objects.Node}
1651 @param ninfo: the node to check
1652 @param nresult: the remote results for the node
1653 @param file_list: required list of files
1654 @param local_cksum: dictionary of local files and their checksums
1655 @param master_files: list of files that only masters should have
1659 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1661 remote_cksum = nresult.get(constants.NV_FILELIST, None)
1662 test = not isinstance(remote_cksum, dict)
1663 _ErrorIf(test, self.ENODEFILECHECK, node,
1664 "node hasn't returned file checksum data")
1668 for file_name in file_list:
1669 node_is_mc = ninfo.master_candidate
1670 must_have = (file_name not in master_files) or node_is_mc
1672 test1 = file_name not in remote_cksum
1674 test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1676 test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1677 _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1678 "file '%s' missing", file_name)
1679 _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1680 "file '%s' has wrong checksum", file_name)
1681 # not candidate and this is not a must-have file
1682 _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1683 "file '%s' should not exist on non master"
1684 " candidates (and the file is outdated)", file_name)
1685 # all good, except non-master/non-must have combination
1686 _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1687 "file '%s' should not exist"
1688 " on non master candidates", file_name)
1690 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
1692 """Verifies the node DRBD status.
1694 @type ninfo: L{objects.Node}
1695 @param ninfo: the node to check
1696 @param nresult: the remote results for the node
1697 @param instanceinfo: the dict of instances
1698 @param drbd_helper: the configured DRBD usermode helper
1699 @param drbd_map: the DRBD map as returned by
1700 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1704 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1707 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1708 test = (helper_result == None)
1709 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1710 "no drbd usermode helper returned")
1711 if helper_result:
1712 status, payload = helper_result
1713 test = not status
1714 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1715 "drbd usermode helper check unsuccessful: %s", payload)
1716 test = status and (payload != drbd_helper)
1717 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1718 "wrong drbd usermode helper: %s", payload)
1720 # compute the DRBD minors
1721 node_drbd = {}
1722 for minor, instance in drbd_map[node].items():
1723 test = instance not in instanceinfo
1724 _ErrorIf(test, self.ECLUSTERCFG, None,
1725 "ghost instance '%s' in temporary DRBD map", instance)
1726 # ghost instance should not be running, but otherwise we
1727 # don't give double warnings (both ghost instance and
1728 # unallocated minor in use)
1729 if test:
1730 node_drbd[minor] = (instance, False)
1731 else:
1732 instance = instanceinfo[instance]
1733 node_drbd[minor] = (instance.name, instance.admin_up)
1735 # and now check them
1736 used_minors = nresult.get(constants.NV_DRBDLIST, [])
1737 test = not isinstance(used_minors, (tuple, list))
1738 _ErrorIf(test, self.ENODEDRBD, node,
1739 "cannot parse drbd status file: %s", str(used_minors))
1740 if test:
1741 # we cannot check drbd status
1742 return
1744 for minor, (iname, must_exist) in node_drbd.items():
1745 test = minor not in used_minors and must_exist
1746 _ErrorIf(test, self.ENODEDRBD, node,
1747 "drbd minor %d of instance %s is not active", minor, iname)
1748 for minor in used_minors:
1749 test = minor not in node_drbd
1750 _ErrorIf(test, self.ENODEDRBD, node,
1751 "unallocated drbd minor %d is in use", minor)
1753 def _UpdateNodeOS(self, ninfo, nresult, nimg):
1754 """Builds the node OS structures.
1756 @type ninfo: L{objects.Node}
1757 @param ninfo: the node to check
1758 @param nresult: the remote results for the node
1759 @param nimg: the node image object
1763 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1765 remote_os = nresult.get(constants.NV_OSLIST, None)
1766 test = (not isinstance(remote_os, list) or
1767 not compat.all(isinstance(v, list) and len(v) == 7
1768 for v in remote_os))
1770 _ErrorIf(test, self.ENODEOS, node,
1771 "node hasn't returned valid OS data")
1778 os_dict = {}
1780 for (name, os_path, status, diagnose,
1781 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1783 if name not in os_dict:
1784 os_dict[name] = []
1786 # parameters is a list of lists instead of list of tuples due to
1787 # JSON lacking a real tuple type, fix it:
1788 parameters = [tuple(v) for v in parameters]
1789 os_dict[name].append((os_path, status, diagnose,
1790 set(variants), set(parameters), set(api_ver)))
1792 nimg.oslist = os_dict
1794 def _VerifyNodeOS(self, ninfo, nimg, base):
1795 """Verifies the node OS list.
1797 @type ninfo: L{objects.Node}
1798 @param ninfo: the node to check
1799 @param nimg: the node image object
1800 @param base: the 'template' node we match against (e.g. from the master)
1804 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1806 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1808 for os_name, os_data in nimg.oslist.items():
1809 assert os_data, "Empty OS status for OS %s?!" % os_name
1810 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1811 _ErrorIf(not f_status, self.ENODEOS, node,
1812 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1813 _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1814 "OS '%s' has multiple entries (first one shadows the rest): %s",
1815 os_name, utils.CommaJoin([v[0] for v in os_data]))
1816 # this will be caught in the backend too
1817 _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1818 and not f_var, self.ENODEOS, node,
1819 "OS %s with API at least %d does not declare any variant",
1820 os_name, constants.OS_API_V15)
1821 # comparisons with the 'base' image
1822 test = os_name not in base.oslist
1823 _ErrorIf(test, self.ENODEOS, node,
1824 "Extra OS %s not present on reference node (%s)",
1828 assert base.oslist[os_name], "Base node has empty OS status?"
1829 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1831 # base OS is invalid, skipping
1833 for kind, a, b in [("API version", f_api, b_api),
1834 ("variants list", f_var, b_var),
1835 ("parameters", f_param, b_param)]:
1836 _ErrorIf(a != b, self.ENODEOS, node,
1837 "OS %s %s differs from reference node %s: %s vs. %s",
1838 kind, os_name, base.name,
1839 utils.CommaJoin(a), utils.CommaJoin(b))
1841 # check any missing OSes
1842 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
1843 _ErrorIf(missing, self.ENODEOS, node,
1844 "OSes present on reference node %s but missing on this node: %s",
1845 base.name, utils.CommaJoin(missing))
1847 def _VerifyOob(self, ninfo, nresult):
1848 """Verifies out of band functionality of a node.
1850 @type ninfo: L{objects.Node}
1851 @param ninfo: the node to check
1852 @param nresult: the remote results for the node
1856 # We just have to verify the paths on master and/or master candidates
1857 # as the oob helper is invoked on the master
1858 if ((ninfo.master_candidate or ninfo.master_capable) and
1859 constants.NV_OOB_PATHS in nresult):
1860 for path_result in nresult[constants.NV_OOB_PATHS]:
1861 self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
1863 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1864 """Verifies and updates the node volume data.
1866 This function will update a L{NodeImage}'s internal structures
1867 with data from the remote call.
1869 @type ninfo: L{objects.Node}
1870 @param ninfo: the node to check
1871 @param nresult: the remote results for the node
1872 @param nimg: the node image object
1873 @param vg_name: the configured VG name
1877 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1879 nimg.lvm_fail = True
1880 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1881 if vg_name is None:
1882 pass
1883 elif isinstance(lvdata, basestring):
1884 _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1885 utils.SafeEncode(lvdata))
1886 elif not isinstance(lvdata, dict):
1887 _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1889 nimg.volumes = lvdata
1890 nimg.lvm_fail = False
1892 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1893 """Verifies and updates the node instance list.
1895 If the listing was successful, then updates this node's instance
1896 list. Otherwise, it marks the RPC call as failed for the instance
1899 @type ninfo: L{objects.Node}
1900 @param ninfo: the node to check
1901 @param nresult: the remote results for the node
1902 @param nimg: the node image object
1905 idata = nresult.get(constants.NV_INSTANCELIST, None)
1906 test = not isinstance(idata, list)
1907 self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1908 " (instancelist): %s", utils.SafeEncode(str(idata)))
1910 nimg.hyp_fail = True
1912 nimg.instances = idata
1914 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1915 """Verifies and computes a node information map
1917 @type ninfo: L{objects.Node}
1918 @param ninfo: the node to check
1919 @param nresult: the remote results for the node
1920 @param nimg: the node image object
1921 @param vg_name: the configured VG name
1925 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1927 # try to read free memory (from the hypervisor)
1928 hv_info = nresult.get(constants.NV_HVINFO, None)
1929 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1930 _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1933 nimg.mfree = int(hv_info["memory_free"])
1934 except (ValueError, TypeError):
1935 _ErrorIf(True, self.ENODERPC, node,
1936 "node returned invalid nodeinfo, check hypervisor")
1938 # FIXME: devise a free space model for file based instances as well
1939 if vg_name is not None:
1940 test = (constants.NV_VGLIST not in nresult or
1941 vg_name not in nresult[constants.NV_VGLIST])
1942 _ErrorIf(test, self.ENODELVM, node,
1943 "node didn't return data for the volume group '%s'"
1944 " - it is either missing or broken", vg_name)
1947 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1948 except (ValueError, TypeError):
1949 _ErrorIf(True, self.ENODERPC, node,
1950 "node returned invalid LVM info, check LVM status")
1952 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
1953 """Gets per-disk status information for all instances.
1955 @type nodelist: list of strings
1956 @param nodelist: Node names
1957 @type node_image: dict of (name, L{objects.Node})
1958 @param node_image: Node objects
1959 @type instanceinfo: dict of (name, L{objects.Instance})
1960 @param instanceinfo: Instance objects
1961 @rtype: {instance: {node: [(success, payload)]}}
1962 @return: a dictionary of per-instance dictionaries with nodes as
1963 keys and disk information as values; the disk information is a
1964 list of tuples (success, payload)
1967 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1970 node_disks_devonly = {}
1971 diskless_instances = set()
1972 diskless = constants.DT_DISKLESS
1974 for nname in nodelist:
1975 node_instances = list(itertools.chain(node_image[nname].pinst,
1976 node_image[nname].sinst))
1977 diskless_instances.update(inst for inst in node_instances
1978 if instanceinfo[inst].disk_template == diskless)
1979 disks = [(inst, disk)
1980 for inst in node_instances
1981 for disk in instanceinfo[inst].disks]
1984 # No need to collect data
1987 node_disks[nname] = disks
1989 # Creating copies as SetDiskID below will modify the objects and that can
1990 # lead to incorrect data returned from nodes
1991 devonly = [dev.Copy() for (_, dev) in disks]
1994 self.cfg.SetDiskID(dev, nname)
1996 node_disks_devonly[nname] = devonly
1998 assert len(node_disks) == len(node_disks_devonly)
2000 # Collect data from all nodes with disks
2001 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2004 assert len(result) == len(node_disks)
2008 for (nname, nres) in result.items():
2009 disks = node_disks[nname]
2012 # No data from this node
2013 data = len(disks) * [(False, "node offline")]
2016 _ErrorIf(msg, self.ENODERPC, nname,
2017 "while getting disk information: %s", msg)
2019 # No data from this node
2020 data = len(disks) * [(False, msg)]
2023 for idx, i in enumerate(nres.payload):
2024 if isinstance(i, (tuple, list)) and len(i) == 2:
2027 logging.warning("Invalid result from node %s, entry %d: %s",
2029 data.append((False, "Invalid result from the remote node"))
2031 for ((inst, _), status) in zip(disks, data):
2032 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2034 # Add empty entries for diskless instances.
2035 for inst in diskless_instances:
2036 assert inst not in instdisk
2039 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2040 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2041 compat.all(isinstance(s, (tuple, list)) and
2042 len(s) == 2 for s in statuses)
2043 for inst, nnames in instdisk.items()
2044 for nname, statuses in nnames.items())
2045 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
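# Illustrative sketch of the structure assembled above (instance and node
# names are hypothetical; "drbd_status" stands in for the payload object
# returned by the blockdev RPC):
#   instdisk = {
#     "inst1.example.com": {
#       "node1.example.com": [(True, drbd_status), (True, drbd_status)],
#       "node2.example.com": [(False, "node offline")],
#     },
#     "diskless-inst.example.com": {},
#   }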
2049 def _VerifyHVP(self, hvp_data):
2050 """Verifies locally the syntax of the hypervisor parameters.
2053 for item, hv_name, hv_params in hvp_data:
2054 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2057 hv_class = hypervisor.GetHypervisor(hv_name)
2058 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2059 hv_class.CheckParameterSyntax(hv_params)
2060 except errors.GenericError, err:
2061 self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
2064 def BuildHooksEnv(self):
2067 Cluster-Verify hooks run only in the post phase; if they fail, their
2068 output is logged in the verify output and the verification fails.
2071 all_nodes = self.cfg.GetNodeList()
2073 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2075 for node in self.cfg.GetAllNodesInfo().values():
2076 env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2078 return env, [], all_nodes
2080 def Exec(self, feedback_fn):
2081 """Verify integrity of cluster, performing various test on nodes.
2084 # This method has too many local variables. pylint: disable-msg=R0914
2086 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2087 verbose = self.op.verbose
2088 self._feedback_fn = feedback_fn
2089 feedback_fn("* Verifying global settings")
2090 for msg in self.cfg.VerifyConfig():
2091 _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2093 # Check the cluster certificates
2094 for cert_filename in constants.ALL_CERT_FILES:
2095 (errcode, msg) = _VerifyCertificate(cert_filename)
2096 _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2098 vg_name = self.cfg.GetVGName()
2099 drbd_helper = self.cfg.GetDRBDHelper()
2100 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2101 cluster = self.cfg.GetClusterInfo()
2102 nodelist = utils.NiceSort(self.cfg.GetNodeList())
2103 nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
2104 nodeinfo_byname = dict(zip(nodelist, nodeinfo))
2105 instancelist = utils.NiceSort(self.cfg.GetInstanceList())
2106 instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
2107 for iname in instancelist)
2108 groupinfo = self.cfg.GetAllNodeGroupsInfo()
2109 i_non_redundant = [] # Non redundant instances
2110 i_non_a_balanced = [] # Non auto-balanced instances
2111 n_offline = 0 # Count of offline nodes
2112 n_drained = 0 # Count of nodes being drained
2113 node_vol_should = {}
2115 # FIXME: verify OS list
2116 # do local checksums
2117 master_files = [constants.CLUSTER_CONF_FILE]
2118 master_node = self.master_node = self.cfg.GetMasterNode()
2119 master_ip = self.cfg.GetMasterIP()
2121 file_names = ssconf.SimpleStore().GetFileList()
2122 file_names.extend(constants.ALL_CERT_FILES)
2123 file_names.extend(master_files)
2124 if cluster.modify_etc_hosts:
2125 file_names.append(constants.ETC_HOSTS)
2127 local_checksums = utils.FingerprintFiles(file_names)
2129 # Compute the set of hypervisor parameters
2131 for hv_name in hypervisors:
2132 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
2133 for os_name, os_hvp in cluster.os_hvp.items():
2134 for hv_name, hv_params in os_hvp.items():
2137 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
2138 hvp_data.append(("os %s" % os_name, hv_name, full_params))
2139 # TODO: collapse identical parameter values in a single one
2140 for instance in instanceinfo.values():
2141 if not instance.hvparams:
2143 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
2144 cluster.FillHV(instance)))
2145 # and verify them locally
2146 self._VerifyHVP(hvp_data)
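# Illustrative sketch of the hvp_data entries built above (parameter values
# are hypothetical): each item is a (source description, hypervisor name,
# filled parameter dict) tuple, e.g.
#   ("cluster", "xen-pvm", {"kernel_path": "/boot/vmlinuz-xenU", ...})
#   ("os debian-etch", "xen-pvm", {...})
#   ("instance web1.example.com", "kvm", {...})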
2148 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2149 node_verify_param = {
2150 constants.NV_FILELIST: file_names,
2151 constants.NV_NODELIST: [node.name for node in nodeinfo
2152 if not node.offline],
2153 constants.NV_HYPERVISOR: hypervisors,
2154 constants.NV_HVPARAMS: hvp_data,
2155 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2156 node.secondary_ip) for node in nodeinfo
2157 if not node.offline],
2158 constants.NV_INSTANCELIST: hypervisors,
2159 constants.NV_VERSION: None,
2160 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2161 constants.NV_NODESETUP: None,
2162 constants.NV_TIME: None,
2163 constants.NV_MASTERIP: (master_node, master_ip),
2164 constants.NV_OSLIST: None,
2165 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2168 if vg_name is not None:
2169 node_verify_param[constants.NV_VGLIST] = None
2170 node_verify_param[constants.NV_LVLIST] = vg_name
2171 node_verify_param[constants.NV_PVLIST] = [vg_name]
2172 node_verify_param[constants.NV_DRBDLIST] = None
2175 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2177 # Build our expected cluster state
2178 node_image = dict((node.name, self.NodeImage(offline=node.offline,
2180 vm_capable=node.vm_capable))
2181 for node in nodeinfo)
2185 for node in nodeinfo:
2186 path = _SupportsOob(self.cfg, node)
2187 if path and path not in oob_paths:
2188 oob_paths.append(path)
2191 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2193 for instance in instancelist:
2194 inst_config = instanceinfo[instance]
2196 for nname in inst_config.all_nodes:
2197 if nname not in node_image:
2199 gnode = self.NodeImage(name=nname)
2201 node_image[nname] = gnode
2203 inst_config.MapLVsByNode(node_vol_should)
2205 pnode = inst_config.primary_node
2206 node_image[pnode].pinst.append(instance)
2208 for snode in inst_config.secondary_nodes:
2209 nimg = node_image[snode]
2210 nimg.sinst.append(instance)
2211 if pnode not in nimg.sbp:
2212 nimg.sbp[pnode] = []
2213 nimg.sbp[pnode].append(instance)
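# Illustrative sketch of the per-node bookkeeping built above (names are
# hypothetical): for an instance "inst1" with primary "nodeA" and
# secondary "nodeB":
#   node_image["nodeA"].pinst == ["inst1"]
#   node_image["nodeB"].sinst == ["inst1"]
#   node_image["nodeB"].sbp   == {"nodeA": ["inst1"]}  # secondaries, by primary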
2215 # At this point, we have the in-memory data structures complete,
2216 # except for the runtime information, which we'll gather next
2218 # Due to the way our RPC system works, exact response times cannot be
2219 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2220 # time before and after executing the request, we can at least have a time window.
2222 nvinfo_starttime = time.time()
2223 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2224 self.cfg.GetClusterName())
2225 nvinfo_endtime = time.time()
2227 all_drbd_map = self.cfg.ComputeDRBDMap()
2229 feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
2230 instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
2232 feedback_fn("* Verifying node status")
2236 for node_i in nodeinfo:
2238 nimg = node_image[node]
2242 feedback_fn("* Skipping offline node %s" % (node,))
2246 if node == master_node:
2248 elif node_i.master_candidate:
2249 ntype = "master candidate"
2250 elif node_i.drained:
2256 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2258 msg = all_nvinfo[node].fail_msg
2259 _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2261 nimg.rpc_fail = True
2264 nresult = all_nvinfo[node].payload
2266 nimg.call_ok = self._VerifyNode(node_i, nresult)
2267 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2268 self._VerifyNodeNetwork(node_i, nresult)
2269 self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2272 self._VerifyOob(node_i, nresult)
2275 self._VerifyNodeLVM(node_i, nresult, vg_name)
2276 self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2279 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2280 self._UpdateNodeInstances(node_i, nresult, nimg)
2281 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2282 self._UpdateNodeOS(node_i, nresult, nimg)
2283 if not nimg.os_fail:
2284 if refos_img is None:
2286 self._VerifyNodeOS(node_i, nimg, refos_img)
2288 feedback_fn("* Verifying instance status")
2289 for instance in instancelist:
2291 feedback_fn("* Verifying instance %s" % instance)
2292 inst_config = instanceinfo[instance]
2293 self._VerifyInstance(instance, inst_config, node_image,
2295 inst_nodes_offline = []
2297 pnode = inst_config.primary_node
2298 pnode_img = node_image[pnode]
2299 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2300 self.ENODERPC, pnode, "instance %s, connection to"
2301 " primary node failed", instance)
2303 _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
2304 "instance lives on offline node %s", inst_config.primary_node)
2306 # If the instance is non-redundant we cannot survive losing its primary
2307 # node, so we are not N+1 compliant. On the other hand we have no disk
2308 # templates with more than one secondary so that situation is not well supported either.
2310 # FIXME: does not support file-backed instances
2311 if not inst_config.secondary_nodes:
2312 i_non_redundant.append(instance)
2314 _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2315 instance, "instance has multiple secondary nodes: %s",
2316 utils.CommaJoin(inst_config.secondary_nodes),
2317 code=self.ETYPE_WARNING)
2319 if inst_config.disk_template in constants.DTS_NET_MIRROR:
2320 pnode = inst_config.primary_node
2321 instance_nodes = utils.NiceSort(inst_config.all_nodes)
2322 instance_groups = {}
2324 for node in instance_nodes:
2325 instance_groups.setdefault(nodeinfo_byname[node].group,
2329 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2330 # Sort so that we always list the primary node first.
2331 for group, nodes in sorted(instance_groups.items(),
2332 key=lambda (_, nodes): pnode in nodes,
2335 self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2336 instance, "instance has primary and secondary nodes in"
2337 " different groups: %s", utils.CommaJoin(pretty_list),
2338 code=self.ETYPE_WARNING)
2340 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2341 i_non_a_balanced.append(instance)
2343 for snode in inst_config.secondary_nodes:
2344 s_img = node_image[snode]
2345 _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2346 "instance %s, connection to secondary node failed", instance)
2349 inst_nodes_offline.append(snode)
2351 # warn that the instance lives on offline nodes
2352 _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2353 "instance has offline secondary node(s) %s",
2354 utils.CommaJoin(inst_nodes_offline))
2355 # ... or ghost/non-vm_capable nodes
2356 for node in inst_config.all_nodes:
2357 _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2358 "instance lives on ghost node %s", node)
2359 _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2360 instance, "instance lives on non-vm_capable node %s", node)
2362 feedback_fn("* Verifying orphan volumes")
2363 reserved = utils.FieldSet(*cluster.reserved_lvs)
2364 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2366 feedback_fn("* Verifying orphan instances")
2367 self._VerifyOrphanInstances(instancelist, node_image)
2369 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2370 feedback_fn("* Verifying N+1 Memory redundancy")
2371 self._VerifyNPlusOneMemory(node_image, instanceinfo)
2373 feedback_fn("* Other Notes")
2375 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
2376 % len(i_non_redundant))
2378 if i_non_a_balanced:
2379 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
2380 % len(i_non_a_balanced))
2383 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
2386 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
2390 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2391 """Analyze the post-hooks' result
2393 This method analyses the hook result, handles it, and sends some
2394 nicely-formatted feedback back to the user.
2396 @param phase: one of L{constants.HOOKS_PHASE_POST} or
2397 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2398 @param hooks_results: the results of the multi-node hooks rpc call
2399 @param feedback_fn: function used to send feedback back to the caller
2400 @param lu_result: previous Exec result
2401 @return: the new Exec result, based on the previous result
2405 # We only really run POST phase hooks, and are only interested in their results
2407 if phase == constants.HOOKS_PHASE_POST:
2408 # Used to change hooks' output to proper indentation
2409 feedback_fn("* Hooks Results")
2410 assert hooks_results, "invalid result from hooks"
2412 for node_name in hooks_results:
2413 res = hooks_results[node_name]
2415 test = msg and not res.offline
2416 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2417 "Communication failure in hooks execution: %s", msg)
2418 if res.offline or msg:
2419 # No need to investigate payload if node is offline or gave an error.
2420 # override manually lu_result here as _ErrorIf only
2421 # overrides self.bad
2424 for script, hkr, output in res.payload:
2425 test = hkr == constants.HKR_FAIL
2426 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2427 "Script %s failed, output:", script)
2429 output = self._HOOKS_INDENT_RE.sub(' ', output)
2430 feedback_fn("%s" % output)
2436 class LUClusterVerifyDisks(NoHooksLU):
2437 """Verifies the cluster disks status.
2442 def ExpandNames(self):
2443 self.needed_locks = {
2444 locking.LEVEL_NODE: locking.ALL_SET,
2445 locking.LEVEL_INSTANCE: locking.ALL_SET,
2447 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2449 def Exec(self, feedback_fn):
2450 """Verify integrity of cluster disks.
2452 @rtype: tuple of three items
2453 @return: a tuple of (dict of node-to-node_error, list of instances
2454 which need activate-disks, dict of instance: (node, volume) for missing volumes
2458 result = res_nodes, res_instances, res_missing = {}, [], {}
2460 nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
2461 instances = self.cfg.GetAllInstancesInfo().values()
2464 for inst in instances:
2466 if not inst.admin_up:
2468 inst.MapLVsByNode(inst_lvs)
2469 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2470 for node, vol_list in inst_lvs.iteritems():
2471 for vol in vol_list:
2472 nv_dict[(node, vol)] = inst
2477 node_lvs = self.rpc.call_lv_list(nodes, [])
2478 for node, node_res in node_lvs.items():
2479 if node_res.offline:
2481 msg = node_res.fail_msg
2483 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2484 res_nodes[node] = msg
2487 lvs = node_res.payload
2488 for lv_name, (_, _, lv_online) in lvs.items():
2489 inst = nv_dict.pop((node, lv_name), None)
2490 if (not lv_online and inst is not None
2491 and inst.name not in res_instances):
2492 res_instances.append(inst.name)
2494 # any leftover items in nv_dict are missing LVs, let's arrange the data better
2496 for key, inst in nv_dict.iteritems():
2497 if inst.name not in res_missing:
2498 res_missing[inst.name] = []
2499 res_missing[inst.name].append(key)
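# Illustrative sketch of the result assembled above (names and LV paths are
# hypothetical examples):
#   res_nodes     = {"node3.example.com": "rpc error message"}
#   res_instances = ["inst-with-offline-lv.example.com"]
#   res_missing   = {"inst2.example.com": [("node1.example.com", "xenvg/disk0")]}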
2504 class LUClusterRepairDiskSizes(NoHooksLU):
2505 """Verifies the cluster disks sizes.
2510 def ExpandNames(self):
2511 if self.op.instances:
2512 self.wanted_names = []
2513 for name in self.op.instances:
2514 full_name = _ExpandInstanceName(self.cfg, name)
2515 self.wanted_names.append(full_name)
2516 self.needed_locks = {
2517 locking.LEVEL_NODE: [],
2518 locking.LEVEL_INSTANCE: self.wanted_names,
2520 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2522 self.wanted_names = None
2523 self.needed_locks = {
2524 locking.LEVEL_NODE: locking.ALL_SET,
2525 locking.LEVEL_INSTANCE: locking.ALL_SET,
2527 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2529 def DeclareLocks(self, level):
2530 if level == locking.LEVEL_NODE and self.wanted_names is not None:
2531 self._LockInstancesNodes(primary_only=True)
2533 def CheckPrereq(self):
2534 """Check prerequisites.
2536 This only checks the optional instance list against the existing names.
2539 if self.wanted_names is None:
2540 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2542 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2543 in self.wanted_names]
2545 def _EnsureChildSizes(self, disk):
2546 """Ensure children of the disk have the needed disk size.
2548 This is valid mainly for DRBD8 and fixes an issue where the
2549 children have smaller disk size.
2551 @param disk: an L{ganeti.objects.Disk} object
2554 if disk.dev_type == constants.LD_DRBD8:
2555 assert disk.children, "Empty children for DRBD8?"
2556 fchild = disk.children[0]
2557 mismatch = fchild.size < disk.size
2559 self.LogInfo("Child disk has size %d, parent %d, fixing",
2560 fchild.size, disk.size)
2561 fchild.size = disk.size
2563 # and we recurse on this child only, not on the metadev
2564 return self._EnsureChildSizes(fchild) or mismatch
2568 def Exec(self, feedback_fn):
2569 """Verify the size of cluster disks.
2572 # TODO: check child disks too
2573 # TODO: check differences in size between primary/secondary nodes
2575 for instance in self.wanted_instances:
2576 pnode = instance.primary_node
2577 if pnode not in per_node_disks:
2578 per_node_disks[pnode] = []
2579 for idx, disk in enumerate(instance.disks):
2580 per_node_disks[pnode].append((instance, idx, disk))
2583 for node, dskl in per_node_disks.items():
2584 newl = [v[2].Copy() for v in dskl]
2586 self.cfg.SetDiskID(dsk, node)
2587 result = self.rpc.call_blockdev_getsize(node, newl)
2589 self.LogWarning("Failure in blockdev_getsize call to node"
2590 " %s, ignoring", node)
2592 if len(result.payload) != len(dskl):
2593 logging.warning("Invalid result from node %s: len(dskl)=%d,"
2594 " result.payload=%s", node, len(dskl), result.payload)
2595 self.LogWarning("Invalid result from node %s, ignoring node results",
2598 for ((instance, idx, disk), size) in zip(dskl, result.payload):
2600 self.LogWarning("Disk %d of instance %s did not return size"
2601 " information, ignoring", idx, instance.name)
2603 if not isinstance(size, (int, long)):
2604 self.LogWarning("Disk %d of instance %s did not return valid"
2605 " size information, ignoring", idx, instance.name)
2608 if size != disk.size:
2609 self.LogInfo("Disk %d of instance %s has mismatched size,"
2610 " correcting: recorded %d, actual %d", idx,
2611 instance.name, disk.size, size)
2613 self.cfg.Update(instance, feedback_fn)
2614 changed.append((instance.name, idx, size))
2615 if self._EnsureChildSizes(disk):
2616 self.cfg.Update(instance, feedback_fn)
2617 changed.append((instance.name, idx, disk.size))
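# Each entry appended to 'changed' above is an (instance name, disk index,
# new size) tuple, e.g. ("inst1.example.com", 0, 10240) -- the example
# values here are hypothetical.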
2621 class LUClusterRename(LogicalUnit):
2622 """Rename the cluster.
2625 HPATH = "cluster-rename"
2626 HTYPE = constants.HTYPE_CLUSTER
2628 def BuildHooksEnv(self):
2633 "OP_TARGET": self.cfg.GetClusterName(),
2634 "NEW_NAME": self.op.name,
2636 mn = self.cfg.GetMasterNode()
2637 all_nodes = self.cfg.GetNodeList()
2638 return env, [mn], all_nodes
2640 def CheckPrereq(self):
2641 """Verify that the passed name is a valid one.
2644 hostname = netutils.GetHostname(name=self.op.name,
2645 family=self.cfg.GetPrimaryIPFamily())
2647 new_name = hostname.name
2648 self.ip = new_ip = hostname.ip
2649 old_name = self.cfg.GetClusterName()
2650 old_ip = self.cfg.GetMasterIP()
2651 if new_name == old_name and new_ip == old_ip:
2652 raise errors.OpPrereqError("Neither the name nor the IP address of the"
2653 " cluster has changed",
2655 if new_ip != old_ip:
2656 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2657 raise errors.OpPrereqError("The given cluster IP address (%s) is"
2658 " reachable on the network" %
2659 new_ip, errors.ECODE_NOTUNIQUE)
2661 self.op.name = new_name
2663 def Exec(self, feedback_fn):
2664 """Rename the cluster.
2667 clustername = self.op.name
2670 # shutdown the master IP
2671 master = self.cfg.GetMasterNode()
2672 result = self.rpc.call_node_stop_master(master, False)
2673 result.Raise("Could not disable the master role")
2676 cluster = self.cfg.GetClusterInfo()
2677 cluster.cluster_name = clustername
2678 cluster.master_ip = ip
2679 self.cfg.Update(cluster, feedback_fn)
2681 # update the known hosts file
2682 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2683 node_list = self.cfg.GetOnlineNodeList()
2685 node_list.remove(master)
2688 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
2690 result = self.rpc.call_node_start_master(master, False, False)
2691 msg = result.fail_msg
2693 self.LogWarning("Could not re-enable the master role on"
2694 " the master, please restart manually: %s", msg)
2699 class LUClusterSetParams(LogicalUnit):
2700 """Change the parameters of the cluster.
2703 HPATH = "cluster-modify"
2704 HTYPE = constants.HTYPE_CLUSTER
2707 def CheckArguments(self):
2711 if self.op.uid_pool:
2712 uidpool.CheckUidPool(self.op.uid_pool)
2714 if self.op.add_uids:
2715 uidpool.CheckUidPool(self.op.add_uids)
2717 if self.op.remove_uids:
2718 uidpool.CheckUidPool(self.op.remove_uids)
2720 def ExpandNames(self):
2721 # FIXME: in the future maybe other cluster params won't require checking on
2722 # all nodes to be modified.
2723 self.needed_locks = {
2724 locking.LEVEL_NODE: locking.ALL_SET,
2726 self.share_locks[locking.LEVEL_NODE] = 1
2728 def BuildHooksEnv(self):
2733 "OP_TARGET": self.cfg.GetClusterName(),
2734 "NEW_VG_NAME": self.op.vg_name,
2736 mn = self.cfg.GetMasterNode()
2737 return env, [mn], [mn]
2739 def CheckPrereq(self):
2740 """Check prerequisites.
2742 This checks that the given parameters don't conflict and
2743 that the given volume group is valid.
2746 if self.op.vg_name is not None and not self.op.vg_name:
2747 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2748 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2749 " instances exist", errors.ECODE_INVAL)
2751 if self.op.drbd_helper is not None and not self.op.drbd_helper:
2752 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2753 raise errors.OpPrereqError("Cannot disable drbd helper while"
2754 " drbd-based instances exist",
2757 node_list = self.acquired_locks[locking.LEVEL_NODE]
2759 # if vg_name not None, checks given volume group on all nodes
2761 vglist = self.rpc.call_vg_list(node_list)
2762 for node in node_list:
2763 msg = vglist[node].fail_msg
2765 # ignoring down node
2766 self.LogWarning("Error while gathering data on node %s"
2767 " (ignoring node): %s", node, msg)
2769 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2771 constants.MIN_VG_SIZE)
2773 raise errors.OpPrereqError("Error on node '%s': %s" %
2774 (node, vgstatus), errors.ECODE_ENVIRON)
2776 if self.op.drbd_helper:
2777 # checks given drbd helper on all nodes
2778 helpers = self.rpc.call_drbd_helper(node_list)
2779 for node in node_list:
2780 ninfo = self.cfg.GetNodeInfo(node)
2782 self.LogInfo("Not checking drbd helper on offline node %s", node)
2784 msg = helpers[node].fail_msg
2786 raise errors.OpPrereqError("Error checking drbd helper on node"
2787 " '%s': %s" % (node, msg),
2788 errors.ECODE_ENVIRON)
2789 node_helper = helpers[node].payload
2790 if node_helper != self.op.drbd_helper:
2791 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2792 (node, node_helper), errors.ECODE_ENVIRON)
2794 self.cluster = cluster = self.cfg.GetClusterInfo()
2795 # validate params changes
2796 if self.op.beparams:
2797 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2798 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2800 if self.op.ndparams:
2801 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2802 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2804 if self.op.nicparams:
2805 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2806 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2807 objects.NIC.CheckParameterSyntax(self.new_nicparams)
2810 # check all instances for consistency
2811 for instance in self.cfg.GetAllInstancesInfo().values():
2812 for nic_idx, nic in enumerate(instance.nics):
2813 params_copy = copy.deepcopy(nic.nicparams)
2814 params_filled = objects.FillDict(self.new_nicparams, params_copy)
2816 # check parameter syntax
2818 objects.NIC.CheckParameterSyntax(params_filled)
2819 except errors.ConfigurationError, err:
2820 nic_errors.append("Instance %s, nic/%d: %s" %
2821 (instance.name, nic_idx, err))
2823 # if we're moving instances to routed, check that they have an ip
2824 target_mode = params_filled[constants.NIC_MODE]
2825 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2826 nic_errors.append("Instance %s, nic/%d: routed nic with no ip" %
2827 (instance.name, nic_idx))
2829 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2830 "\n".join(nic_errors))
2832 # hypervisor list/parameters
2833 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2834 if self.op.hvparams:
2835 for hv_name, hv_dict in self.op.hvparams.items():
2836 if hv_name not in self.new_hvparams:
2837 self.new_hvparams[hv_name] = hv_dict
2839 self.new_hvparams[hv_name].update(hv_dict)
2841 # os hypervisor parameters
2842 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2844 for os_name, hvs in self.op.os_hvp.items():
2845 if os_name not in self.new_os_hvp:
2846 self.new_os_hvp[os_name] = hvs
2848 for hv_name, hv_dict in hvs.items():
2849 if hv_name not in self.new_os_hvp[os_name]:
2850 self.new_os_hvp[os_name][hv_name] = hv_dict
2852 self.new_os_hvp[os_name][hv_name].update(hv_dict)
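# Illustrative sketch of the resulting nesting (the OS name and parameter
# values are hypothetical):
#   self.new_os_hvp = {
#     "debian-etch": {
#       "xen-pvm": {"kernel_path": "/boot/vmlinuz-xenU"},
#     },
#   }
# i.e. OS name -> hypervisor name -> per-OS hypervisor parameter overrides.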
2855 self.new_osp = objects.FillDict(cluster.osparams, {})
2856 if self.op.osparams:
2857 for os_name, osp in self.op.osparams.items():
2858 if os_name not in self.new_osp:
2859 self.new_osp[os_name] = {}
2861 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2864 if not self.new_osp[os_name]:
2865 # we removed all parameters
2866 del self.new_osp[os_name]
2868 # check the parameter validity (remote check)
2869 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2870 os_name, self.new_osp[os_name])
2872 # changes to the hypervisor list
2873 if self.op.enabled_hypervisors is not None:
2874 self.hv_list = self.op.enabled_hypervisors
2875 for hv in self.hv_list:
2876 # if the hypervisor doesn't already exist in the cluster
2877 # hvparams, we initialize it to empty, and then (in both
2878 # cases) we make sure to fill the defaults, as we might not
2879 # have a complete defaults list if the hypervisor wasn't enabled before
2881 if hv not in new_hvp:
2883 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2884 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2886 self.hv_list = cluster.enabled_hypervisors
2888 if self.op.hvparams or self.op.enabled_hypervisors is not None:
2889 # either the enabled list has changed, or the parameters have, validate
2890 for hv_name, hv_params in self.new_hvparams.items():
2891 if ((self.op.hvparams and hv_name in self.op.hvparams) or
2892 (self.op.enabled_hypervisors and
2893 hv_name in self.op.enabled_hypervisors)):
2894 # either this is a new hypervisor, or its parameters have changed
2895 hv_class = hypervisor.GetHypervisor(hv_name)
2896 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2897 hv_class.CheckParameterSyntax(hv_params)
2898 _CheckHVParams(self, node_list, hv_name, hv_params)
2901 # no need to check any newly-enabled hypervisors, since the
2902 # defaults have already been checked in the above code-block
2903 for os_name, os_hvp in self.new_os_hvp.items():
2904 for hv_name, hv_params in os_hvp.items():
2905 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2906 # we need to fill in the new os_hvp on top of the actual hv_p
2907 cluster_defaults = self.new_hvparams.get(hv_name, {})
2908 new_osp = objects.FillDict(cluster_defaults, hv_params)
2909 hv_class = hypervisor.GetHypervisor(hv_name)
2910 hv_class.CheckParameterSyntax(new_osp)
2911 _CheckHVParams(self, node_list, hv_name, new_osp)
2913 if self.op.default_iallocator:
2914 alloc_script = utils.FindFile(self.op.default_iallocator,
2915 constants.IALLOCATOR_SEARCH_PATH,
2917 if alloc_script is None:
2918 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2919 " specified" % self.op.default_iallocator,
2922 def Exec(self, feedback_fn):
2923 """Change the parameters of the cluster.
2926 if self.op.vg_name is not None:
2927 new_volume = self.op.vg_name
2930 if new_volume != self.cfg.GetVGName():
2931 self.cfg.SetVGName(new_volume)
2933 feedback_fn("Cluster LVM configuration already in desired"
2934 " state, not changing")
2935 if self.op.drbd_helper is not None:
2936 new_helper = self.op.drbd_helper
2939 if new_helper != self.cfg.GetDRBDHelper():
2940 self.cfg.SetDRBDHelper(new_helper)
2942 feedback_fn("Cluster DRBD helper already in desired state,"
2944 if self.op.hvparams:
2945 self.cluster.hvparams = self.new_hvparams
2947 self.cluster.os_hvp = self.new_os_hvp
2948 if self.op.enabled_hypervisors is not None:
2949 self.cluster.hvparams = self.new_hvparams
2950 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2951 if self.op.beparams:
2952 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2953 if self.op.nicparams:
2954 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2955 if self.op.osparams:
2956 self.cluster.osparams = self.new_osp
2957 if self.op.ndparams:
2958 self.cluster.ndparams = self.new_ndparams
2960 if self.op.candidate_pool_size is not None:
2961 self.cluster.candidate_pool_size = self.op.candidate_pool_size
2962 # we need to update the pool size here, otherwise the save will fail
2963 _AdjustCandidatePool(self, [])
2965 if self.op.maintain_node_health is not None:
2966 self.cluster.maintain_node_health = self.op.maintain_node_health
2968 if self.op.prealloc_wipe_disks is not None:
2969 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
2971 if self.op.add_uids is not None:
2972 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2974 if self.op.remove_uids is not None:
2975 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2977 if self.op.uid_pool is not None:
2978 self.cluster.uid_pool = self.op.uid_pool
2980 if self.op.default_iallocator is not None:
2981 self.cluster.default_iallocator = self.op.default_iallocator
2983 if self.op.reserved_lvs is not None:
2984 self.cluster.reserved_lvs = self.op.reserved_lvs
2986 def helper_os(aname, mods, desc):
2988 lst = getattr(self.cluster, aname)
2989 for key, val in mods:
2990 if key == constants.DDM_ADD:
2992 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
2995 elif key == constants.DDM_REMOVE:
2999 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
3001 raise errors.ProgrammerError("Invalid modification '%s'" % key)
3003 if self.op.hidden_os:
3004 helper_os("hidden_os", self.op.hidden_os, "hidden")
3006 if self.op.blacklisted_os:
3007 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
3009 if self.op.master_netdev:
3010 master = self.cfg.GetMasterNode()
3011 feedback_fn("Shutting down master ip on the current netdev (%s)" %
3012 self.cluster.master_netdev)
3013 result = self.rpc.call_node_stop_master(master, False)
3014 result.Raise("Could not disable the master ip")
3015 feedback_fn("Changing master_netdev from %s to %s" %
3016 (self.cluster.master_netdev, self.op.master_netdev))
3017 self.cluster.master_netdev = self.op.master_netdev
3019 self.cfg.Update(self.cluster, feedback_fn)
3021 if self.op.master_netdev:
3022 feedback_fn("Starting the master ip on the new master netdev (%s)" %
3023 self.op.master_netdev)
3024 result = self.rpc.call_node_start_master(master, False, False)
3026 self.LogWarning("Could not re-enable the master ip on"
3027 " the master, please restart manually: %s",
3031 def _UploadHelper(lu, nodes, fname):
3032 """Helper for uploading a file and showing warnings.
3035 if os.path.exists(fname):
3036 result = lu.rpc.call_upload_file(nodes, fname)
3037 for to_node, to_result in result.items():
3038 msg = to_result.fail_msg
3040 msg = ("Copy of file %s to node %s failed: %s" %
3041 (fname, to_node, msg))
3042 lu.proc.LogWarning(msg)
3045 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
3046 """Distribute additional files which are part of the cluster configuration.
3048 ConfigWriter takes care of distributing the config and ssconf files, but
3049 there are more files which should be distributed to all nodes. This function
3050 makes sure those are copied.
3052 @param lu: calling logical unit
3053 @param additional_nodes: list of nodes not in the config to distribute to
3054 @type additional_vm: boolean
3055 @param additional_vm: whether the additional nodes are vm-capable or not
3058 # 1. Gather target nodes
3059 myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
3060 dist_nodes = lu.cfg.GetOnlineNodeList()
3061 nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
3062 vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
3063 if additional_nodes is not None:
3064 dist_nodes.extend(additional_nodes)
3066 vm_nodes.extend(additional_nodes)
3067 if myself.name in dist_nodes:
3068 dist_nodes.remove(myself.name)
3069 if myself.name in vm_nodes:
3070 vm_nodes.remove(myself.name)
3072 # 2. Gather files to distribute
3073 dist_files = set([constants.ETC_HOSTS,
3074 constants.SSH_KNOWN_HOSTS_FILE,
3075 constants.RAPI_CERT_FILE,
3076 constants.RAPI_USERS_FILE,
3077 constants.CONFD_HMAC_KEY,
3078 constants.CLUSTER_DOMAIN_SECRET_FILE,
3082 enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
3083 for hv_name in enabled_hypervisors:
3084 hv_class = hypervisor.GetHypervisor(hv_name)
3085 vm_files.update(hv_class.GetAncillaryFiles())
3087 # 3. Perform the files upload
3088 for fname in dist_files:
3089 _UploadHelper(lu, dist_nodes, fname)
3090 for fname in vm_files:
3091 _UploadHelper(lu, vm_nodes, fname)
3094 class LUClusterRedistConf(NoHooksLU):
3095 """Force the redistribution of cluster configuration.
3097 This is a very simple LU.
3102 def ExpandNames(self):
3103 self.needed_locks = {
3104 locking.LEVEL_NODE: locking.ALL_SET,
3106 self.share_locks[locking.LEVEL_NODE] = 1
3108 def Exec(self, feedback_fn):
3109 """Redistribute the configuration.
3112 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
3113 _RedistributeAncillaryFiles(self)
3116 def _WaitForSync(lu, instance, disks=None, oneshot=False):
3117 """Sleep and poll for an instance's disk to sync.
3120 if not instance.disks or disks is not None and not disks:
3123 disks = _ExpandCheckDisks(instance, disks)
3126 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3128 node = instance.primary_node
3131 lu.cfg.SetDiskID(dev, node)
3133 # TODO: Convert to utils.Retry
3136 degr_retries = 10 # in seconds, as we sleep 1 second each time
3140 cumul_degraded = False
3141 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3142 msg = rstats.fail_msg
3144 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3147 raise errors.RemoteError("Can't contact node %s for mirror data,"
3148 " aborting." % node)
3151 rstats = rstats.payload
3153 for i, mstat in enumerate(rstats):
3155 lu.LogWarning("Can't compute data for node %s/%s",
3156 node, disks[i].iv_name)
3159 cumul_degraded = (cumul_degraded or
3160 (mstat.is_degraded and mstat.sync_percent is None))
3161 if mstat.sync_percent is not None:
3163 if mstat.estimated_time is not None:
3164 rem_time = ("%s remaining (estimated)" %
3165 utils.FormatSeconds(mstat.estimated_time))
3166 max_time = mstat.estimated_time
3168 rem_time = "no time estimate"
3169 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3170 (disks[i].iv_name, mstat.sync_percent, rem_time))
3172 # if we're done but degraded, let's do a few small retries, to
3173 # make sure we see a stable and not transient situation; therefore
3174 # we force restart of the loop
3175 if (done or oneshot) and cumul_degraded and degr_retries > 0:
3176 logging.info("Degraded disks found, %d retries left", degr_retries)
3184 time.sleep(min(60, max_time))
3187 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3188 return not cumul_degraded
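# Minimal usage sketch (hedged; actual call sites elsewhere in this module
# may pass the optional 'disks' and 'oneshot' arguments):
#   disks_in_sync = _WaitForSync(lu, instance)
#   if not disks_in_sync:
#     lu.LogWarning("Some disks of instance %s are degraded", instance.name)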
3191 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3192 """Check that mirrors are not degraded.
3194 The ldisk parameter, if True, will change the test from the
3195 is_degraded attribute (which represents overall non-ok status for
3196 the device(s)) to the ldisk (representing the local storage status).
3199 lu.cfg.SetDiskID(dev, node)
3203 if on_primary or dev.AssembleOnSecondary():
3204 rstats = lu.rpc.call_blockdev_find(node, dev)
3205 msg = rstats.fail_msg
3207 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3209 elif not rstats.payload:
3210 lu.LogWarning("Can't find disk on node %s", node)
3214 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3216 result = result and not rstats.payload.is_degraded
3219 for child in dev.children:
3220 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3225 class LUOobCommand(NoHooksLU):
3226 """Logical unit for OOB handling.
3231 def CheckPrereq(self):
3232 """Check prerequisites.
3235 - the node exists in the configuration
3238 Any errors are signaled by raising errors.OpPrereqError.
3242 for node_name in self.op.node_names:
3243 node = self.cfg.GetNodeInfo(node_name)
3246 raise errors.OpPrereqError("Node %s not found" % node_name,
3249 self.nodes.append(node)
3251 if (self.op.command == constants.OOB_POWER_OFF and not node.offline):
3252 raise errors.OpPrereqError(("Cannot power off node %s because it is"
3253 " not marked offline") % node_name,
3256 def ExpandNames(self):
3257 """Gather locks we need.
3260 if self.op.node_names:
3261 self.op.node_names = [_ExpandNodeName(self.cfg, name)
3262 for name in self.op.node_names]
3264 self.op.node_names = self.cfg.GetNodeList()
3266 self.needed_locks = {
3267 locking.LEVEL_NODE: self.op.node_names,
3270 def Exec(self, feedback_fn):
3271 """Execute OOB and return result if we expect any.
3274 master_node = self.cfg.GetMasterNode()
3277 for node in self.nodes:
3278 node_entry = [(constants.RS_NORMAL, node.name)]
3279 ret.append(node_entry)
3281 oob_program = _SupportsOob(self.cfg, node)
3284 node_entry.append((constants.RS_UNAVAIL, None))
3287 logging.info("Executing out-of-band command '%s' using '%s' on %s",
3288 self.op.command, oob_program, node.name)
3289 result = self.rpc.call_run_oob(master_node, oob_program,
3290 self.op.command, node.name,
3294 self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
3295 node.name, result.fail_msg)
3296 node_entry.append((constants.RS_NODATA, None))
3299 self._CheckPayload(result)
3300 except errors.OpExecError, err:
3301 self.LogWarning("The payload returned by '%s' is not valid: %s",
3303 node_entry.append((constants.RS_NODATA, None))
3305 if self.op.command == constants.OOB_HEALTH:
3306 # For health we should log important events
3307 for item, status in result.payload:
3308 if status in [constants.OOB_STATUS_WARNING,
3309 constants.OOB_STATUS_CRITICAL]:
3310 self.LogWarning("On node '%s' item '%s' has status '%s'",
3311 node.name, item, status)
3313 if self.op.command == constants.OOB_POWER_ON:
3315 elif self.op.command == constants.OOB_POWER_OFF:
3316 node.powered = False
3317 elif self.op.command == constants.OOB_POWER_STATUS:
3318 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
3319 if powered != node.powered:
3320 logging.warning(("Recorded power state (%s) of node '%s' does not"
3321 " match actual power state (%s)"), node.powered,
3324 # For configuration changing commands we should update the node
3325 if self.op.command in (constants.OOB_POWER_ON,
3326 constants.OOB_POWER_OFF):
3327 self.cfg.Update(node, feedback_fn)
3329 node_entry.append((constants.RS_NORMAL, result.payload))
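# Illustrative sketch of one entry accumulated in 'ret' above (the node
# name is hypothetical): a list of (status, data) tuples, starting with the
# node name and followed by the per-command result, e.g.
#   [(constants.RS_NORMAL, "node1.example.com"),
#    (constants.RS_NORMAL, {constants.OOB_POWER_STATUS_POWERED: True})]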
3333 def _CheckPayload(self, result):
3334 """Checks if the payload is valid.
3336 @param result: RPC result
3337 @raises errors.OpExecError: If payload is not valid
3341 if self.op.command == constants.OOB_HEALTH:
3342 if not isinstance(result.payload, list):
3343 errs.append("command 'health' is expected to return a list but got %s" %
3344 type(result.payload))
3346 for item, status in result.payload:
3347 if status not in constants.OOB_STATUSES:
3348 errs.append("health item '%s' has invalid status '%s'" %
3351 if self.op.command == constants.OOB_POWER_STATUS:
3352 if not isinstance(result.payload, dict):
3353 errs.append("power-status is expected to return a dict but got %s" %
3354 type(result.payload))
3356 if self.op.command in [
3357 constants.OOB_POWER_ON,
3358 constants.OOB_POWER_OFF,
3359 constants.OOB_POWER_CYCLE,
3361 if result.payload is not None:
3362 errs.append("%s is expected to not return payload but got '%s'" %
3363 (self.op.command, result.payload))
3366 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
3367 utils.CommaJoin(errs))
3371 class LUOsDiagnose(NoHooksLU):
3372 """Logical unit for OS diagnose/query.
3377 _BLK = "blacklisted"
3379 _FIELDS_STATIC = utils.FieldSet()
3380 _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
3381 "parameters", "api_versions", _HID, _BLK)
3383 def CheckArguments(self):
3385 raise errors.OpPrereqError("Selective OS query not supported",
3388 _CheckOutputFields(static=self._FIELDS_STATIC,
3389 dynamic=self._FIELDS_DYNAMIC,
3390 selected=self.op.output_fields)
3392 def ExpandNames(self):
3393 # Lock all nodes, in shared mode
3394 # Temporary removal of locks, should be reverted later
3395 # TODO: reintroduce locks when they are lighter-weight
3396 self.needed_locks = {}
3397 #self.share_locks[locking.LEVEL_NODE] = 1
3398 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3401 def _DiagnoseByOS(rlist):
3402 """Remaps a per-node return list into an a per-os per-node dictionary
3404 @param rlist: a map with node names as keys and OS objects as values
3407 @return: a dictionary with osnames as keys and as value another
3408 map, with nodes as keys and tuples of (path, status, diagnose,
3409 variants, parameters, api_versions) as values, eg::
3411 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3412 (/srv/..., False, "invalid api")],
3413 "node2": [(/srv/..., True, "", [], [])]}
3418 # we build here the list of nodes that didn't fail the RPC (at RPC
3419 # level), so that nodes with a non-responding node daemon don't
3420 # make all OSes invalid
3421 good_nodes = [node_name for node_name in rlist
3422 if not rlist[node_name].fail_msg]
3423 for node_name, nr in rlist.items():
3424 if nr.fail_msg or not nr.payload:
3426 for (name, path, status, diagnose, variants,
3427 params, api_versions) in nr.payload:
3428 if name not in all_os:
3429 # build a list of nodes for this os containing empty lists
3430 # for each node in node_list
3432 for nname in good_nodes:
3433 all_os[name][nname] = []
3434 # convert params from [name, help] to (name, help)
3435 params = [tuple(v) for v in params]
3436 all_os[name][node_name].append((path, status, diagnose,
3437 variants, params, api_versions))
3440 def Exec(self, feedback_fn):
3441 """Compute the list of OSes.
3444 valid_nodes = [node.name
3445 for node in self.cfg.GetAllNodesInfo().values()
3446 if not node.offline and node.vm_capable]
3447 node_data = self.rpc.call_os_diagnose(valid_nodes)
3448 pol = self._DiagnoseByOS(node_data)
3450 cluster = self.cfg.GetClusterInfo()
3452 for os_name in utils.NiceSort(pol.keys()):
3453 os_data = pol[os_name]
3456 (variants, params, api_versions) = null_state = (set(), set(), set())
3457 for idx, osl in enumerate(os_data.values()):
3458 valid = bool(valid and osl and osl[0][1])
3460 (variants, params, api_versions) = null_state
3462 node_variants, node_params, node_api = osl[0][3:6]
3463 if idx == 0: # first entry
3464 variants = set(node_variants)
3465 params = set(node_params)
3466 api_versions = set(node_api)
3467 else: # keep consistency
3468 variants.intersection_update(node_variants)
3469 params.intersection_update(node_params)
3470 api_versions.intersection_update(node_api)
3472 is_hid = os_name in cluster.hidden_os
3473 is_blk = os_name in cluster.blacklisted_os
3474 if ((self._HID not in self.op.output_fields and is_hid) or
3475 (self._BLK not in self.op.output_fields and is_blk) or
3476 (self._VLD not in self.op.output_fields and not valid)):
3479 for field in self.op.output_fields:
3482 elif field == self._VLD:
3484 elif field == "node_status":
3485 # this is just a copy of the dict
3487 for node_name, nos_list in os_data.items():
3488 val[node_name] = nos_list
3489 elif field == "variants":
3490 val = utils.NiceSort(list(variants))
3491 elif field == "parameters":
3493 elif field == "api_versions":
3494 val = list(api_versions)
3495 elif field == self._HID:
3497 elif field == self._BLK:
3500 raise errors.ParameterError(field)
3507 class LUNodeRemove(LogicalUnit):
3508 """Logical unit for removing a node.
3511 HPATH = "node-remove"
3512 HTYPE = constants.HTYPE_NODE
3514 def BuildHooksEnv(self):
3517 This doesn't run on the target node in the pre phase as a failed
3518 node would then be impossible to remove.
3522 "OP_TARGET": self.op.node_name,
3523 "NODE_NAME": self.op.node_name,
3525 all_nodes = self.cfg.GetNodeList()
3527 all_nodes.remove(self.op.node_name)
3529 logging.warning("Node %s which is about to be removed not found"
3530 " in the all nodes list", self.op.node_name)
3531 return env, all_nodes, all_nodes
3533 def CheckPrereq(self):
3534 """Check prerequisites.
3537 - the node exists in the configuration
3538 - it does not have primary or secondary instances
3539 - it's not the master
3541 Any errors are signaled by raising errors.OpPrereqError.
3544 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3545 node = self.cfg.GetNodeInfo(self.op.node_name)
3546 assert node is not None
3548 instance_list = self.cfg.GetInstanceList()
3550 masternode = self.cfg.GetMasterNode()
3551 if node.name == masternode:
3552 raise errors.OpPrereqError("Node is the master node,"
3553 " you need to failover first.",
3556 for instance_name in instance_list:
3557 instance = self.cfg.GetInstanceInfo(instance_name)
3558 if node.name in instance.all_nodes:
3559 raise errors.OpPrereqError("Instance %s is still running on the node,"
3560 " please remove first." % instance_name,
3562 self.op.node_name = node.name
3565 def Exec(self, feedback_fn):
3566 """Removes the node from the cluster.
3570 logging.info("Stopping the node daemon and removing configs from node %s",
3573 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3575 # Promote nodes to master candidate as needed
3576 _AdjustCandidatePool(self, exceptions=[node.name])
3577 self.context.RemoveNode(node.name)
3579 # Run post hooks on the node before it's removed
3580 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3582 hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3584 # pylint: disable-msg=W0702
3585 self.LogWarning("Errors occurred running hooks on %s" % node.name)
3587 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3588 msg = result.fail_msg
3590 self.LogWarning("Errors encountered on the remote node while leaving"
3591 " the cluster: %s", msg)
3593 # Remove node from our /etc/hosts
3594 if self.cfg.GetClusterInfo().modify_etc_hosts:
3595 master_node = self.cfg.GetMasterNode()
3596 result = self.rpc.call_etc_hosts_modify(master_node,
3597 constants.ETC_HOSTS_REMOVE,
3599 result.Raise("Can't update hosts file with new host data")
3600 _RedistributeAncillaryFiles(self)
3603 class _NodeQuery(_QueryBase):
3604 FIELDS = query.NODE_FIELDS
3606 def ExpandNames(self, lu):
3607 lu.needed_locks = {}
3608 lu.share_locks[locking.LEVEL_NODE] = 1
3611 self.wanted = _GetWantedNodes(lu, self.names)
3613 self.wanted = locking.ALL_SET
3615 self.do_locking = (self.use_locking and
3616 query.NQ_LIVE in self.requested_data)
3619 # if we don't request only static fields, we need to lock the nodes
3620 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
3622 def DeclareLocks(self, lu, level):
3625 def _GetQueryData(self, lu):
3626 """Computes the list of nodes and their attributes.
3629 all_info = lu.cfg.GetAllNodesInfo()
3631 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
3633 # Gather data as requested
3634 if query.NQ_LIVE in self.requested_data:
3635 # filter out non-vm_capable nodes
3636 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
3638 node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
3639 lu.cfg.GetHypervisorType())
3640 live_data = dict((name, nresult.payload)
3641 for (name, nresult) in node_data.items()
3642 if not nresult.fail_msg and nresult.payload)
3646 if query.NQ_INST in self.requested_data:
3647 node_to_primary = dict([(name, set()) for name in nodenames])
3648 node_to_secondary = dict([(name, set()) for name in nodenames])
3650 inst_data = lu.cfg.GetAllInstancesInfo()
3652 for inst in inst_data.values():
3653 if inst.primary_node in node_to_primary:
3654 node_to_primary[inst.primary_node].add(inst.name)
3655 for secnode in inst.secondary_nodes:
3656 if secnode in node_to_secondary:
3657 node_to_secondary[secnode].add(inst.name)
3659 node_to_primary = None
3660 node_to_secondary = None
3662 if query.NQ_OOB in self.requested_data:
3663 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
3664 for name, node in all_info.iteritems())
3668 if query.NQ_GROUP in self.requested_data:
3669 groups = lu.cfg.GetAllNodeGroupsInfo()
3673 return query.NodeQueryData([all_info[name] for name in nodenames],
3674 live_data, lu.cfg.GetMasterNode(),
3675 node_to_primary, node_to_secondary, groups,
3676 oob_support, lu.cfg.GetClusterInfo())
3679 class LUNodeQuery(NoHooksLU):
3680 """Logical unit for querying nodes.
3683 # pylint: disable-msg=W0142
3686 def CheckArguments(self):
3687 self.nq = _NodeQuery(self.op.names, self.op.output_fields,
3688 self.op.use_locking)
3690 def ExpandNames(self):
3691 self.nq.ExpandNames(self)
3693 def Exec(self, feedback_fn):
3694 return self.nq.OldStyleQuery(self)
3697 class LUNodeQueryvols(NoHooksLU):
3698 """Logical unit for getting volumes on node(s).
3702 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3703 _FIELDS_STATIC = utils.FieldSet("node")
3705 def CheckArguments(self):
3706 _CheckOutputFields(static=self._FIELDS_STATIC,
3707 dynamic=self._FIELDS_DYNAMIC,
3708 selected=self.op.output_fields)
3710 def ExpandNames(self):
3711 self.needed_locks = {}
3712 self.share_locks[locking.LEVEL_NODE] = 1
3713 if not self.op.nodes:
3714 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3716 self.needed_locks[locking.LEVEL_NODE] = \
3717 _GetWantedNodes(self, self.op.nodes)
3719 def Exec(self, feedback_fn):
3720 """Computes the list of nodes and their attributes.
3723 nodenames = self.acquired_locks[locking.LEVEL_NODE]
3724 volumes = self.rpc.call_node_volumes(nodenames)
3726 ilist = [self.cfg.GetInstanceInfo(iname) for iname
3727 in self.cfg.GetInstanceList()]
3729 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3732 for node in nodenames:
3733 nresult = volumes[node]
3736 msg = nresult.fail_msg
3738 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3741 node_vols = nresult.payload[:]
3742 node_vols.sort(key=lambda vol: vol['dev'])
3744 for vol in node_vols:
3746 for field in self.op.output_fields:
3749 elif field == "phys":
3753 elif field == "name":
3755 elif field == "size":
3756 val = int(float(vol['size']))
3757 elif field == "instance":
3759 if node not in lv_by_node[inst]:
3761 if vol['name'] in lv_by_node[inst][node]:
3767 raise errors.ParameterError(field)
3768 node_output.append(str(val))
3770 output.append(node_output)
3775 class LUNodeQueryStorage(NoHooksLU):
3776 """Logical unit for getting information on storage units on node(s).
3779 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3782 def CheckArguments(self):
3783 _CheckOutputFields(static=self._FIELDS_STATIC,
3784 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3785 selected=self.op.output_fields)
3787 def ExpandNames(self):
3788 self.needed_locks = {}
3789 self.share_locks[locking.LEVEL_NODE] = 1
3792 self.needed_locks[locking.LEVEL_NODE] = \
3793 _GetWantedNodes(self, self.op.nodes)
3795 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3797 def Exec(self, feedback_fn):
3798 """Computes the list of nodes and their attributes.
3801 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3803 # Always get name to sort by
3804 if constants.SF_NAME in self.op.output_fields:
3805 fields = self.op.output_fields[:]
3807 fields = [constants.SF_NAME] + self.op.output_fields
3809 # Never ask for node or type as it's only known to the LU
3810 for extra in [constants.SF_NODE, constants.SF_TYPE]:
3811 while extra in fields:
3812 fields.remove(extra)
3814 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3815 name_idx = field_idx[constants.SF_NAME]
3817 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3818 data = self.rpc.call_storage_list(self.nodes,
3819 self.op.storage_type, st_args,
3820 self.op.name, fields)
3824 for node in utils.NiceSort(self.nodes):
3825 nresult = data[node]
3829 msg = nresult.fail_msg
3831 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3834 rows = dict([(row[name_idx], row) for row in nresult.payload])
3836 for name in utils.NiceSort(rows.keys()):
3841 for field in self.op.output_fields:
3842 if field == constants.SF_NODE:
3844 elif field == constants.SF_TYPE:
3845 val = self.op.storage_type
3846 elif field in field_idx:
3847 val = row[field_idx[field]]
3849 raise errors.ParameterError(field)
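# A standalone sketch of the field-index pattern used by the storage query
# above: build a name-to-column map once, then pull the requested fields out
# of each result row.  The names below are hypothetical.
def _select_fields(rows, fields, requested):
  field_idx = dict((name, idx) for idx, name in enumerate(fields))
  return [[row[field_idx[field]] for field in requested if field in field_idx]
          for row in rows]
# e.g. _select_fields([["vg0", 1024]], ["name", "size"], ["size"]) == [[1024]]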
3858 class _InstanceQuery(_QueryBase):
3859 FIELDS = query.INSTANCE_FIELDS
3861 def ExpandNames(self, lu):
3862 lu.needed_locks = {}
3863 lu.share_locks[locking.LEVEL_INSTANCE] = 1
3864 lu.share_locks[locking.LEVEL_NODE] = 1
3867 self.wanted = _GetWantedInstances(lu, self.names)
3869 self.wanted = locking.ALL_SET
3871 self.do_locking = (self.use_locking and
3872 query.IQ_LIVE in self.requested_data)
3874 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3875 lu.needed_locks[locking.LEVEL_NODE] = []
3876 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3878 def DeclareLocks(self, lu, level):
3879 if level == locking.LEVEL_NODE and self.do_locking:
3880 lu._LockInstancesNodes() # pylint: disable-msg=W0212
3882 def _GetQueryData(self, lu):
3883 """Computes the list of instances and their attributes.
3886 cluster = lu.cfg.GetClusterInfo()
3887 all_info = lu.cfg.GetAllInstancesInfo()
3889 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
3891 instance_list = [all_info[name] for name in instance_names]
3892 nodes = frozenset(itertools.chain(*(inst.all_nodes
3893 for inst in instance_list)))
3894 hv_list = list(set([inst.hypervisor for inst in instance_list]))
3897 wrongnode_inst = set()
3899 # Gather data as requested
3900 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
3902 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
3904 result = node_data[name]
3906 # offline nodes will be in both lists
3907 assert result.fail_msg
3908 offline_nodes.append(name)
3910 bad_nodes.append(name)
3911 elif result.payload:
3912 for inst in result.payload:
3913 if inst in all_info:
3914 if all_info[inst].primary_node == name:
3915 live_data.update(result.payload)
3917 wrongnode_inst.add(inst)
3919 # orphan instance; we don't list it here as we don't
3920 # handle this case yet in the output of instance listing
3921 logging.warning("Orphan instance '%s' found on node %s",
3923 # else no instance is alive
3927 if query.IQ_DISKUSAGE in self.requested_data:
3928 disk_usage = dict((inst.name,
3929 _ComputeDiskSize(inst.disk_template,
3930 [{"size": disk.size}
3931 for disk in inst.disks]))
3932 for inst in instance_list)
3936 if query.IQ_CONSOLE in self.requested_data:
3938 for inst in instance_list:
3939 if inst.name in live_data:
3940 # Instance is running
3941 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
3943 consinfo[inst.name] = None
3944 assert set(consinfo.keys()) == set(instance_names)
3948 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
3949 disk_usage, offline_nodes, bad_nodes,
3950 live_data, wrongnode_inst, consinfo)
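# A standalone sketch of the per-node result classification used above:
# offline nodes are recorded in both the offline and bad lists, failed RPCs
# only in the bad list, and successful payloads feed the live data.  The
# result dictionaries here are hypothetical stand-ins for RPC results.
def _classify_results(node_results):
  offline, bad, live = [], [], {}
  for node, res in node_results.items():
    if res.get("offline"):
      offline.append(node)
      bad.append(node)
    elif res.get("failed"):
      bad.append(node)
    elif res.get("payload"):
      live.update(res["payload"])
  return offline, bad, live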
3953 class LUQuery(NoHooksLU):
3954 """Query for resources/items of a certain kind.
3957 # pylint: disable-msg=W0142
3960 def CheckArguments(self):
3961 qcls = _GetQueryImplementation(self.op.what)
3962 names = qlang.ReadSimpleFilter("name", self.op.filter)
3964 self.impl = qcls(names, self.op.fields, False)
3966 def ExpandNames(self):
3967 self.impl.ExpandNames(self)
3969 def DeclareLocks(self, level):
3970 self.impl.DeclareLocks(self, level)
3972 def Exec(self, feedback_fn):
3973 return self.impl.NewStyleQuery(self)
3976 class LUQueryFields(NoHooksLU):
3977 """Query for resources/items of a certain kind.
3980 # pylint: disable-msg=W0142
3983 def CheckArguments(self):
3984 self.qcls = _GetQueryImplementation(self.op.what)
3986 def ExpandNames(self):
3987 self.needed_locks = {}
3989 def Exec(self, feedback_fn):
3990 return self.qcls.FieldsQuery(self.op.fields)
3993 class LUNodeModifyStorage(NoHooksLU):
3994 """Logical unit for modifying a storage volume on a node.
3999 def CheckArguments(self):
4000 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4002 storage_type = self.op.storage_type
4005 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
4007 raise errors.OpPrereqError("Storage units of type '%s' can not be"
4008 " modified" % storage_type,
4011 diff = set(self.op.changes.keys()) - modifiable
4013 raise errors.OpPrereqError("The following fields can not be modified for"
4014 " storage units of type '%s': %r" %
4015 (storage_type, list(diff)),
4018 def ExpandNames(self):
4019 self.needed_locks = {
4020 locking.LEVEL_NODE: self.op.node_name,
4023 def Exec(self, feedback_fn):
4024 """Computes the list of nodes and their attributes.
4027 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4028 result = self.rpc.call_storage_modify(self.op.node_name,
4029 self.op.storage_type, st_args,
4030 self.op.name, self.op.changes)
4031 result.Raise("Failed to modify storage unit '%s' on %s" %
4032 (self.op.name, self.op.node_name))
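# A standalone sketch of the whitelist check performed in CheckArguments
# above: reject any requested changes whose keys are not modifiable.  The
# names are hypothetical, not Ganeti APIs.
def _check_modifiable(changes, modifiable):
  diff = set(changes.keys()) - set(modifiable)
  if diff:
    raise ValueError("The following fields can not be modified: %s" %
                     ", ".join(sorted(diff)))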
4035 class LUNodeAdd(LogicalUnit):
4036 """Logical unit for adding node to the cluster.
4040 HTYPE = constants.HTYPE_NODE
4041 _NFLAGS = ["master_capable", "vm_capable"]
4043 def CheckArguments(self):
4044 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
4045 # validate/normalize the node name
4046 self.hostname = netutils.GetHostname(name=self.op.node_name,
4047 family=self.primary_ip_family)
4048 self.op.node_name = self.hostname.name
4049 if self.op.readd and self.op.group:
4050 raise errors.OpPrereqError("Cannot pass a node group when a node is"
4051 " being readded", errors.ECODE_INVAL)
4053 def BuildHooksEnv(self):
4056 This will run on all nodes before, and on all nodes + the new node after.
4060 "OP_TARGET": self.op.node_name,
4061 "NODE_NAME": self.op.node_name,
4062 "NODE_PIP": self.op.primary_ip,
4063 "NODE_SIP": self.op.secondary_ip,
4064 "MASTER_CAPABLE": str(self.op.master_capable),
4065 "VM_CAPABLE": str(self.op.vm_capable),
4067 nodes_0 = self.cfg.GetNodeList()
4068 nodes_1 = nodes_0 + [self.op.node_name, ]
4069 return env, nodes_0, nodes_1
4071 def CheckPrereq(self):
4072 """Check prerequisites.
4075 - the new node is not already in the config
4077 - its parameters (single/dual homed) match the cluster
4079 Any errors are signaled by raising errors.OpPrereqError.
4083 hostname = self.hostname
4084 node = hostname.name
4085 primary_ip = self.op.primary_ip = hostname.ip
4086 if self.op.secondary_ip is None:
4087 if self.primary_ip_family == netutils.IP6Address.family:
4088 raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
4089 " IPv4 address must be given as secondary",
4091 self.op.secondary_ip = primary_ip
4093 secondary_ip = self.op.secondary_ip
4094 if not netutils.IP4Address.IsValid(secondary_ip):
4095 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4096 " address" % secondary_ip, errors.ECODE_INVAL)
4098 node_list = cfg.GetNodeList()
4099 if not self.op.readd and node in node_list:
4100 raise errors.OpPrereqError("Node %s is already in the configuration" %
4101 node, errors.ECODE_EXISTS)
4102 elif self.op.readd and node not in node_list:
4103 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
4106 self.changed_primary_ip = False
4108 for existing_node_name in node_list:
4109 existing_node = cfg.GetNodeInfo(existing_node_name)
4111 if self.op.readd and node == existing_node_name:
4112 if existing_node.secondary_ip != secondary_ip:
4113 raise errors.OpPrereqError("Readded node doesn't have the same IP"
4114 " address configuration as before",
4116 if existing_node.primary_ip != primary_ip:
4117 self.changed_primary_ip = True
4121 if (existing_node.primary_ip == primary_ip or
4122 existing_node.secondary_ip == primary_ip or
4123 existing_node.primary_ip == secondary_ip or
4124 existing_node.secondary_ip == secondary_ip):
4125 raise errors.OpPrereqError("New node ip address(es) conflict with"
4126 " existing node %s" % existing_node.name,
4127 errors.ECODE_NOTUNIQUE)
4129 # After this 'if' block, None is no longer a valid value for the
4130 # _capable op attributes
4132 old_node = self.cfg.GetNodeInfo(node)
4133 assert old_node is not None, "Can't retrieve locked node %s" % node
4134 for attr in self._NFLAGS:
4135 if getattr(self.op, attr) is None:
4136 setattr(self.op, attr, getattr(old_node, attr))
4138 for attr in self._NFLAGS:
4139 if getattr(self.op, attr) is None:
4140 setattr(self.op, attr, True)
4142 if self.op.readd and not self.op.vm_capable:
4143 pri, sec = cfg.GetNodeInstances(node)
4145 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
4146 " flag set to false, but it already holds"
4147 " instances" % node,
4150 # check that the type of the node (single versus dual homed) is the
4151 # same as for the master
4152 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
4153 master_singlehomed = myself.secondary_ip == myself.primary_ip
4154 newbie_singlehomed = secondary_ip == primary_ip
4155 if master_singlehomed != newbie_singlehomed:
4156 if master_singlehomed:
4157 raise errors.OpPrereqError("The master has no secondary ip but the"
4158 " new node has one",
4161 raise errors.OpPrereqError("The master has a secondary ip but the"
4162 " new node doesn't have one",
4165 # checks reachability
4166 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
4167 raise errors.OpPrereqError("Node not reachable by ping",
4168 errors.ECODE_ENVIRON)
4170 if not newbie_singlehomed:
4171 # check reachability from my secondary ip to newbie's secondary ip
4172 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
4173 source=myself.secondary_ip):
4174 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4175 " based ping to node daemon port",
4176 errors.ECODE_ENVIRON)
4183 if self.op.master_capable:
4184 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
4186 self.master_candidate = False
4189 self.new_node = old_node
4191 node_group = cfg.LookupNodeGroup(self.op.group)
4192 self.new_node = objects.Node(name=node,
4193 primary_ip=primary_ip,
4194 secondary_ip=secondary_ip,
4195 master_candidate=self.master_candidate,
4196 offline=False, drained=False,
4199 if self.op.ndparams:
4200 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
4202 def Exec(self, feedback_fn):
4203 """Adds the new node to the cluster.
4206 new_node = self.new_node
4207 node = new_node.name
4209 # We are adding a new node, so we assume it is powered
4210 new_node.powered = True
4212 # for re-adds, reset the offline/drained/master-candidate flags;
4213 # we need to reset here, otherwise offline would prevent RPC calls
4214 # later in the procedure; this also means that if the re-add
4215 # fails, we are left with a non-offlined, broken node
4217 new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
4218 self.LogInfo("Readding a node, the offline/drained flags were reset")
4219 # if we demote the node, we do cleanup later in the procedure
4220 new_node.master_candidate = self.master_candidate
4221 if self.changed_primary_ip:
4222 new_node.primary_ip = self.op.primary_ip
4224 # copy the master/vm_capable flags
4225 for attr in self._NFLAGS:
4226 setattr(new_node, attr, getattr(self.op, attr))
4228 # notify the user about any possible mc promotion
4229 if new_node.master_candidate:
4230 self.LogInfo("Node will be a master candidate")
4232 if self.op.ndparams:
4233 new_node.ndparams = self.op.ndparams
4235 new_node.ndparams = {}
4237 # check connectivity
4238 result = self.rpc.call_version([node])[node]
4239 result.Raise("Can't get version information from node %s" % node)
4240 if constants.PROTOCOL_VERSION == result.payload:
4241 logging.info("Communication to node %s fine, sw version %s match",
4242 node, result.payload)
4244 raise errors.OpExecError("Version mismatch master version %s,"
4245 " node version %s" %
4246 (constants.PROTOCOL_VERSION, result.payload))
4248 # Add node to our /etc/hosts, and add key to known_hosts
4249 if self.cfg.GetClusterInfo().modify_etc_hosts:
4250 master_node = self.cfg.GetMasterNode()
4251 result = self.rpc.call_etc_hosts_modify(master_node,
4252 constants.ETC_HOSTS_ADD,
4255 result.Raise("Can't update hosts file with new host data")
4257 if new_node.secondary_ip != new_node.primary_ip:
4258 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
4261 node_verify_list = [self.cfg.GetMasterNode()]
4262 node_verify_param = {
4263 constants.NV_NODELIST: [node],
4264 # TODO: do a node-net-test as well?
4267 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
4268 self.cfg.GetClusterName())
4269 for verifier in node_verify_list:
4270 result[verifier].Raise("Cannot communicate with node %s" % verifier)
4271 nl_payload = result[verifier].payload[constants.NV_NODELIST]
4273 for failed in nl_payload:
4274 feedback_fn("ssh/hostname verification failed"
4275 " (checking from %s): %s" %
4276 (verifier, nl_payload[failed]))
4277 raise errors.OpExecError("ssh/hostname verification failed.")
4280 _RedistributeAncillaryFiles(self)
4281 self.context.ReaddNode(new_node)
4282 # make sure we redistribute the config
4283 self.cfg.Update(new_node, feedback_fn)
4284 # and make sure the new node will not have old files around
4285 if not new_node.master_candidate:
4286 result = self.rpc.call_node_demote_from_mc(new_node.name)
4287 msg = result.fail_msg
4289 self.LogWarning("Node failed to demote itself from master"
4290 " candidate status: %s" % msg)
4292 _RedistributeAncillaryFiles(self, additional_nodes=[node],
4293 additional_vm=self.op.vm_capable)
4294 self.context.AddNode(new_node, self.proc.GetECId())
4297 class LUNodeSetParams(LogicalUnit):
4298 """Modifies the parameters of a node.
4300 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
4301 to the node role (as _ROLE_*)
4302 @cvar _R2F: a dictionary from node role to tuples of flags
4303 @cvar _FLAGS: a list of attribute names corresponding to the flags
4306 HPATH = "node-modify"
4307 HTYPE = constants.HTYPE_NODE
4309 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
4311 (True, False, False): _ROLE_CANDIDATE,
4312 (False, True, False): _ROLE_DRAINED,
4313 (False, False, True): _ROLE_OFFLINE,
4314 (False, False, False): _ROLE_REGULAR,
4316 _R2F = dict((v, k) for k, v in _F2R.items())
4317 _FLAGS = ["master_candidate", "drained", "offline"]
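# Illustrative note: _F2R maps the (master_candidate, drained, offline) flag
# tuple to a role and _R2F is its inverse.  A minimal standalone sketch of
# the same idea, with hypothetical names:
#   F2R = {(True, False, False): "candidate",
#          (False, True, False): "drained",
#          (False, False, True): "offline",
#          (False, False, False): "regular"}
#   R2F = dict((v, k) for k, v in F2R.items())
#   assert R2F["drained"] == (False, True, False)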
4319 def CheckArguments(self):
4320 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4321 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
4322 self.op.master_capable, self.op.vm_capable,
4323 self.op.secondary_ip, self.op.ndparams]
4324 if all_mods.count(None) == len(all_mods):
4325 raise errors.OpPrereqError("Please pass at least one modification",
4327 if all_mods.count(True) > 1:
4328 raise errors.OpPrereqError("Can't set the node into more than one"
4329 " state at the same time",
4332 # Boolean value that tells us whether we might be demoting from MC
4333 self.might_demote = (self.op.master_candidate == False or
4334 self.op.offline == True or
4335 self.op.drained == True or
4336 self.op.master_capable == False)
4338 if self.op.secondary_ip:
4339 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
4340 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4341 " address" % self.op.secondary_ip,
4344 self.lock_all = self.op.auto_promote and self.might_demote
4345 self.lock_instances = self.op.secondary_ip is not None
4347 def ExpandNames(self):
4349 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
4351 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
4353 if self.lock_instances:
4354 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4356 def DeclareLocks(self, level):
4357 # If we have locked all instances, then before waiting to lock nodes we
4358 # release all the ones living on nodes unrelated to the current operation.
4359 if level == locking.LEVEL_NODE and self.lock_instances:
4360 instances_release = []
4362 self.affected_instances = []
4363 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4364 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
4365 instance = self.context.cfg.GetInstanceInfo(instance_name)
4366 i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
4367 if i_mirrored and self.op.node_name in instance.all_nodes:
4368 instances_keep.append(instance_name)
4369 self.affected_instances.append(instance)
4371 instances_release.append(instance_name)
4372 if instances_release:
4373 self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
4374 self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep
4376 def BuildHooksEnv(self):
4379 This runs on the master node.
4383 "OP_TARGET": self.op.node_name,
4384 "MASTER_CANDIDATE": str(self.op.master_candidate),
4385 "OFFLINE": str(self.op.offline),
4386 "DRAINED": str(self.op.drained),
4387 "MASTER_CAPABLE": str(self.op.master_capable),
4388 "VM_CAPABLE": str(self.op.vm_capable),
4390 nl = [self.cfg.GetMasterNode(),
4394 def CheckPrereq(self):
4395 """Check prerequisites.
4397 This only checks the instance list against the existing names.
4400 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
4402 if (self.op.master_candidate is not None or
4403 self.op.drained is not None or
4404 self.op.offline is not None):
4405 # we can't change the master's node flags
4406 if self.op.node_name == self.cfg.GetMasterNode():
4407 raise errors.OpPrereqError("The master role can be changed"
4408 " only via master-failover",
4411 if self.op.master_candidate and not node.master_capable:
4412 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
4413 " it a master candidate" % node.name,
4416 if self.op.vm_capable == False:
4417 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
4419 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
4420 " the vm_capable flag" % node.name,
4423 if node.master_candidate and self.might_demote and not self.lock_all:
4424 assert not self.op.auto_promote, "auto_promote set but lock_all not"
4425 # check if after removing the current node, we're missing master
4427 (mc_remaining, mc_should, _) = \
4428 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
4429 if mc_remaining < mc_should:
4430 raise errors.OpPrereqError("Not enough master candidates, please"
4431 " pass auto promote option to allow"
4432 " promotion", errors.ECODE_STATE)
4434 self.old_flags = old_flags = (node.master_candidate,
4435 node.drained, node.offline)
4436 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
4437 self.old_role = old_role = self._F2R[old_flags]
4439 # Check for ineffective changes
4440 for attr in self._FLAGS:
4441 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
4442 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
4443 setattr(self.op, attr, None)
4445 # Past this point, any flag change to False means a transition
4446 # away from the respective state, as only real changes are kept
4448 # TODO: We might query the real power state if it supports OOB
4449 if _SupportsOob(self.cfg, node):
4450 if self.op.offline is False and not (node.powered or
4451 self.op.powered == True):
4452 raise errors.OpPrereqError(("Please power on node %s first before you"
4453 " can reset offline state") %
4455 elif self.op.powered is not None:
4456 raise errors.OpPrereqError(("Unable to change powered state for node %s"
4457 " which does not support out-of-band"
4458 " handling") % self.op.node_name)
4460 # If we're being deofflined/drained, we'll MC ourself if needed
4461 if (self.op.drained == False or self.op.offline == False or
4462 (self.op.master_capable and not node.master_capable)):
4463 if _DecideSelfPromotion(self):
4464 self.op.master_candidate = True
4465 self.LogInfo("Auto-promoting node to master candidate")
4467 # If we're no longer master capable, we'll demote ourselves from MC
4468 if self.op.master_capable == False and node.master_candidate:
4469 self.LogInfo("Demoting from master candidate")
4470 self.op.master_candidate = False
4473 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
4474 if self.op.master_candidate:
4475 new_role = self._ROLE_CANDIDATE
4476 elif self.op.drained:
4477 new_role = self._ROLE_DRAINED
4478 elif self.op.offline:
4479 new_role = self._ROLE_OFFLINE
4480 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
4481 # False is still in new flags, which means we're un-setting (the
4483 new_role = self._ROLE_REGULAR
4484 else: # no new flags, nothing, keep old role
4487 self.new_role = new_role
4489 if old_role == self._ROLE_OFFLINE and new_role != old_role:
4490 # Trying to transition out of offline status
4491 result = self.rpc.call_version([node.name])[node.name]
4493 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
4494 " to report its version: %s" %
4495 (node.name, result.fail_msg),
4498 self.LogWarning("Transitioning node from offline to online state"
4499 " without using re-add. Please make sure the node"
4502 if self.op.secondary_ip:
4503 # Ok even without locking, because this can't be changed by any LU
4504 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
4505 master_singlehomed = master.secondary_ip == master.primary_ip
4506 if master_singlehomed and self.op.secondary_ip:
4507 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
4508 " homed cluster", errors.ECODE_INVAL)
4511 if self.affected_instances:
4512 raise errors.OpPrereqError("Cannot change secondary ip: offline"
4513 " node has instances (%s) configured"
4514 " to use it" % self.affected_instances)
4516 # On online nodes, check that no instances are running, and that
4517 # the node has the new ip and we can reach it.
4518 for instance in self.affected_instances:
4519 _CheckInstanceDown(self, instance, "cannot change secondary ip")
4521 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
4522 if master.name != node.name:
4523 # check reachability from master secondary ip to new secondary ip
4524 if not netutils.TcpPing(self.op.secondary_ip,
4525 constants.DEFAULT_NODED_PORT,
4526 source=master.secondary_ip):
4527 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4528 " based ping to node daemon port",
4529 errors.ECODE_ENVIRON)
4531 if self.op.ndparams:
4532 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
4533 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
4534 self.new_ndparams = new_ndparams
4536 def Exec(self, feedback_fn):
4541 old_role = self.old_role
4542 new_role = self.new_role
4546 if self.op.ndparams:
4547 node.ndparams = self.new_ndparams
4549 if self.op.powered is not None:
4550 node.powered = self.op.powered
4552 for attr in ["master_capable", "vm_capable"]:
4553 val = getattr(self.op, attr)
4555 setattr(node, attr, val)
4556 result.append((attr, str(val)))
4558 if new_role != old_role:
4559 # Tell the node to demote itself, if no longer MC and not offline
4560 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
4561 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
4563 self.LogWarning("Node failed to demote itself: %s", msg)
4565 new_flags = self._R2F[new_role]
4566 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
4568 result.append((desc, str(nf)))
4569 (node.master_candidate, node.drained, node.offline) = new_flags
4571 # we locked all nodes, so adjust the candidate pool before updating this node
4573 _AdjustCandidatePool(self, [node.name])
4575 if self.op.secondary_ip:
4576 node.secondary_ip = self.op.secondary_ip
4577 result.append(("secondary_ip", self.op.secondary_ip))
4579 # this will trigger configuration file update, if needed
4580 self.cfg.Update(node, feedback_fn)
4582 # this will trigger job queue propagation or cleanup if the mc
4584 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
4585 self.context.ReaddNode(node)
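# A standalone sketch of the flag-application step in Exec above: compare the
# old and new flag tuples positionally and record only the entries that
# actually changed.  Names are hypothetical.
def _apply_flag_changes(old_flags, new_flags, flag_names):
  changes = []
  for old, new, name in zip(old_flags, new_flags, flag_names):
    if old != new:
      changes.append((name, str(new)))
  return changes
# e.g. _apply_flag_changes((True, False, False), (False, False, True),
#                          ["master_candidate", "drained", "offline"])
# yields [("master_candidate", "False"), ("offline", "True")]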
4590 class LUNodePowercycle(NoHooksLU):
4591 """Powercycles a node.
4596 def CheckArguments(self):
4597 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4598 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4599 raise errors.OpPrereqError("The node is the master and the force"
4600 " parameter was not set",
4603 def ExpandNames(self):
4604 """Locking for PowercycleNode.
4606 This is a last-resort option and shouldn't block on other
4607 jobs. Therefore, we grab no locks.
4610 self.needed_locks = {}
4612 def Exec(self, feedback_fn):
4616 result = self.rpc.call_node_powercycle(self.op.node_name,
4617 self.cfg.GetHypervisorType())
4618 result.Raise("Failed to schedule the reboot")
4619 return result.payload
4622 class LUClusterQuery(NoHooksLU):
4623 """Query cluster configuration.
4628 def ExpandNames(self):
4629 self.needed_locks = {}
4631 def Exec(self, feedback_fn):
4632 """Return cluster config.
4635 cluster = self.cfg.GetClusterInfo()
4638 # Filter just for enabled hypervisors
4639 for os_name, hv_dict in cluster.os_hvp.items():
4640 os_hvp[os_name] = {}
4641 for hv_name, hv_params in hv_dict.items():
4642 if hv_name in cluster.enabled_hypervisors:
4643 os_hvp[os_name][hv_name] = hv_params
4645 # Convert ip_family to ip_version
4646 primary_ip_version = constants.IP4_VERSION
4647 if cluster.primary_ip_family == netutils.IP6Address.family:
4648 primary_ip_version = constants.IP6_VERSION
4651 "software_version": constants.RELEASE_VERSION,
4652 "protocol_version": constants.PROTOCOL_VERSION,
4653 "config_version": constants.CONFIG_VERSION,
4654 "os_api_version": max(constants.OS_API_VERSIONS),
4655 "export_version": constants.EXPORT_VERSION,
4656 "architecture": (platform.architecture()[0], platform.machine()),
4657 "name": cluster.cluster_name,
4658 "master": cluster.master_node,
4659 "default_hypervisor": cluster.enabled_hypervisors[0],
4660 "enabled_hypervisors": cluster.enabled_hypervisors,
4661 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4662 for hypervisor_name in cluster.enabled_hypervisors]),
4664 "beparams": cluster.beparams,
4665 "osparams": cluster.osparams,
4666 "nicparams": cluster.nicparams,
4667 "ndparams": cluster.ndparams,
4668 "candidate_pool_size": cluster.candidate_pool_size,
4669 "master_netdev": cluster.master_netdev,
4670 "volume_group_name": cluster.volume_group_name,
4671 "drbd_usermode_helper": cluster.drbd_usermode_helper,
4672 "file_storage_dir": cluster.file_storage_dir,
4673 "maintain_node_health": cluster.maintain_node_health,
4674 "ctime": cluster.ctime,
4675 "mtime": cluster.mtime,
4676 "uuid": cluster.uuid,
4677 "tags": list(cluster.GetTags()),
4678 "uid_pool": cluster.uid_pool,
4679 "default_iallocator": cluster.default_iallocator,
4680 "reserved_lvs": cluster.reserved_lvs,
4681 "primary_ip_version": primary_ip_version,
4682 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
4683 "hidden_os": cluster.hidden_os,
4684 "blacklisted_os": cluster.blacklisted_os,
4690 class LUClusterConfigQuery(NoHooksLU):
4691 """Return configuration values.
4695 _FIELDS_DYNAMIC = utils.FieldSet()
4696 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4697 "watcher_pause", "volume_group_name")
4699 def CheckArguments(self):
4700 _CheckOutputFields(static=self._FIELDS_STATIC,
4701 dynamic=self._FIELDS_DYNAMIC,
4702 selected=self.op.output_fields)
4704 def ExpandNames(self):
4705 self.needed_locks = {}
4707 def Exec(self, feedback_fn):
4708 """Dump a representation of the cluster config to the standard output.
4712 for field in self.op.output_fields:
4713 if field == "cluster_name":
4714 entry = self.cfg.GetClusterName()
4715 elif field == "master_node":
4716 entry = self.cfg.GetMasterNode()
4717 elif field == "drain_flag":
4718 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4719 elif field == "watcher_pause":
4720 entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4721 elif field == "volume_group_name":
4722 entry = self.cfg.GetVGName()
4724 raise errors.ParameterError(field)
4725 values.append(entry)
4729 class LUInstanceActivateDisks(NoHooksLU):
4730 """Bring up an instance's disks.
4735 def ExpandNames(self):
4736 self._ExpandAndLockInstance()
4737 self.needed_locks[locking.LEVEL_NODE] = []
4738 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4740 def DeclareLocks(self, level):
4741 if level == locking.LEVEL_NODE:
4742 self._LockInstancesNodes()
4744 def CheckPrereq(self):
4745 """Check prerequisites.
4747 This checks that the instance is in the cluster.
4750 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4751 assert self.instance is not None, \
4752 "Cannot retrieve locked instance %s" % self.op.instance_name
4753 _CheckNodeOnline(self, self.instance.primary_node)
4755 def Exec(self, feedback_fn):
4756 """Activate the disks.
4759 disks_ok, disks_info = \
4760 _AssembleInstanceDisks(self, self.instance,
4761 ignore_size=self.op.ignore_size)
4763 raise errors.OpExecError("Cannot activate block devices")
4768 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4770 """Prepare the block devices for an instance.
4772 This sets up the block devices on all nodes.
4774 @type lu: L{LogicalUnit}
4775 @param lu: the logical unit on whose behalf we execute
4776 @type instance: L{objects.Instance}
4777 @param instance: the instance for whose disks we assemble
4778 @type disks: list of L{objects.Disk} or None
4779 @param disks: which disks to assemble (or all, if None)
4780 @type ignore_secondaries: boolean
4781 @param ignore_secondaries: if true, errors on secondary nodes
4782 won't result in an error return from the function
4783 @type ignore_size: boolean
4784 @param ignore_size: if true, the current known size of the disk
4785 will not be used during the disk activation, useful for cases
4786 when the size is wrong
4787 @return: False if the operation failed, otherwise a list of
4788 (host, instance_visible_name, node_visible_name)
4789 with the mapping from node devices to instance devices
4794 iname = instance.name
4795 disks = _ExpandCheckDisks(instance, disks)
4797 # With the two-pass mechanism we try to reduce the window of
4798 # opportunity for the race condition of switching DRBD to primary
4799 # before handshaking occurred, but we do not eliminate it
4801 # The proper fix would be to wait (with some limits) until the
4802 # connection has been made and drbd transitions from WFConnection
4803 # into any other network-connected state (Connected, SyncTarget,
4806 # 1st pass, assemble on all nodes in secondary mode
4807 for idx, inst_disk in enumerate(disks):
4808 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4810 node_disk = node_disk.Copy()
4811 node_disk.UnsetSize()
4812 lu.cfg.SetDiskID(node_disk, node)
4813 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
4814 msg = result.fail_msg
4816 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4817 " (is_primary=False, pass=1): %s",
4818 inst_disk.iv_name, node, msg)
4819 if not ignore_secondaries:
4822 # FIXME: race condition on drbd migration to primary
4824 # 2nd pass, do only the primary node
4825 for idx, inst_disk in enumerate(disks):
4828 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4829 if node != instance.primary_node:
4832 node_disk = node_disk.Copy()
4833 node_disk.UnsetSize()
4834 lu.cfg.SetDiskID(node_disk, node)
4835 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
4836 msg = result.fail_msg
4838 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4839 " (is_primary=True, pass=2): %s",
4840 inst_disk.iv_name, node, msg)
4843 dev_path = result.payload
4845 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4847 # leave the disks configured for the primary node
4848 # this is a workaround that would be fixed better by
4849 # improving the logical/physical id handling
4851 lu.cfg.SetDiskID(disk, instance.primary_node)
4853 return disks_ok, device_info
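# A standalone sketch of the two-pass assembly described in the comments
# above, stripped of the RPC details: first bring every disk up in secondary
# mode on all of its nodes so the DRBD peers can handshake, then promote it
# on the instance's primary node only.  assemble() and the disk layout used
# here are hypothetical stand-ins.
def _two_pass_assemble(disks, primary_node, assemble):
  # pass 1: secondary mode everywhere
  for disk in disks:
    for node in disk["nodes"]:
      assemble(node, disk, as_primary=False)
  # pass 2: primary mode, only on the primary node
  for disk in disks:
    assemble(primary_node, disk, as_primary=True)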
4856 def _StartInstanceDisks(lu, instance, force):
4857 """Start the disks of an instance.
4860 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4861 ignore_secondaries=force)
4863 _ShutdownInstanceDisks(lu, instance)
4864 if force is not None and not force:
4865 lu.proc.LogWarning("", hint="If the message above refers to a"
4867 " you can retry the operation using '--force'.")
4868 raise errors.OpExecError("Disk consistency error")
4871 class LUInstanceDeactivateDisks(NoHooksLU):
4872 """Shutdown an instance's disks.
4877 def ExpandNames(self):
4878 self._ExpandAndLockInstance()
4879 self.needed_locks[locking.LEVEL_NODE] = []
4880 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4882 def DeclareLocks(self, level):
4883 if level == locking.LEVEL_NODE:
4884 self._LockInstancesNodes()
4886 def CheckPrereq(self):
4887 """Check prerequisites.
4889 This checks that the instance is in the cluster.
4892 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4893 assert self.instance is not None, \
4894 "Cannot retrieve locked instance %s" % self.op.instance_name
4896 def Exec(self, feedback_fn):
4897 """Deactivate the disks
4900 instance = self.instance
4902 _ShutdownInstanceDisks(self, instance)
4904 _SafeShutdownInstanceDisks(self, instance)
4907 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4908 """Shutdown block devices of an instance.
4910 This function checks if an instance is running, before calling
4911 _ShutdownInstanceDisks.
4914 _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4915 _ShutdownInstanceDisks(lu, instance, disks=disks)
4918 def _ExpandCheckDisks(instance, disks):
4919 """Return the instance disks selected by the disks list
4921 @type disks: list of L{objects.Disk} or None
4922 @param disks: selected disks
4923 @rtype: list of L{objects.Disk}
4924 @return: selected instance disks to act on
4928 return instance.disks
4930 if not set(disks).issubset(instance.disks):
4931 raise errors.ProgrammerError("Can only act on disks belonging to the"
4936 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4937 """Shutdown block devices of an instance.
4939 This does the shutdown on all nodes of the instance.
4941 If ignore_primary is false, errors on the primary node are
4946 disks = _ExpandCheckDisks(instance, disks)
4949 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4950 lu.cfg.SetDiskID(top_disk, node)
4951 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4952 msg = result.fail_msg
4954 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4955 disk.iv_name, node, msg)
4956 if ((node == instance.primary_node and not ignore_primary) or
4957 (node != instance.primary_node and not result.offline)):
4962 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4963 """Checks if a node has enough free memory.
4965 This function checks if a given node has the needed amount of free
4966 memory. In case the node has less memory or we cannot get the
4967 information from the node, this function raises an OpPrereqError
4970 @type lu: C{LogicalUnit}
4971 @param lu: a logical unit from which we get configuration data
4973 @param node: the node to check
4974 @type reason: C{str}
4975 @param reason: string to use in the error message
4976 @type requested: C{int}
4977 @param requested: the amount of memory in MiB to check for
4978 @type hypervisor_name: C{str}
4979 @param hypervisor_name: the hypervisor to ask for memory stats
4980 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4981 we cannot check the node
4984 nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
4985 nodeinfo[node].Raise("Can't get data from node %s" % node,
4986 prereq=True, ecode=errors.ECODE_ENVIRON)
4987 free_mem = nodeinfo[node].payload.get('memory_free', None)
4988 if not isinstance(free_mem, int):
4989 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4990 " was '%s'" % (node, free_mem),
4991 errors.ECODE_ENVIRON)
4992 if requested > free_mem:
4993 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4994 " needed %s MiB, available %s MiB" %
4995 (node, reason, requested, free_mem),
4999 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
5000 """Checks if nodes have enough free disk space in the all VGs.
5002 This function check if all given nodes have the needed amount of
5003 free disk. In case any node has less disk or we cannot get the
5004 information from the node, this function raise an OpPrereqError
5007 @type lu: C{LogicalUnit}
5008 @param lu: a logical unit from which we get configuration data
5009 @type nodenames: C{list}
5010 @param nodenames: the list of node names to check
5011 @type req_sizes: C{dict}
5012 @param req_sizes: the hash of vg and corresponding amount of disk in
5014 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5015 or we cannot check the node
5018 for vg, req_size in req_sizes.items():
5019 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
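# A standalone sketch of the common shape of the free-resource checks in this
# area: read a reported value, make sure it is an integer, then compare it
# with the requested amount.  All names are hypothetical.
def _check_free(node, resource, requested, reported):
  if not isinstance(reported, int):
    raise ValueError("Can't compute free %s on node %s, result was %r" %
                     (resource, node, reported))
  if requested > reported:
    raise ValueError("Not enough %s on node %s: needed %d MiB, found %d MiB" %
                     (resource, node, requested, reported))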
5022 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5023 """Checks if nodes have enough free disk space in the specified VG.
5025 This function checks if all given nodes have the needed amount of
5026 free disk. In case any node has less disk or we cannot get the
5027 information from the node, this function raises an OpPrereqError
5030 @type lu: C{LogicalUnit}
5031 @param lu: a logical unit from which we get configuration data
5032 @type nodenames: C{list}
5033 @param nodenames: the list of node names to check
5035 @param vg: the volume group to check
5036 @type requested: C{int}
5037 @param requested: the amount of disk in MiB to check for
5038 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5039 or we cannot check the node
5042 nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5043 for node in nodenames:
5044 info = nodeinfo[node]
5045 info.Raise("Cannot get current information from node %s" % node,
5046 prereq=True, ecode=errors.ECODE_ENVIRON)
5047 vg_free = info.payload.get("vg_free", None)
5048 if not isinstance(vg_free, int):
5049 raise errors.OpPrereqError("Can't compute free disk space on node"
5050 " %s for vg %s, result was '%s'" %
5051 (node, vg, vg_free), errors.ECODE_ENVIRON)
5052 if requested > vg_free:
5053 raise errors.OpPrereqError("Not enough disk space on target node %s"
5054 " vg %s: required %d MiB, available %d MiB" %
5055 (node, vg, requested, vg_free),
5059 class LUInstanceStartup(LogicalUnit):
5060 """Starts an instance.
5063 HPATH = "instance-start"
5064 HTYPE = constants.HTYPE_INSTANCE
5067 def CheckArguments(self):
5069 if self.op.beparams:
5070 # fill the beparams dict
5071 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5073 def ExpandNames(self):
5074 self._ExpandAndLockInstance()
5076 def BuildHooksEnv(self):
5079 This runs on master, primary and secondary nodes of the instance.
5083 "FORCE": self.op.force,
5085 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5086 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5089 def CheckPrereq(self):
5090 """Check prerequisites.
5092 This checks that the instance is in the cluster.
5095 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5096 assert self.instance is not None, \
5097 "Cannot retrieve locked instance %s" % self.op.instance_name
5100 if self.op.hvparams:
5101 # check hypervisor parameter syntax (locally)
5102 cluster = self.cfg.GetClusterInfo()
5103 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5104 filled_hvp = cluster.FillHV(instance)
5105 filled_hvp.update(self.op.hvparams)
5106 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
5107 hv_type.CheckParameterSyntax(filled_hvp)
5108 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
5110 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
5112 if self.primary_offline and self.op.ignore_offline_nodes:
5113 self.proc.LogWarning("Ignoring offline primary node")
5115 if self.op.hvparams or self.op.beparams:
5116 self.proc.LogWarning("Overridden parameters are ignored")
5118 _CheckNodeOnline(self, instance.primary_node)
5120 bep = self.cfg.GetClusterInfo().FillBE(instance)
5122 # check bridges existence
5123 _CheckInstanceBridgesExist(self, instance)
5125 remote_info = self.rpc.call_instance_info(instance.primary_node,
5127 instance.hypervisor)
5128 remote_info.Raise("Error checking node %s" % instance.primary_node,
5129 prereq=True, ecode=errors.ECODE_ENVIRON)
5130 if not remote_info.payload: # not running already
5131 _CheckNodeFreeMemory(self, instance.primary_node,
5132 "starting instance %s" % instance.name,
5133 bep[constants.BE_MEMORY], instance.hypervisor)
5135 def Exec(self, feedback_fn):
5136 """Start the instance.
5139 instance = self.instance
5140 force = self.op.force
5142 self.cfg.MarkInstanceUp(instance.name)
5144 if self.primary_offline:
5145 assert self.op.ignore_offline_nodes
5146 self.proc.LogInfo("Primary node offline, marked instance as started")
5148 node_current = instance.primary_node
5150 _StartInstanceDisks(self, instance, force)
5152 result = self.rpc.call_instance_start(node_current, instance,
5153 self.op.hvparams, self.op.beparams)
5154 msg = result.fail_msg
5156 _ShutdownInstanceDisks(self, instance)
5157 raise errors.OpExecError("Could not start instance: %s" % msg)
5160 class LUInstanceReboot(LogicalUnit):
5161 """Reboot an instance.
5164 HPATH = "instance-reboot"
5165 HTYPE = constants.HTYPE_INSTANCE
5168 def ExpandNames(self):
5169 self._ExpandAndLockInstance()
5171 def BuildHooksEnv(self):
5174 This runs on master, primary and secondary nodes of the instance.
5178 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
5179 "REBOOT_TYPE": self.op.reboot_type,
5180 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5182 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5183 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5186 def CheckPrereq(self):
5187 """Check prerequisites.
5189 This checks that the instance is in the cluster.
5192 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5193 assert self.instance is not None, \
5194 "Cannot retrieve locked instance %s" % self.op.instance_name
5196 _CheckNodeOnline(self, instance.primary_node)
5198 # check bridges existence
5199 _CheckInstanceBridgesExist(self, instance)
5201 def Exec(self, feedback_fn):
5202 """Reboot the instance.
5205 instance = self.instance
5206 ignore_secondaries = self.op.ignore_secondaries
5207 reboot_type = self.op.reboot_type
5209 node_current = instance.primary_node
5211 if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
5212 constants.INSTANCE_REBOOT_HARD]:
5213 for disk in instance.disks:
5214 self.cfg.SetDiskID(disk, node_current)
5215 result = self.rpc.call_instance_reboot(node_current, instance,
5217 self.op.shutdown_timeout)
5218 result.Raise("Could not reboot instance")
5220 result = self.rpc.call_instance_shutdown(node_current, instance,
5221 self.op.shutdown_timeout)
5222 result.Raise("Could not shutdown instance for full reboot")
5223 _ShutdownInstanceDisks(self, instance)
5224 _StartInstanceDisks(self, instance, ignore_secondaries)
5225 result = self.rpc.call_instance_start(node_current, instance, None, None)
5226 msg = result.fail_msg
5228 _ShutdownInstanceDisks(self, instance)
5229 raise errors.OpExecError("Could not start instance for"
5230 " full reboot: %s" % msg)
5232 self.cfg.MarkInstanceUp(instance.name)
5235 class LUInstanceShutdown(LogicalUnit):
5236 """Shutdown an instance.
5239 HPATH = "instance-stop"
5240 HTYPE = constants.HTYPE_INSTANCE
5243 def ExpandNames(self):
5244 self._ExpandAndLockInstance()
5246 def BuildHooksEnv(self):
5249 This runs on master, primary and secondary nodes of the instance.
5252 env = _BuildInstanceHookEnvByObject(self, self.instance)
5253 env["TIMEOUT"] = self.op.timeout
5254 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5257 def CheckPrereq(self):
5258 """Check prerequisites.
5260 This checks that the instance is in the cluster.
5263 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5264 assert self.instance is not None, \
5265 "Cannot retrieve locked instance %s" % self.op.instance_name
5267 self.primary_offline = \
5268 self.cfg.GetNodeInfo(self.instance.primary_node).offline
5270 if self.primary_offline and self.op.ignore_offline_nodes:
5271 self.proc.LogWarning("Ignoring offline primary node")
5273 _CheckNodeOnline(self, self.instance.primary_node)
5275 def Exec(self, feedback_fn):
5276 """Shutdown the instance.
5279 instance = self.instance
5280 node_current = instance.primary_node
5281 timeout = self.op.timeout
5283 self.cfg.MarkInstanceDown(instance.name)
5285 if self.primary_offline:
5286 assert self.op.ignore_offline_nodes
5287 self.proc.LogInfo("Primary node offline, marked instance as stopped")
5289 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5290 msg = result.fail_msg
5292 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5294 _ShutdownInstanceDisks(self, instance)
5297 class LUInstanceReinstall(LogicalUnit):
5298 """Reinstall an instance.
5301 HPATH = "instance-reinstall"
5302 HTYPE = constants.HTYPE_INSTANCE
5305 def ExpandNames(self):
5306 self._ExpandAndLockInstance()
5308 def BuildHooksEnv(self):
5311 This runs on master, primary and secondary nodes of the instance.
5314 env = _BuildInstanceHookEnvByObject(self, self.instance)
5315 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5318 def CheckPrereq(self):
5319 """Check prerequisites.
5321 This checks that the instance is in the cluster and is not running.
5324 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5325 assert instance is not None, \
5326 "Cannot retrieve locked instance %s" % self.op.instance_name
5327 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
5328 " offline, cannot reinstall")
5329 for node in instance.secondary_nodes:
5330 _CheckNodeOnline(self, node, "Instance secondary node offline,"
5331 " cannot reinstall")
5333 if instance.disk_template == constants.DT_DISKLESS:
5334 raise errors.OpPrereqError("Instance '%s' has no disks" %
5335 self.op.instance_name,
5337 _CheckInstanceDown(self, instance, "cannot reinstall")
5339 if self.op.os_type is not None:
5341 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
5342 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
5343 instance_os = self.op.os_type
5345 instance_os = instance.os
5347 nodelist = list(instance.all_nodes)
5349 if self.op.osparams:
5350 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
5351 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
5352 self.os_inst = i_osdict # the new dict (without defaults)
5356 self.instance = instance
5358 def Exec(self, feedback_fn):
5359 """Reinstall the instance.
5362 inst = self.instance
5364 if self.op.os_type is not None:
5365 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
5366 inst.os = self.op.os_type
5367 # Write to configuration
5368 self.cfg.Update(inst, feedback_fn)
5370 _StartInstanceDisks(self, inst, None)
5372 feedback_fn("Running the instance OS create scripts...")
5373 # FIXME: pass debug option from opcode to backend
5374 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
5375 self.op.debug_level,
5376 osparams=self.os_inst)
5377 result.Raise("Could not install OS for instance %s on node %s" %
5378 (inst.name, inst.primary_node))
5380 _ShutdownInstanceDisks(self, inst)
5383 class LUInstanceRecreateDisks(LogicalUnit):
5384 """Recreate an instance's missing disks.
5387 HPATH = "instance-recreate-disks"
5388 HTYPE = constants.HTYPE_INSTANCE
5391 def ExpandNames(self):
5392 self._ExpandAndLockInstance()
5394 def BuildHooksEnv(self):
5397 This runs on master, primary and secondary nodes of the instance.
5400 env = _BuildInstanceHookEnvByObject(self, self.instance)
5401 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5404 def CheckPrereq(self):
5405 """Check prerequisites.
5407 This checks that the instance is in the cluster and is not running.
5410 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5411 assert instance is not None, \
5412 "Cannot retrieve locked instance %s" % self.op.instance_name
5413 _CheckNodeOnline(self, instance.primary_node)
5415 if instance.disk_template == constants.DT_DISKLESS:
5416 raise errors.OpPrereqError("Instance '%s' has no disks" %
5417 self.op.instance_name, errors.ECODE_INVAL)
5418 _CheckInstanceDown(self, instance, "cannot recreate disks")
5420 if not self.op.disks:
5421 self.op.disks = range(len(instance.disks))
5423 for idx in self.op.disks:
5424 if idx >= len(instance.disks):
5425 raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
5428 self.instance = instance
5430 def Exec(self, feedback_fn):
5431 """Recreate the disks.
5435 for idx, _ in enumerate(self.instance.disks):
5436 if idx not in self.op.disks: # disk idx has not been passed in
5440 _CreateDisks(self, self.instance, to_skip=to_skip)
5443 class LUInstanceRename(LogicalUnit):
5444 """Rename an instance.
5447 HPATH = "instance-rename"
5448 HTYPE = constants.HTYPE_INSTANCE
5450 def CheckArguments(self):
5454 if self.op.ip_check and not self.op.name_check:
5455 # TODO: make the ip check more flexible and not depend on the name check
5456 raise errors.OpPrereqError("Cannot do ip check without a name check",
5459 def BuildHooksEnv(self):
5462 This runs on master, primary and secondary nodes of the instance.
5465 env = _BuildInstanceHookEnvByObject(self, self.instance)
5466 env["INSTANCE_NEW_NAME"] = self.op.new_name
5467 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5470 def CheckPrereq(self):
5471 """Check prerequisites.
5473 This checks that the instance is in the cluster and is not running.
5476 self.op.instance_name = _ExpandInstanceName(self.cfg,
5477 self.op.instance_name)
5478 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5479 assert instance is not None
5480 _CheckNodeOnline(self, instance.primary_node)
5481 _CheckInstanceDown(self, instance, "cannot rename")
5482 self.instance = instance
5484 new_name = self.op.new_name
5485 if self.op.name_check:
5486 hostname = netutils.GetHostname(name=new_name)
5487 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
5489 new_name = self.op.new_name = hostname.name
5490 if (self.op.ip_check and
5491 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5492 raise errors.OpPrereqError("IP %s of instance %s already in use" %
5493 (hostname.ip, new_name),
5494 errors.ECODE_NOTUNIQUE)
5496 instance_list = self.cfg.GetInstanceList()
5497 if new_name in instance_list and new_name != instance.name:
5498 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5499 new_name, errors.ECODE_EXISTS)
5501 def Exec(self, feedback_fn):
5502 """Rename the instance.
5505 inst = self.instance
5506 old_name = inst.name
5508 rename_file_storage = False
5509 if (inst.disk_template == constants.DT_FILE and
5510 self.op.new_name != inst.name):
5511 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5512 rename_file_storage = True
5514 self.cfg.RenameInstance(inst.name, self.op.new_name)
5515 # Change the instance lock. This is definitely safe while we hold the BGL
5516 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
5517 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5519 # re-read the instance from the configuration after rename
5520 inst = self.cfg.GetInstanceInfo(self.op.new_name)
5522 if rename_file_storage:
5523 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5524 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5525 old_file_storage_dir,
5526 new_file_storage_dir)
5527 result.Raise("Could not rename on node %s directory '%s' to '%s'"
5528 " (but the instance has been renamed in Ganeti)" %
5529 (inst.primary_node, old_file_storage_dir,
5530 new_file_storage_dir))
5532 _StartInstanceDisks(self, inst, None)
5534 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
5535 old_name, self.op.debug_level)
5536 msg = result.fail_msg
5538 msg = ("Could not run OS rename script for instance %s on node %s"
5539 " (but the instance has been renamed in Ganeti): %s" %
5540 (inst.name, inst.primary_node, msg))
5541 self.proc.LogWarning(msg)
5543 _ShutdownInstanceDisks(self, inst)
5548 class LUInstanceRemove(LogicalUnit):
5549 """Remove an instance.
5552 HPATH = "instance-remove"
5553 HTYPE = constants.HTYPE_INSTANCE
5556 def ExpandNames(self):
5557 self._ExpandAndLockInstance()
5558 self.needed_locks[locking.LEVEL_NODE] = []
5559 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5561 def DeclareLocks(self, level):
5562 if level == locking.LEVEL_NODE:
5563 self._LockInstancesNodes()
5565 def BuildHooksEnv(self):
5568 This runs on master, primary and secondary nodes of the instance.
5571 env = _BuildInstanceHookEnvByObject(self, self.instance)
5572 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
5573 nl = [self.cfg.GetMasterNode()]
5574 nl_post = list(self.instance.all_nodes) + nl
5575 return env, nl, nl_post
5577 def CheckPrereq(self):
5578 """Check prerequisites.
5580 This checks that the instance is in the cluster.
5583 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5584 assert self.instance is not None, \
5585 "Cannot retrieve locked instance %s" % self.op.instance_name
5587 def Exec(self, feedback_fn):
5588 """Remove the instance.
5591 instance = self.instance
5592 logging.info("Shutting down instance %s on node %s",
5593 instance.name, instance.primary_node)
5595 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5596 self.op.shutdown_timeout)
5597 msg = result.fail_msg
5599 if self.op.ignore_failures:
5600 feedback_fn("Warning: can't shutdown instance: %s" % msg)
5602 raise errors.OpExecError("Could not shutdown instance %s on"
5604 (instance.name, instance.primary_node, msg))
5606 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5609 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5610 """Utility function to remove an instance.
5613 logging.info("Removing block devices for instance %s", instance.name)
5615 if not _RemoveDisks(lu, instance):
5616 if not ignore_failures:
5617 raise errors.OpExecError("Can't remove instance's disks")
5618 feedback_fn("Warning: can't remove instance's disks")
5620 logging.info("Removing instance %s out of cluster config", instance.name)
5622 lu.cfg.RemoveInstance(instance.name)
5624 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5625 "Instance lock removal conflict"
5627 # Remove lock for the instance
5628 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5631 class LUInstanceQuery(NoHooksLU):
5632 """Logical unit for querying instances.
5635 # pylint: disable-msg=W0142
5638 def CheckArguments(self):
5639 self.iq = _InstanceQuery(self.op.names, self.op.output_fields,
5640 self.op.use_locking)
5642 def ExpandNames(self):
5643 self.iq.ExpandNames(self)
5645 def DeclareLocks(self, level):
5646 self.iq.DeclareLocks(self, level)
5648 def Exec(self, feedback_fn):
5649 return self.iq.OldStyleQuery(self)
5652 class LUInstanceFailover(LogicalUnit):
5653 """Failover an instance.
5656 HPATH = "instance-failover"
5657 HTYPE = constants.HTYPE_INSTANCE
5660 def ExpandNames(self):
5661 self._ExpandAndLockInstance()
5662 self.needed_locks[locking.LEVEL_NODE] = []
5663 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5665 def DeclareLocks(self, level):
5666 if level == locking.LEVEL_NODE:
5667 self._LockInstancesNodes()
5669 def BuildHooksEnv(self):
5672 This runs on master, primary and secondary nodes of the instance.
5675 instance = self.instance
5676 source_node = instance.primary_node
5677 target_node = instance.secondary_nodes[0]
5679 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5680 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5681 "OLD_PRIMARY": source_node,
5682 "OLD_SECONDARY": target_node,
5683 "NEW_PRIMARY": target_node,
5684 "NEW_SECONDARY": source_node,
5686 env.update(_BuildInstanceHookEnvByObject(self, instance))
5687 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5689 nl_post.append(source_node)
5690 return env, nl, nl_post
5692 def CheckPrereq(self):
5693 """Check prerequisites.
5695 This checks that the instance is in the cluster.
5698 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5699 assert self.instance is not None, \
5700 "Cannot retrieve locked instance %s" % self.op.instance_name
5702 bep = self.cfg.GetClusterInfo().FillBE(instance)
5703 if instance.disk_template not in constants.DTS_NET_MIRROR:
5704 raise errors.OpPrereqError("Instance's disk layout is not"
5705 " network mirrored, cannot failover.",
5708 secondary_nodes = instance.secondary_nodes
5709 if not secondary_nodes:
5710 raise errors.ProgrammerError("no secondary node but using "
5711 "a mirrored disk template")
5713 target_node = secondary_nodes[0]
5714 _CheckNodeOnline(self, target_node)
5715 _CheckNodeNotDrained(self, target_node)
5716 if instance.admin_up:
5717 # check memory requirements on the secondary node
5718 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5719 instance.name, bep[constants.BE_MEMORY],
5720 instance.hypervisor)
5722 self.LogInfo("Not checking memory on the secondary node as"
5723 " instance will not be started")
5725 # check bridge existence
5726 _CheckInstanceBridgesExist(self, instance, node=target_node)
5728 def Exec(self, feedback_fn):
5729 """Failover an instance.
5731 The failover is done by shutting it down on its present node and
5732 starting it on the secondary.
5735 instance = self.instance
5736 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
5738 source_node = instance.primary_node
5739 target_node = instance.secondary_nodes[0]
5741 if instance.admin_up:
5742 feedback_fn("* checking disk consistency between source and target")
5743 for dev in instance.disks:
5744 # for drbd, these are drbd over lvm
5745 if not _CheckDiskConsistency(self, dev, target_node, False):
5746 if not self.op.ignore_consistency:
5747 raise errors.OpExecError("Disk %s is degraded on target node,"
5748 " aborting failover." % dev.iv_name)
5750 feedback_fn("* not checking disk consistency as instance is not running")
5752 feedback_fn("* shutting down instance on source node")
5753 logging.info("Shutting down instance %s on node %s",
5754 instance.name, source_node)
5756 result = self.rpc.call_instance_shutdown(source_node, instance,
5757 self.op.shutdown_timeout)
5758 msg = result.fail_msg
5760 if self.op.ignore_consistency or primary_node.offline:
5761 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5762 " Proceeding anyway. Please make sure node"
5763 " %s is down. Error details: %s",
5764 instance.name, source_node, source_node, msg)
5766 raise errors.OpExecError("Could not shutdown instance %s on"
5768 (instance.name, source_node, msg))
5770 feedback_fn("* deactivating the instance's disks on source node")
5771 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5772 raise errors.OpExecError("Can't shut down the instance's disks.")
5774 instance.primary_node = target_node
5775 # distribute new instance config to the other nodes
5776 self.cfg.Update(instance, feedback_fn)
5778 # Only start the instance if it's marked as up
5779 if instance.admin_up:
5780 feedback_fn("* activating the instance's disks on target node")
5781 logging.info("Starting instance %s on node %s",
5782 instance.name, target_node)
5784 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5785 ignore_secondaries=True)
5787 _ShutdownInstanceDisks(self, instance)
5788 raise errors.OpExecError("Can't activate the instance's disks")
5790 feedback_fn("* starting the instance on the target node")
5791 result = self.rpc.call_instance_start(target_node, instance, None, None)
5792 msg = result.fail_msg
5794 _ShutdownInstanceDisks(self, instance)
5795 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5796 (instance.name, target_node, msg))
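# Illustrative sketch, not part of the original module: LUInstanceFailover.Exec
# above boils down to a fixed sequence -- optionally verify disk consistency,
# shut the instance down on the old primary, deactivate its disks, flip
# primary_node in the configuration, then reassemble the disks and start the
# instance on the former secondary.  The callables below are hypothetical.
def _SketchFailoverSequence(steps, ignore_consistency=False):
  """Run the failover steps, tolerating only the consistency check failing.

  @param steps: dict of callables keyed by step name (hypothetical)

  """
  if not steps["check_consistency"]() and not ignore_consistency:
    raise RuntimeError("Disk degraded on target node, aborting failover")
  steps["shutdown_on_source"]()
  steps["shutdown_disks"]()
  steps["update_primary_in_config"]()
  if steps["instance_marked_up"]():
    steps["assemble_disks_on_target"]()
    steps["start_on_target"]()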
5799 class LUInstanceMigrate(LogicalUnit):
5800 """Migrate an instance.
5802 This is migration without shutting down, compared to the failover,
5803 which is done with shutdown.
5806 HPATH = "instance-migrate"
5807 HTYPE = constants.HTYPE_INSTANCE
5810 def ExpandNames(self):
5811 self._ExpandAndLockInstance()
5813 self.needed_locks[locking.LEVEL_NODE] = []
5814 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5816 self._migrater = TLMigrateInstance(self, self.op.instance_name,
5818 self.tasklets = [self._migrater]
5820 def DeclareLocks(self, level):
5821 if level == locking.LEVEL_NODE:
5822 self._LockInstancesNodes()
5824 def BuildHooksEnv(self):
5827 This runs on master, primary and secondary nodes of the instance.
5830 instance = self._migrater.instance
5831 source_node = instance.primary_node
5832 target_node = instance.secondary_nodes[0]
5833 env = _BuildInstanceHookEnvByObject(self, instance)
5834 env["MIGRATE_LIVE"] = self._migrater.live
5835 env["MIGRATE_CLEANUP"] = self.op.cleanup
5837 "OLD_PRIMARY": source_node,
5838 "OLD_SECONDARY": target_node,
5839 "NEW_PRIMARY": target_node,
5840 "NEW_SECONDARY": source_node,
5842 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5844 nl_post.append(source_node)
5845 return env, nl, nl_post
5848 class LUInstanceMove(LogicalUnit):
5849 """Move an instance by data-copying.
5852 HPATH = "instance-move"
5853 HTYPE = constants.HTYPE_INSTANCE
5856 def ExpandNames(self):
5857 self._ExpandAndLockInstance()
5858 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5859 self.op.target_node = target_node
5860 self.needed_locks[locking.LEVEL_NODE] = [target_node]
5861 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5863 def DeclareLocks(self, level):
5864 if level == locking.LEVEL_NODE:
5865 self._LockInstancesNodes(primary_only=True)
5867 def BuildHooksEnv(self):
5870 This runs on master, primary and secondary nodes of the instance.
5874 "TARGET_NODE": self.op.target_node,
5875 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5877 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5878 nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5879 self.op.target_node]
5882 def CheckPrereq(self):
5883 """Check prerequisites.
5885 This checks that the instance is in the cluster.
5888 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5889 assert self.instance is not None, \
5890 "Cannot retrieve locked instance %s" % self.op.instance_name
5892 node = self.cfg.GetNodeInfo(self.op.target_node)
5893 assert node is not None, \
5894 "Cannot retrieve locked node %s" % self.op.target_node
5896 self.target_node = target_node = node.name
5898 if target_node == instance.primary_node:
5899 raise errors.OpPrereqError("Instance %s is already on the node %s" %
5900 (instance.name, target_node),
5903 bep = self.cfg.GetClusterInfo().FillBE(instance)
5905 for idx, dsk in enumerate(instance.disks):
5906 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5907 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5908 " cannot copy" % idx, errors.ECODE_STATE)
5910 _CheckNodeOnline(self, target_node)
5911 _CheckNodeNotDrained(self, target_node)
5912 _CheckNodeVmCapable(self, target_node)
5914 if instance.admin_up:
5915 # check memory requirements on the target node
5916 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5917 instance.name, bep[constants.BE_MEMORY],
5918 instance.hypervisor)
5920 self.LogInfo("Not checking memory on the secondary node as"
5921 " instance will not be started")
5923 # check bridge existence
5924 _CheckInstanceBridgesExist(self, instance, node=target_node)
5926 def Exec(self, feedback_fn):
5927 """Move an instance.
5929 The move is done by shutting it down on its present node, copying
5930 the data over (slow) and starting it on the new node.
5933 instance = self.instance
5935 source_node = instance.primary_node
5936 target_node = self.target_node
5938 self.LogInfo("Shutting down instance %s on source node %s",
5939 instance.name, source_node)
5941 result = self.rpc.call_instance_shutdown(source_node, instance,
5942 self.op.shutdown_timeout)
5943 msg = result.fail_msg
5945 if self.op.ignore_consistency:
5946 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5947 " Proceeding anyway. Please make sure node"
5948 " %s is down. Error details: %s",
5949 instance.name, source_node, source_node, msg)
5951 raise errors.OpExecError("Could not shutdown instance %s on"
5953 (instance.name, source_node, msg))
5955 # create the target disks
5957 _CreateDisks(self, instance, target_node=target_node)
5958 except errors.OpExecError:
5959 self.LogWarning("Device creation failed, reverting...")
5961 _RemoveDisks(self, instance, target_node=target_node)
5963 self.cfg.ReleaseDRBDMinors(instance.name)
5966 cluster_name = self.cfg.GetClusterInfo().cluster_name
5969 # activate, get path, copy the data over
5970 for idx, disk in enumerate(instance.disks):
5971 self.LogInfo("Copying data for disk %d", idx)
5972 result = self.rpc.call_blockdev_assemble(target_node, disk,
5973 instance.name, True, idx)
5975 self.LogWarning("Can't assemble newly created disk %d: %s",
5976 idx, result.fail_msg)
5977 errs.append(result.fail_msg)
5979 dev_path = result.payload
5980 result = self.rpc.call_blockdev_export(source_node, disk,
5981 target_node, dev_path,
5984 self.LogWarning("Can't copy data over for disk %d: %s",
5985 idx, result.fail_msg)
5986 errs.append(result.fail_msg)
5990 self.LogWarning("Some disks failed to copy, aborting")
5992 _RemoveDisks(self, instance, target_node=target_node)
5994 self.cfg.ReleaseDRBDMinors(instance.name)
5995 raise errors.OpExecError("Errors during disk copy: %s" %
5998 instance.primary_node = target_node
5999 self.cfg.Update(instance, feedback_fn)
6001 self.LogInfo("Removing the disks on the original node")
6002 _RemoveDisks(self, instance, target_node=source_node)
6004 # Only start the instance if it's marked as up
6005 if instance.admin_up:
6006 self.LogInfo("Starting instance %s on node %s",
6007 instance.name, target_node)
6009 disks_ok, _ = _AssembleInstanceDisks(self, instance,
6010 ignore_secondaries=True)
6012 _ShutdownInstanceDisks(self, instance)
6013 raise errors.OpExecError("Can't activate the instance's disks")
6015 result = self.rpc.call_instance_start(target_node, instance, None, None)
6016 msg = result.fail_msg
6018 _ShutdownInstanceDisks(self, instance)
6019 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6020 (instance.name, target_node, msg))
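# Illustrative sketch, not part of the original module: an instance move copies
# every disk individually -- assemble the freshly created disk on the target,
# then stream the data over from the source -- and only switches the configured
# primary node once all copies have succeeded; otherwise the new disks are
# removed again.  The copy helpers below are hypothetical.
def _SketchMoveDisks(disks, assemble_on_target, export_from_source,
                     remove_target_disks):
  """Copy a list of disks to the target node, reverting on any failure."""
  errs = []
  for idx, disk in enumerate(disks):
    ok, dev_path = assemble_on_target(disk)        # hypothetical helper
    if not ok:
      errs.append("disk %d: assembly failed" % idx)
      continue
    if not export_from_source(disk, dev_path):     # hypothetical helper
      errs.append("disk %d: copy failed" % idx)
  if errs:
    remove_target_disks()                          # revert, keep source intact
    raise RuntimeError("Errors during disk copy: %s" % ", ".join(errs))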
6023 class LUNodeMigrate(LogicalUnit):
6024 """Migrate all instances from a node.
6027 HPATH = "node-migrate"
6028 HTYPE = constants.HTYPE_NODE
6031 def ExpandNames(self):
6032 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6034 self.needed_locks = {
6035 locking.LEVEL_NODE: [self.op.node_name],
6038 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6040 # Create tasklets for migrating all instances on this node
6044 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
6045 logging.debug("Migrating instance %s", inst.name)
6046 names.append(inst.name)
6048 tasklets.append(TLMigrateInstance(self, inst.name, False))
6050 self.tasklets = tasklets
6052 # Declare instance locks
6053 self.needed_locks[locking.LEVEL_INSTANCE] = names
6055 def DeclareLocks(self, level):
6056 if level == locking.LEVEL_NODE:
6057 self._LockInstancesNodes()
6059 def BuildHooksEnv(self):
6062 This runs on the master, the primary and all the secondaries.
6066 "NODE_NAME": self.op.node_name,
6069 nl = [self.cfg.GetMasterNode()]
6071 return (env, nl, nl)
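# Illustrative sketch, not part of the original module: LUNodeMigrate simply
# fans out into one migration tasklet per instance whose primary node is the
# node being evacuated, and locks all of those instances up front.  The
# factory argument below is a hypothetical stand-in for TLMigrateInstance.
def _SketchNodeMigratePlan(primary_instances, make_tasklet):
  """Return (instance names, tasklets) for migrating a node's instances."""
  names = []
  tasklets = []
  for inst_name in primary_instances:
    names.append(inst_name)
    tasklets.append(make_tasklet(inst_name, False))  # cleanup=False
  return names, tasklets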
6074 class TLMigrateInstance(Tasklet):
6075 """Tasklet class for instance migration.
6078 @ivar live: whether the migration will be done live or non-live;
6079 this variable is initialized only after CheckPrereq has run
6082 def __init__(self, lu, instance_name, cleanup):
6083 """Initializes this class.
6086 Tasklet.__init__(self, lu)
6089 self.instance_name = instance_name
6090 self.cleanup = cleanup
6091 self.live = False # will be overridden later
6093 def CheckPrereq(self):
6094 """Check prerequisites.
6096 This checks that the instance is in the cluster.
6099 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
6100 instance = self.cfg.GetInstanceInfo(instance_name)
6101 assert instance is not None
6103 if instance.disk_template != constants.DT_DRBD8:
6104 raise errors.OpPrereqError("Instance's disk layout is not"
6105 " drbd8, cannot migrate.", errors.ECODE_STATE)
6107 secondary_nodes = instance.secondary_nodes
6108 if not secondary_nodes:
6109 raise errors.ConfigurationError("No secondary node but using"
6110 " drbd8 disk template")
6112 i_be = self.cfg.GetClusterInfo().FillBE(instance)
6114 target_node = secondary_nodes[0]
6115 # check memory requirements on the secondary node
6116 _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
6117 instance.name, i_be[constants.BE_MEMORY],
6118 instance.hypervisor)
6120 # check bridge existence
6121 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
6123 if not self.cleanup:
6124 _CheckNodeNotDrained(self.lu, target_node)
6125 result = self.rpc.call_instance_migratable(instance.primary_node,
6127 result.Raise("Can't migrate, please use failover",
6128 prereq=True, ecode=errors.ECODE_STATE)
6130 self.instance = instance
6132 if self.lu.op.live is not None and self.lu.op.mode is not None:
6133 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
6134 " parameters are accepted",
6136 if self.lu.op.live is not None:
6138 self.lu.op.mode = constants.HT_MIGRATION_LIVE
6140 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
6141 # reset the 'live' parameter to None so that repeated
6142 # invocations of CheckPrereq do not raise an exception
6143 self.lu.op.live = None
6144 elif self.lu.op.mode is None:
6145 # read the default value from the hypervisor
6146 i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
6147 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
6149 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
6151 def _WaitUntilSync(self):
6152 """Poll with custom rpc for disk sync.
6154 This uses our own step-based rpc call.
6157 self.feedback_fn("* wait until resync is done")
6161 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
6163 self.instance.disks)
6165 for node, nres in result.items():
6166 nres.Raise("Cannot resync disks on node %s" % node)
6167 node_done, node_percent = nres.payload
6168 all_done = all_done and node_done
6169 if node_percent is not None:
6170 min_percent = min(min_percent, node_percent)
6172 if min_percent < 100:
6173 self.feedback_fn(" - progress: %.1f%%" % min_percent)
6176 def _EnsureSecondary(self, node):
6177 """Demote a node to secondary.
6180 self.feedback_fn("* switching node %s to secondary mode" % node)
6182 for dev in self.instance.disks:
6183 self.cfg.SetDiskID(dev, node)
6185 result = self.rpc.call_blockdev_close(node, self.instance.name,
6186 self.instance.disks)
6187 result.Raise("Cannot change disk to secondary on node %s" % node)
6189 def _GoStandalone(self):
6190 """Disconnect from the network.
6193 self.feedback_fn("* changing into standalone mode")
6194 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
6195 self.instance.disks)
6196 for node, nres in result.items():
6197 nres.Raise("Cannot disconnect disks node %s" % node)
6199 def _GoReconnect(self, multimaster):
6200 """Reconnect to the network.
6206 msg = "single-master"
6207 self.feedback_fn("* changing disks into %s mode" % msg)
6208 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
6209 self.instance.disks,
6210 self.instance.name, multimaster)
6211 for node, nres in result.items():
6212 nres.Raise("Cannot change disks config on node %s" % node)
6214 def _ExecCleanup(self):
6215 """Try to cleanup after a failed migration.
6217 The cleanup is done by:
6218 - check that the instance is running only on one node
6219 (and update the config if needed)
6220 - change disks on its secondary node to secondary
6221 - wait until disks are fully synchronized
6222 - disconnect from the network
6223 - change disks into single-master mode
6224 - wait again until disks are fully synchronized
6227 instance = self.instance
6228 target_node = self.target_node
6229 source_node = self.source_node
6231 # check running on only one node
6232 self.feedback_fn("* checking where the instance actually runs"
6233 " (if this hangs, the hypervisor might be in"
6235 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
6236 for node, result in ins_l.items():
6237 result.Raise("Can't contact node %s" % node)
6239 runningon_source = instance.name in ins_l[source_node].payload
6240 runningon_target = instance.name in ins_l[target_node].payload
6242 if runningon_source and runningon_target:
6243 raise errors.OpExecError("Instance seems to be running on two nodes,"
6244 " or the hypervisor is confused. You will have"
6245 " to ensure manually that it runs only on one"
6246 " and restart this operation.")
6248 if not (runningon_source or runningon_target):
6249 raise errors.OpExecError("Instance does not seem to be running at all."
6250 " In this case, it's safer to repair by"
6251 " running 'gnt-instance stop' to ensure disk"
6252 " shutdown, and then restarting it.")
6254 if runningon_target:
6255 # the migration has actually succeeded, we need to update the config
6256 self.feedback_fn("* instance running on secondary node (%s),"
6257 " updating config" % target_node)
6258 instance.primary_node = target_node
6259 self.cfg.Update(instance, self.feedback_fn)
6260 demoted_node = source_node
6262 self.feedback_fn("* instance confirmed to be running on its"
6263 " primary node (%s)" % source_node)
6264 demoted_node = target_node
6266 self._EnsureSecondary(demoted_node)
6268 self._WaitUntilSync()
6269 except errors.OpExecError:
6270 # we ignore errors here, since if the device is standalone, it
6271 # won't be able to sync
6273 self._GoStandalone()
6274 self._GoReconnect(False)
6275 self._WaitUntilSync()
6277 self.feedback_fn("* done")
6279 def _RevertDiskStatus(self):
6280 """Try to revert the disk status after a failed migration.
6283 target_node = self.target_node
6285 self._EnsureSecondary(target_node)
6286 self._GoStandalone()
6287 self._GoReconnect(False)
6288 self._WaitUntilSync()
6289 except errors.OpExecError, err:
6290 self.lu.LogWarning("Migration failed and I can't reconnect the"
6291 " drives: error '%s'\n"
6292 "Please look and recover the instance status" %
6295 def _AbortMigration(self):
6296 """Call the hypervisor code to abort a started migration.
6299 instance = self.instance
6300 target_node = self.target_node
6301 migration_info = self.migration_info
6303 abort_result = self.rpc.call_finalize_migration(target_node,
6307 abort_msg = abort_result.fail_msg
6309 logging.error("Aborting migration failed on target node %s: %s",
6310 target_node, abort_msg)
6311 # Don't raise an exception here, as we still have to try to revert the
6312 # disk status, even if this step failed.
6314 def _ExecMigration(self):
6315 """Migrate an instance.
6317 The migration is done by:
6318 - change the disks into dual-master mode
6319 - wait until disks are fully synchronized again
6320 - migrate the instance
6321 - change disks on the new secondary node (the old primary) to secondary
6322 - wait until disks are fully synchronized
6323 - change disks into single-master mode
6326 instance = self.instance
6327 target_node = self.target_node
6328 source_node = self.source_node
6330 self.feedback_fn("* checking disk consistency between source and target")
6331 for dev in instance.disks:
6332 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6333 raise errors.OpExecError("Disk %s is degraded or not fully"
6334 " synchronized on target node,"
6335 " aborting migrate." % dev.iv_name)
6337 # First get the migration information from the remote node
6338 result = self.rpc.call_migration_info(source_node, instance)
6339 msg = result.fail_msg
6341 log_err = ("Failed fetching source migration information from %s: %s" %
6343 logging.error(log_err)
6344 raise errors.OpExecError(log_err)
6346 self.migration_info = migration_info = result.payload
6348 # Then switch the disks to master/master mode
6349 self._EnsureSecondary(target_node)
6350 self._GoStandalone()
6351 self._GoReconnect(True)
6352 self._WaitUntilSync()
6354 self.feedback_fn("* preparing %s to accept the instance" % target_node)
6355 result = self.rpc.call_accept_instance(target_node,
6358 self.nodes_ip[target_node])
6360 msg = result.fail_msg
6362 logging.error("Instance pre-migration failed, trying to revert"
6363 " disk status: %s", msg)
6364 self.feedback_fn("Pre-migration failed, aborting")
6365 self._AbortMigration()
6366 self._RevertDiskStatus()
6367 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6368 (instance.name, msg))
6370 self.feedback_fn("* migrating instance to %s" % target_node)
6372 result = self.rpc.call_instance_migrate(source_node, instance,
6373 self.nodes_ip[target_node],
6375 msg = result.fail_msg
6377 logging.error("Instance migration failed, trying to revert"
6378 " disk status: %s", msg)
6379 self.feedback_fn("Migration failed, aborting")
6380 self._AbortMigration()
6381 self._RevertDiskStatus()
6382 raise errors.OpExecError("Could not migrate instance %s: %s" %
6383 (instance.name, msg))
6386 instance.primary_node = target_node
6387 # distribute new instance config to the other nodes
6388 self.cfg.Update(instance, self.feedback_fn)
6390 result = self.rpc.call_finalize_migration(target_node,
6394 msg = result.fail_msg
6396 logging.error("Instance migration succeeded, but finalization failed:"
6398 raise errors.OpExecError("Could not finalize instance migration: %s" %
6401 self._EnsureSecondary(source_node)
6402 self._WaitUntilSync()
6403 self._GoStandalone()
6404 self._GoReconnect(False)
6405 self._WaitUntilSync()
6407 self.feedback_fn("* done")
6409 def Exec(self, feedback_fn):
6410 """Perform the migration.
6413 feedback_fn("Migrating instance %s" % self.instance.name)
6415 self.feedback_fn = feedback_fn
6417 self.source_node = self.instance.primary_node
6418 self.target_node = self.instance.secondary_nodes[0]
6419 self.all_nodes = [self.source_node, self.target_node]
6421 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
6422 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
6426 return self._ExecCleanup()
6428 return self._ExecMigration()
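# Illustrative sketch, not part of the original module: the 'live'/'mode'
# handling in TLMigrateInstance.CheckPrereq accepts at most one of the two
# opcode parameters, maps the boolean 'live' onto a migration mode, and
# otherwise falls back to the hypervisor's default.  The string values below
# only mirror the real HT_MIGRATION_* constants by name; treat them as
# assumptions.
def _SketchResolveMigrationMode(live, mode, hv_default_mode):
  """Return the effective migration mode for (live, mode) opcode inputs."""
  if live is not None and mode is not None:
    raise ValueError("Only one of the 'live' and 'mode' parameters is"
                     " accepted")
  if live is not None:
    return "live" if live else "non-live"
  if mode is None:
    return hv_default_mode          # read from the hypervisor parameters
  return mode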
6431 def _CreateBlockDev(lu, node, instance, device, force_create,
6433 """Create a tree of block devices on a given node.
6435 If this device type has to be created on secondaries, create it and all its children.
6438 If not, just recurse to children keeping the same 'force' value.
6440 @param lu: the lu on whose behalf we execute
6441 @param node: the node on which to create the device
6442 @type instance: L{objects.Instance}
6443 @param instance: the instance which owns the device
6444 @type device: L{objects.Disk}
6445 @param device: the device to create
6446 @type force_create: boolean
6447 @param force_create: whether to force creation of this device; this
6448 will be changed to True whenever we find a device which has
6449 CreateOnSecondary() attribute
6450 @param info: the extra 'metadata' we should attach to the device
6451 (this will be represented as a LVM tag)
6452 @type force_open: boolean
6453 @param force_open: this parameter will be passed to the
6454 L{backend.BlockdevCreate} function where it specifies
6455 whether we run on primary or not, and it affects both
6456 the child assembly and the device's own Open() execution
6459 if device.CreateOnSecondary():
6463 for child in device.children:
6464 _CreateBlockDev(lu, node, instance, child, force_create,
6467 if not force_create:
6470 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
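# Illustrative sketch, not part of the original module: _CreateBlockDev walks
# the disk tree and flips force_create to True as soon as it meets a device
# that must also exist on secondary nodes (e.g. a DRBD device), so everything
# underneath it gets created as well; devices above such a point are only
# created when the caller already forced it.  The tiny tree encoding below
# ((needs_secondary, children) tuples) is hypothetical.
def _SketchForcePropagation(device, force_create, create_fn):
  """Recurse over (needs_secondary, children) tuples, mirroring the logic."""
  needs_secondary, children = device
  if needs_secondary:
    force_create = True
  for child in children:
    _SketchForcePropagation(child, force_create, create_fn)
  if force_create:
    create_fn(device)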
6473 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
6474 """Create a single block device on a given node.
6476 This will not recurse over children of the device, so they must be
6479 @param lu: the lu on whose behalf we execute
6480 @param node: the node on which to create the device
6481 @type instance: L{objects.Instance}
6482 @param instance: the instance which owns the device
6483 @type device: L{objects.Disk}
6484 @param device: the device to create
6485 @param info: the extra 'metadata' we should attach to the device
6486 (this will be represented as a LVM tag)
6487 @type force_open: boolean
6488 @param force_open: this parameter will be passed to the
6489 L{backend.BlockdevCreate} function where it specifies
6490 whether we run on primary or not, and it affects both
6491 the child assembly and the device's own Open() execution
6494 lu.cfg.SetDiskID(device, node)
6495 result = lu.rpc.call_blockdev_create(node, device, device.size,
6496 instance.name, force_open, info)
6497 result.Raise("Can't create block device %s on"
6498 " node %s for instance %s" % (device, node, instance.name))
6499 if device.physical_id is None:
6500 device.physical_id = result.payload
6503 def _GenerateUniqueNames(lu, exts):
6504 """Generate a suitable LV name.
6506 This will generate a logical volume name for the given instance.
6511 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
6512 results.append("%s%s" % (new_id, val))
6516 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
6518 """Generate a drbd8 device complete with its children.
6521 port = lu.cfg.AllocatePort()
6522 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
6523 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
6524 logical_id=(vgname, names[0]))
6525 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6526 logical_id=(vgname, names[1]))
6527 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
6528 logical_id=(primary, secondary, port,
6531 children=[dev_data, dev_meta],
6536 def _GenerateDiskTemplate(lu, template_name,
6537 instance_name, primary_node,
6538 secondary_nodes, disk_info,
6539 file_storage_dir, file_driver,
6540 base_index, feedback_fn):
6541 """Generate the entire disk layout for a given template type.
6544 #TODO: compute space requirements
6546 vgname = lu.cfg.GetVGName()
6547 disk_count = len(disk_info)
6549 if template_name == constants.DT_DISKLESS:
6551 elif template_name == constants.DT_PLAIN:
6552 if len(secondary_nodes) != 0:
6553 raise errors.ProgrammerError("Wrong template configuration")
6555 names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6556 for i in range(disk_count)])
6557 for idx, disk in enumerate(disk_info):
6558 disk_index = idx + base_index
6559 vg = disk.get("vg", vgname)
6560 feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
6561 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
6562 logical_id=(vg, names[idx]),
6563 iv_name="disk/%d" % disk_index,
6565 disks.append(disk_dev)
6566 elif template_name == constants.DT_DRBD8:
6567 if len(secondary_nodes) != 1:
6568 raise errors.ProgrammerError("Wrong template configuration")
6569 remote_node = secondary_nodes[0]
6570 minors = lu.cfg.AllocateDRBDMinor(
6571 [primary_node, remote_node] * len(disk_info), instance_name)
6574 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6575 for i in range(disk_count)]):
6576 names.append(lv_prefix + "_data")
6577 names.append(lv_prefix + "_meta")
6578 for idx, disk in enumerate(disk_info):
6579 disk_index = idx + base_index
6580 vg = disk.get("vg", vgname)
6581 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
6582 disk["size"], vg, names[idx*2:idx*2+2],
6583 "disk/%d" % disk_index,
6584 minors[idx*2], minors[idx*2+1])
6585 disk_dev.mode = disk["mode"]
6586 disks.append(disk_dev)
6587 elif template_name == constants.DT_FILE:
6588 if len(secondary_nodes) != 0:
6589 raise errors.ProgrammerError("Wrong template configuration")
6591 opcodes.RequireFileStorage()
6593 for idx, disk in enumerate(disk_info):
6594 disk_index = idx + base_index
6595 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6596 iv_name="disk/%d" % disk_index,
6597 logical_id=(file_driver,
6598 "%s/disk%d" % (file_storage_dir,
6601 disks.append(disk_dev)
6603 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
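# Illustrative sketch, not part of the original module: for DRBD8 the template
# generator above allocates two DRBD minors (one for the primary and one for
# the secondary node) and two logical volumes (a "..._data" LV plus a small
# "..._meta" LV) per disk, which is why names and minors are consumed in pairs
# (idx*2, idx*2+1).  This is purely a naming/pairing illustration; the
# prefixes are hypothetical.
def _SketchDrbdNamePairs(unique_prefixes):
  """Return [(data_lv, meta_lv, (minor_pri_idx, minor_sec_idx)), ...]."""
  layout = []
  for idx, prefix in enumerate(unique_prefixes):
    layout.append((prefix + "_data", prefix + "_meta", (idx * 2, idx * 2 + 1)))
  return layout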
6607 def _GetInstanceInfoText(instance):
6608 """Compute that text that should be added to the disk's metadata.
6611 return "originstname+%s" % instance.name
6614 def _CalcEta(time_taken, written, total_size):
6615 """Calculates the ETA based on size written and total size.
6617 @param time_taken: The time taken so far
6618 @param written: amount written so far
6619 @param total_size: The total size of data to be written
6620 @return: The remaining time in seconds
6623 avg_time = time_taken / float(written)
6624 return (total_size - written) * avg_time
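# Illustrative worked example, not part of the original module: with 30s taken
# to write 1024 MiB out of 4096 MiB, the average time per MiB is 30/1024s and
# the remaining (4096 - 1024) MiB therefore take about 90s.
def _SketchCalcEtaExample():
  """Return the ETA for the example above using the same formula."""
  time_taken, written, total_size = 30.0, 1024.0, 4096.0
  avg_time = time_taken / written
  return (total_size - written) * avg_time      # == 90.0 seconds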
6627 def _WipeDisks(lu, instance):
6628 """Wipes instance disks.
6630 @type lu: L{LogicalUnit}
6631 @param lu: the logical unit on whose behalf we execute
6632 @type instance: L{objects.Instance}
6633 @param instance: the instance whose disks we should wipe
6634 @return: the success of the wipe
6637 node = instance.primary_node
6639 for device in instance.disks:
6640 lu.cfg.SetDiskID(device, node)
6642 logging.info("Pause sync of instance %s disks", instance.name)
6643 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
6645 for idx, success in enumerate(result.payload):
6647 logging.warn("pause-sync of instance %s for disks %d failed",
6651 for idx, device in enumerate(instance.disks):
6652 lu.LogInfo("* Wiping disk %d", idx)
6653 logging.info("Wiping disk %d for instance %s, node %s",
6654 idx, instance.name, node)
6656 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk, but at
6657 # most MAX_WIPE_CHUNK
6658 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
6659 constants.MIN_WIPE_CHUNK_PERCENT)
6664 start_time = time.time()
6666 while offset < size:
6667 wipe_size = min(wipe_chunk_size, size - offset)
6668 result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
6669 result.Raise("Could not wipe disk %d at offset %d for size %d" %
6670 (idx, offset, wipe_size))
6673 if now - last_output >= 60:
6674 eta = _CalcEta(now - start_time, offset, size)
6675 lu.LogInfo(" - done: %.1f%% ETA: %s" %
6676 (offset / float(size) * 100, utils.FormatSeconds(eta)))
6679 logging.info("Resume sync of instance %s disks", instance.name)
6681 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
6683 for idx, success in enumerate(result.payload):
6685 lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
6686 " look at the status and troubleshoot the issue.", idx)
6687 logging.warn("resume-sync of instance %s for disks %d failed",
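# Illustrative sketch, not part of the original module: _WipeDisks above walks
# each disk in chunks of min(MAX_WIPE_CHUNK, MIN_WIPE_CHUNK_PERCENT percent of
# the disk size) and reports progress at most once a minute.  The generator
# below only reproduces the chunking arithmetic; sizes are plain numbers here.
def _SketchWipeOffsets(disk_size, max_chunk, min_chunk_percent):
  """Yield (offset, length) pairs covering the whole disk."""
  chunk_size = min(max_chunk, disk_size / 100.0 * min_chunk_percent)
  offset = 0
  while offset < disk_size:
    length = min(chunk_size, disk_size - offset)
    yield (offset, length)
    offset += length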
6691 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
6692 """Create all disks for an instance.
6694 This abstracts away some work from AddInstance.
6696 @type lu: L{LogicalUnit}
6697 @param lu: the logical unit on whose behalf we execute
6698 @type instance: L{objects.Instance}
6699 @param instance: the instance whose disks we should create
6701 @param to_skip: list of indices to skip
6702 @type target_node: string
6703 @param target_node: if passed, overrides the target node for creation
6705 @return: the success of the creation
6708 info = _GetInstanceInfoText(instance)
6709 if target_node is None:
6710 pnode = instance.primary_node
6711 all_nodes = instance.all_nodes
6716 if instance.disk_template == constants.DT_FILE:
6717 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6718 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
6720 result.Raise("Failed to create directory '%s' on"
6721 " node %s" % (file_storage_dir, pnode))
6723 # Note: this needs to be kept in sync with adding of disks in
6724 # LUInstanceSetParams
6725 for idx, device in enumerate(instance.disks):
6726 if to_skip and idx in to_skip:
6728 logging.info("Creating volume %s for instance %s",
6729 device.iv_name, instance.name)
6731 for node in all_nodes:
6732 f_create = node == pnode
6733 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
6736 def _RemoveDisks(lu, instance, target_node=None):
6737 """Remove all disks for an instance.
6739 This abstracts away some work from `AddInstance()` and
6740 `RemoveInstance()`. Note that in case some of the devices couldn't
6741 be removed, the removal will continue with the other ones (compare
6742 with `_CreateDisks()`).
6744 @type lu: L{LogicalUnit}
6745 @param lu: the logical unit on whose behalf we execute
6746 @type instance: L{objects.Instance}
6747 @param instance: the instance whose disks we should remove
6748 @type target_node: string
6749 @param target_node: used to override the node on which to remove the disks
6751 @return: the success of the removal
6754 logging.info("Removing block devices for instance %s", instance.name)
6757 for device in instance.disks:
6759 edata = [(target_node, device)]
6761 edata = device.ComputeNodeTree(instance.primary_node)
6762 for node, disk in edata:
6763 lu.cfg.SetDiskID(disk, node)
6764 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
6766 lu.LogWarning("Could not remove block device %s on node %s,"
6767 " continuing anyway: %s", device.iv_name, node, msg)
6770 if instance.disk_template == constants.DT_FILE:
6771 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6775 tgt = instance.primary_node
6776 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
6778 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
6779 file_storage_dir, instance.primary_node, result.fail_msg)
6785 def _ComputeDiskSizePerVG(disk_template, disks):
6786 """Compute disk size requirements in the volume group
6789 def _compute(disks, payload):
6790 """Universal algorithm
6795 vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload  # keyed by the disk's VG
6799 # Required free disk space as a function of disk and swap space
6801 constants.DT_DISKLESS: {},
6802 constants.DT_PLAIN: _compute(disks, 0),
6803 # 128 MB are added for drbd metadata for each disk
6804 constants.DT_DRBD8: _compute(disks, 128),
6805 constants.DT_FILE: {},
6808 if disk_template not in req_size_dict:
6809 raise errors.ProgrammerError("Disk template '%s' size requirement"
6810 " is unknown" % disk_template)
6812 return req_size_dict[disk_template]
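# Illustrative worked example, not part of the original module: with two disks
# {"vg": "xenvg", "size": 1024} and {"vg": "fastvg", "size": 512}, a DRBD8
# instance needs {"xenvg": 1024 + 128, "fastvg": 512 + 128} MiB free, i.e. the
# per-disk size plus 128 MiB of DRBD metadata, grouped per volume group.
def _SketchPerVgRequirements(disks, payload):
  """Re-derive the per-VG totals computed by _ComputeDiskSizePerVG."""
  vgs = {}
  for disk in disks:
    vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload
  return vgs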
6815 def _ComputeDiskSize(disk_template, disks):
6816 """Compute disk size requirements in the volume group
6819 # Required free disk space as a function of disk and swap space
6821 constants.DT_DISKLESS: None,
6822 constants.DT_PLAIN: sum(d["size"] for d in disks),
6823 # 128 MB are added for drbd metadata for each disk
6824 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
6825 constants.DT_FILE: None,
6828 if disk_template not in req_size_dict:
6829 raise errors.ProgrammerError("Disk template '%s' size requirement"
6830 " is unknown" % disk_template)
6832 return req_size_dict[disk_template]
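# Illustrative arithmetic, not part of the original module: for the flat
# (non per-VG) variant above, a DRBD8 instance with 1024 MiB and 512 MiB disks
# needs (1024 + 128) + (512 + 128) = 1792 MiB, while the same disks as plain
# LVs need 1024 + 512 = 1536 MiB and diskless/file templates need none.
def _SketchFlatDrbdSize():
  """Return the flat DRBD8 requirement for the example above."""
  disks = [{"size": 1024}, {"size": 512}]
  return sum(d["size"] + 128 for d in disks)     # == 1792 MiB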
6835 def _FilterVmNodes(lu, nodenames):
6836 """Filters out non-vm_capable nodes from a list.
6838 @type lu: L{LogicalUnit}
6839 @param lu: the logical unit for which we check
6840 @type nodenames: list
6841 @param nodenames: the list of nodes on which we should check
6843 @return: the list of vm-capable nodes
6846 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
6847 return [name for name in nodenames if name not in vm_nodes]
6850 def _CheckHVParams(lu, nodenames, hvname, hvparams):
6851 """Hypervisor parameter validation.
6853 This function abstracts the hypervisor parameter validation to be
6854 used in both instance create and instance modify.
6856 @type lu: L{LogicalUnit}
6857 @param lu: the logical unit for which we check
6858 @type nodenames: list
6859 @param nodenames: the list of nodes on which we should check
6860 @type hvname: string
6861 @param hvname: the name of the hypervisor we should use
6862 @type hvparams: dict
6863 @param hvparams: the parameters which we need to check
6864 @raise errors.OpPrereqError: if the parameters are not valid
6867 nodenames = _FilterVmNodes(lu, nodenames)
6868 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6871 for node in nodenames:
6875 info.Raise("Hypervisor parameter validation failed on node %s" % node)
6878 def _CheckOSParams(lu, required, nodenames, osname, osparams):
6879 """OS parameters validation.
6881 @type lu: L{LogicalUnit}
6882 @param lu: the logical unit for which we check
6883 @type required: boolean
6884 @param required: whether the validation should fail if the OS is not found
6886 @type nodenames: list
6887 @param nodenames: the list of nodes on which we should check
6888 @type osname: string
6889 @param osname: the name of the OS we should use
6890 @type osparams: dict
6891 @param osparams: the parameters which we need to check
6892 @raise errors.OpPrereqError: if the parameters are not valid
6895 nodenames = _FilterVmNodes(lu, nodenames)
6896 result = lu.rpc.call_os_validate(required, nodenames, osname,
6897 [constants.OS_VALIDATE_PARAMETERS],
6899 for node, nres in result.items():
6900 # we don't check for offline cases since this should be run only
6901 # against the master node and/or an instance's nodes
6902 nres.Raise("OS Parameters validation failed on node %s" % node)
6903 if not nres.payload:
6904 lu.LogInfo("OS %s not found on node %s, validation skipped",
6908 class LUInstanceCreate(LogicalUnit):
6909 """Create an instance.
6912 HPATH = "instance-add"
6913 HTYPE = constants.HTYPE_INSTANCE
6916 def CheckArguments(self):
6920 # do not require name_check to ease forward/backward compatibility
6922 if self.op.no_install and self.op.start:
6923 self.LogInfo("No-installation mode selected, disabling startup")
6924 self.op.start = False
6925 # validate/normalize the instance name
6926 self.op.instance_name = \
6927 netutils.Hostname.GetNormalizedName(self.op.instance_name)
6929 if self.op.ip_check and not self.op.name_check:
6930 # TODO: make the ip check more flexible and not depend on the name check
6931 raise errors.OpPrereqError("Cannot do ip check without a name check",
6934 # check nics' parameter names
6935 for nic in self.op.nics:
6936 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
6938 # check disks. parameter names and consistent adopt/no-adopt strategy
6939 has_adopt = has_no_adopt = False
6940 for disk in self.op.disks:
6941 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
6946 if has_adopt and has_no_adopt:
6947 raise errors.OpPrereqError("Either all disks are adopted or none is",
6950 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
6951 raise errors.OpPrereqError("Disk adoption is not supported for the"
6952 " '%s' disk template" %
6953 self.op.disk_template,
6955 if self.op.iallocator is not None:
6956 raise errors.OpPrereqError("Disk adoption not allowed with an"
6957 " iallocator script", errors.ECODE_INVAL)
6958 if self.op.mode == constants.INSTANCE_IMPORT:
6959 raise errors.OpPrereqError("Disk adoption not allowed for"
6960 " instance import", errors.ECODE_INVAL)
6962 self.adopt_disks = has_adopt
6964 # instance name verification
6965 if self.op.name_check:
6966 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
6967 self.op.instance_name = self.hostname1.name
6968 # used in CheckPrereq for ip ping check
6969 self.check_ip = self.hostname1.ip
6971 self.check_ip = None
6973 # file storage checks
6974 if (self.op.file_driver and
6975 not self.op.file_driver in constants.FILE_DRIVER):
6976 raise errors.OpPrereqError("Invalid file driver name '%s'" %
6977 self.op.file_driver, errors.ECODE_INVAL)
6979 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6980 raise errors.OpPrereqError("File storage directory path not absolute",
6983 ### Node/iallocator related checks
6984 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
6986 if self.op.pnode is not None:
6987 if self.op.disk_template in constants.DTS_NET_MIRROR:
6988 if self.op.snode is None:
6989 raise errors.OpPrereqError("The networked disk templates need"
6990 " a mirror node", errors.ECODE_INVAL)
6992 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
6994 self.op.snode = None
6996 self._cds = _GetClusterDomainSecret()
6998 if self.op.mode == constants.INSTANCE_IMPORT:
6999 # On import force_variant must be True, because if we forced it at
7000 # initial install, our only chance when importing it back is that it
7002 self.op.force_variant = True
7004 if self.op.no_install:
7005 self.LogInfo("No-installation mode has no effect during import")
7007 elif self.op.mode == constants.INSTANCE_CREATE:
7008 if self.op.os_type is None:
7009 raise errors.OpPrereqError("No guest OS specified",
7011 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
7012 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
7013 " installation" % self.op.os_type,
7015 if self.op.disk_template is None:
7016 raise errors.OpPrereqError("No disk template specified",
7019 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7020 # Check handshake to ensure both clusters have the same domain secret
7021 src_handshake = self.op.source_handshake
7022 if not src_handshake:
7023 raise errors.OpPrereqError("Missing source handshake",
7026 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
7029 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
7032 # Load and check source CA
7033 self.source_x509_ca_pem = self.op.source_x509_ca
7034 if not self.source_x509_ca_pem:
7035 raise errors.OpPrereqError("Missing source X509 CA",
7039 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7041 except OpenSSL.crypto.Error, err:
7042 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7043 (err, ), errors.ECODE_INVAL)
7045 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7046 if errcode is not None:
7047 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7050 self.source_x509_ca = cert
7052 src_instance_name = self.op.source_instance_name
7053 if not src_instance_name:
7054 raise errors.OpPrereqError("Missing source instance name",
7057 self.source_instance_name = \
7058 netutils.GetHostname(name=src_instance_name).name
7061 raise errors.OpPrereqError("Invalid instance creation mode %r" %
7062 self.op.mode, errors.ECODE_INVAL)
7064 def ExpandNames(self):
7065 """ExpandNames for CreateInstance.
7067 Figure out the right locks for instance creation.
7070 self.needed_locks = {}
7072 instance_name = self.op.instance_name
7073 # this is just a preventive check, but someone might still add this
7074 # instance in the meantime, and creation will fail at lock-add time
7075 if instance_name in self.cfg.GetInstanceList():
7076 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7077 instance_name, errors.ECODE_EXISTS)
7079 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7081 if self.op.iallocator:
7082 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7084 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7085 nodelist = [self.op.pnode]
7086 if self.op.snode is not None:
7087 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7088 nodelist.append(self.op.snode)
7089 self.needed_locks[locking.LEVEL_NODE] = nodelist
7091 # in case of import lock the source node too
7092 if self.op.mode == constants.INSTANCE_IMPORT:
7093 src_node = self.op.src_node
7094 src_path = self.op.src_path
7096 if src_path is None:
7097 self.op.src_path = src_path = self.op.instance_name
7099 if src_node is None:
7100 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7101 self.op.src_node = None
7102 if os.path.isabs(src_path):
7103 raise errors.OpPrereqError("Importing an instance from an absolute"
7104 " path requires a source node option.",
7107 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7108 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7109 self.needed_locks[locking.LEVEL_NODE].append(src_node)
7110 if not os.path.isabs(src_path):
7111 self.op.src_path = src_path = \
7112 utils.PathJoin(constants.EXPORT_DIR, src_path)
7114 def _RunAllocator(self):
7115 """Run the allocator based on input opcode.
7118 nics = [n.ToDict() for n in self.nics]
7119 ial = IAllocator(self.cfg, self.rpc,
7120 mode=constants.IALLOCATOR_MODE_ALLOC,
7121 name=self.op.instance_name,
7122 disk_template=self.op.disk_template,
7125 vcpus=self.be_full[constants.BE_VCPUS],
7126 mem_size=self.be_full[constants.BE_MEMORY],
7129 hypervisor=self.op.hypervisor,
7132 ial.Run(self.op.iallocator)
7135 raise errors.OpPrereqError("Can't compute nodes using"
7136 " iallocator '%s': %s" %
7137 (self.op.iallocator, ial.info),
7139 if len(ial.result) != ial.required_nodes:
7140 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7141 " of nodes (%s), required %s" %
7142 (self.op.iallocator, len(ial.result),
7143 ial.required_nodes), errors.ECODE_FAULT)
7144 self.op.pnode = ial.result[0]
7145 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7146 self.op.instance_name, self.op.iallocator,
7147 utils.CommaJoin(ial.result))
7148 if ial.required_nodes == 2:
7149 self.op.snode = ial.result[1]
7151 def BuildHooksEnv(self):
7154 This runs on master, primary and secondary nodes of the instance.
7158 "ADD_MODE": self.op.mode,
7160 if self.op.mode == constants.INSTANCE_IMPORT:
7161 env["SRC_NODE"] = self.op.src_node
7162 env["SRC_PATH"] = self.op.src_path
7163 env["SRC_IMAGES"] = self.src_images
7165 env.update(_BuildInstanceHookEnv(
7166 name=self.op.instance_name,
7167 primary_node=self.op.pnode,
7168 secondary_nodes=self.secondaries,
7169 status=self.op.start,
7170 os_type=self.op.os_type,
7171 memory=self.be_full[constants.BE_MEMORY],
7172 vcpus=self.be_full[constants.BE_VCPUS],
7173 nics=_NICListToTuple(self, self.nics),
7174 disk_template=self.op.disk_template,
7175 disks=[(d["size"], d["mode"]) for d in self.disks],
7178 hypervisor_name=self.op.hypervisor,
7181 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
7185 def _ReadExportInfo(self):
7186 """Reads the export information from disk.
7188 It will override the opcode source node and path with the actual
7189 information, if these two were not specified before.
7191 @return: the export information
7194 assert self.op.mode == constants.INSTANCE_IMPORT
7196 src_node = self.op.src_node
7197 src_path = self.op.src_path
7199 if src_node is None:
7200 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7201 exp_list = self.rpc.call_export_list(locked_nodes)
7203 for node in exp_list:
7204 if exp_list[node].fail_msg:
7206 if src_path in exp_list[node].payload:
7208 self.op.src_node = src_node = node
7209 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7213 raise errors.OpPrereqError("No export found for relative path %s" %
7214 src_path, errors.ECODE_INVAL)
7216 _CheckNodeOnline(self, src_node)
7217 result = self.rpc.call_export_info(src_node, src_path)
7218 result.Raise("No export or invalid export found in dir %s" % src_path)
7220 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7221 if not export_info.has_section(constants.INISECT_EXP):
7222 raise errors.ProgrammerError("Corrupted export config",
7223 errors.ECODE_ENVIRON)
7225 ei_version = export_info.get(constants.INISECT_EXP, "version")
7226 if (int(ei_version) != constants.EXPORT_VERSION):
7227 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7228 (ei_version, constants.EXPORT_VERSION),
7229 errors.ECODE_ENVIRON)
7232 def _ReadExportParams(self, einfo):
7233 """Use export parameters as defaults.
7235 In case the opcode doesn't specify (as in override) some instance
7236 parameters, then try to use them from the export information, if
7240 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7242 if self.op.disk_template is None:
7243 if einfo.has_option(constants.INISECT_INS, "disk_template"):
7244 self.op.disk_template = einfo.get(constants.INISECT_INS,
7247 raise errors.OpPrereqError("No disk template specified and the export"
7248 " is missing the disk_template information",
7251 if not self.op.disks:
7252 if einfo.has_option(constants.INISECT_INS, "disk_count"):
7254 # TODO: import the disk iv_name too
7255 for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7256 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7257 disks.append({"size": disk_sz})
7258 self.op.disks = disks
7260 raise errors.OpPrereqError("No disk info specified and the export"
7261 " is missing the disk information",
7264 if (not self.op.nics and
7265 einfo.has_option(constants.INISECT_INS, "nic_count")):
7267 for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7269 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7270 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7275 if (self.op.hypervisor is None and
7276 einfo.has_option(constants.INISECT_INS, "hypervisor")):
7277 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7278 if einfo.has_section(constants.INISECT_HYP):
7279 # use the export parameters but do not override the ones
7280 # specified by the user
7281 for name, value in einfo.items(constants.INISECT_HYP):
7282 if name not in self.op.hvparams:
7283 self.op.hvparams[name] = value
7285 if einfo.has_section(constants.INISECT_BEP):
7286 # use the parameters, without overriding
7287 for name, value in einfo.items(constants.INISECT_BEP):
7288 if name not in self.op.beparams:
7289 self.op.beparams[name] = value
7291 # try to read the parameters old style, from the main section
7292 for name in constants.BES_PARAMETERS:
7293 if (name not in self.op.beparams and
7294 einfo.has_option(constants.INISECT_INS, name)):
7295 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7297 if einfo.has_section(constants.INISECT_OSP):
7298 # use the parameters, without overriding
7299 for name, value in einfo.items(constants.INISECT_OSP):
7300 if name not in self.op.osparams:
7301 self.op.osparams[name] = value
7303 def _RevertToDefaults(self, cluster):
7304 """Revert the instance parameters to the default values.
7308 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7309 for name in self.op.hvparams.keys():
7310 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7311 del self.op.hvparams[name]
7313 be_defs = cluster.SimpleFillBE({})
7314 for name in self.op.beparams.keys():
7315 if name in be_defs and be_defs[name] == self.op.beparams[name]:
7316 del self.op.beparams[name]
7318 nic_defs = cluster.SimpleFillNIC({})
7319 for nic in self.op.nics:
7320 for name in constants.NICS_PARAMETERS:
7321 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
7324 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7325 for name in self.op.osparams.keys():
7326 if name in os_defs and os_defs[name] == self.op.osparams[name]:
7327 del self.op.osparams[name]
7329 def CheckPrereq(self):
7330 """Check prerequisites.
7333 if self.op.mode == constants.INSTANCE_IMPORT:
7334 export_info = self._ReadExportInfo()
7335 self._ReadExportParams(export_info)
7337 if (not self.cfg.GetVGName() and
7338 self.op.disk_template not in constants.DTS_NOT_LVM):
7339 raise errors.OpPrereqError("Cluster does not support lvm-based"
7340 " instances", errors.ECODE_STATE)
7342 if self.op.hypervisor is None:
7343 self.op.hypervisor = self.cfg.GetHypervisorType()
7345 cluster = self.cfg.GetClusterInfo()
7346 enabled_hvs = cluster.enabled_hypervisors
7347 if self.op.hypervisor not in enabled_hvs:
7348 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7349 " cluster (%s)" % (self.op.hypervisor,
7350 ",".join(enabled_hvs)),
7353 # check hypervisor parameter syntax (locally)
7354 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7355 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
7357 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7358 hv_type.CheckParameterSyntax(filled_hvp)
7359 self.hv_full = filled_hvp
7360 # check that we don't specify global parameters on an instance
7361 _CheckGlobalHvParams(self.op.hvparams)
7363 # fill and remember the beparams dict
7364 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7365 self.be_full = cluster.SimpleFillBE(self.op.beparams)
7367 # build os parameters
7368 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7370 # now that hvp/bep are in final format, let's reset to defaults,
7372 if self.op.identify_defaults:
7373 self._RevertToDefaults(cluster)
7377 for idx, nic in enumerate(self.op.nics):
7378 nic_mode_req = nic.get("mode", None)
7379 nic_mode = nic_mode_req
7380 if nic_mode is None:
7381 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7383 # in routed mode, for the first nic, the default ip is 'auto'
7384 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7385 default_ip_mode = constants.VALUE_AUTO
7387 default_ip_mode = constants.VALUE_NONE
7389 # ip validity checks
7390 ip = nic.get("ip", default_ip_mode)
7391 if ip is None or ip.lower() == constants.VALUE_NONE:
7393 elif ip.lower() == constants.VALUE_AUTO:
7394 if not self.op.name_check:
7395 raise errors.OpPrereqError("IP address set to auto but name checks"
7396 " have been skipped",
7398 nic_ip = self.hostname1.ip
7400 if not netutils.IPAddress.IsValid(ip):
7401 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
7405 # TODO: check the ip address for uniqueness
7406 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7407 raise errors.OpPrereqError("Routed nic mode requires an ip address",
7410 # MAC address verification
7411 mac = nic.get("mac", constants.VALUE_AUTO)
7412 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7413 mac = utils.NormalizeAndValidateMac(mac)
7416 self.cfg.ReserveMAC(mac, self.proc.GetECId())
7417 except errors.ReservationError:
7418 raise errors.OpPrereqError("MAC address %s already in use"
7419 " in cluster" % mac,
7420 errors.ECODE_NOTUNIQUE)
7422 # bridge verification
7423 bridge = nic.get("bridge", None)
7424 link = nic.get("link", None)
7426 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7427 " at the same time", errors.ECODE_INVAL)
7428 elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
7429 raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
7436 nicparams[constants.NIC_MODE] = nic_mode_req
7438 nicparams[constants.NIC_LINK] = link
7440 check_params = cluster.SimpleFillNIC(nicparams)
7441 objects.NIC.CheckParameterSyntax(check_params)
7442 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7444 # disk checks/pre-build
7446 for disk in self.op.disks:
7447 mode = disk.get("mode", constants.DISK_RDWR)
7448 if mode not in constants.DISK_ACCESS_SET:
7449 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7450 mode, errors.ECODE_INVAL)
7451 size = disk.get("size", None)
7453 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7456 except (TypeError, ValueError):
7457 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7459 vg = disk.get("vg", self.cfg.GetVGName())
7460 new_disk = {"size": size, "mode": mode, "vg": vg}
7462 new_disk["adopt"] = disk["adopt"]
7463 self.disks.append(new_disk)
7465 if self.op.mode == constants.INSTANCE_IMPORT:
7467 # Check that the new instance doesn't have less disks than the export
7468 instance_disks = len(self.disks)
7469 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7470 if instance_disks < export_disks:
7471 raise errors.OpPrereqError("Not enough disks to import."
7472 " (instance: %d, export: %d)" %
7473 (instance_disks, export_disks),
7477 for idx in range(export_disks):
7478 option = 'disk%d_dump' % idx
7479 if export_info.has_option(constants.INISECT_INS, option):
7480 # FIXME: are the old os-es, disk sizes, etc. useful?
7481 export_name = export_info.get(constants.INISECT_INS, option)
7482 image = utils.PathJoin(self.op.src_path, export_name)
7483 disk_images.append(image)
7485 disk_images.append(False)
7487 self.src_images = disk_images
7489 old_name = export_info.get(constants.INISECT_INS, 'name')
7491 exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7492 except (TypeError, ValueError), err:
7493 raise errors.OpPrereqError("Invalid export file, nic_count is not"
7494 " an integer: %s" % str(err),
7496 if self.op.instance_name == old_name:
7497 for idx, nic in enumerate(self.nics):
7498 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7499 nic_mac_ini = 'nic%d_mac' % idx
7500 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7502 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7504 # ip ping checks (we use the same ip that was resolved in ExpandNames)
7505 if self.op.ip_check:
7506 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7507 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7508 (self.check_ip, self.op.instance_name),
7509 errors.ECODE_NOTUNIQUE)
7511 #### mac address generation
7512 # By generating the MAC address here, both the allocator and the hooks get
7513 # the real, final MAC address rather than the 'auto' or 'generate' value.
7514 # There is a race condition between the generation and the instance object
7515 # creation, which means that we know the MAC is valid now, but we're not
7516 # sure it still will be when we actually add the instance. If things go bad,
7517 # adding the instance will abort because of a duplicate MAC, and the
7518 # creation job will fail.
7519 for nic in self.nics:
7520 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7521 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7525 if self.op.iallocator is not None:
7526 self._RunAllocator()
7528 #### node related checks
7530 # check primary node
7531 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7532 assert self.pnode is not None, \
7533 "Cannot retrieve locked node %s" % self.op.pnode
7535 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7536 pnode.name, errors.ECODE_STATE)
7538 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7539 pnode.name, errors.ECODE_STATE)
7540 if not pnode.vm_capable:
7541 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
7542 " '%s'" % pnode.name, errors.ECODE_STATE)
7544 self.secondaries = []
7546 # mirror node verification
7547 if self.op.disk_template in constants.DTS_NET_MIRROR:
7548 if self.op.snode == pnode.name:
7549 raise errors.OpPrereqError("The secondary node cannot be the"
7550 " primary node.", errors.ECODE_INVAL)
7551 _CheckNodeOnline(self, self.op.snode)
7552 _CheckNodeNotDrained(self, self.op.snode)
7553 _CheckNodeVmCapable(self, self.op.snode)
7554 self.secondaries.append(self.op.snode)
7556 nodenames = [pnode.name] + self.secondaries
7558 if not self.adopt_disks:
7559 # Check lv size requirements, if not adopting
7560 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
7561 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
7563 else: # instead, we must check the adoption data
7564 all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
7565 if len(all_lvs) != len(self.disks):
7566 raise errors.OpPrereqError("Duplicate volume names given for adoption",
7568 for lv_name in all_lvs:
7570 # FIXME: lv_name here is "vg/lv"; we need to ensure that other calls
7571 # to ReserveLV use the same syntax
7572 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7573 except errors.ReservationError:
7574 raise errors.OpPrereqError("LV named %s used by another instance" %
7575 lv_name, errors.ECODE_NOTUNIQUE)
7577 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
7578 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
7580 node_lvs = self.rpc.call_lv_list([pnode.name],
7581 vg_names.payload.keys())[pnode.name]
7582 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7583 node_lvs = node_lvs.payload
7585 delta = all_lvs.difference(node_lvs.keys())
7587 raise errors.OpPrereqError("Missing logical volume(s): %s" %
7588 utils.CommaJoin(delta),
7590 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7592 raise errors.OpPrereqError("Online logical volumes found, cannot"
7593 " adopt: %s" % utils.CommaJoin(online_lvs),
7595 # update the size of disk based on what is found
7596 for dsk in self.disks:
7597 dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
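# The LV size reported by lv_list is not guaranteed to be an integer string
# (it may carry a fractional part), hence the int(float(...)) round-trip
# above; the adopted disk's size is overwritten with the real on-disk value.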
7599 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7601 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7602 # check OS parameters (remotely)
7603 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7605 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7607 # memory check on primary node
7609 _CheckNodeFreeMemory(self, self.pnode.name,
7610 "creating instance %s" % self.op.instance_name,
7611 self.be_full[constants.BE_MEMORY],
7614 self.dry_run_result = list(nodenames)
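# The list of nodes that would host the instance; recorded so that a dry run
# of this opcode still produces a meaningful result without creating anything.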
7616 def Exec(self, feedback_fn):
7617 """Create and add the instance to the cluster.
7620 instance = self.op.instance_name
7621 pnode_name = self.pnode.name
7623 ht_kind = self.op.hypervisor
7624 if ht_kind in constants.HTS_REQ_PORT:
7625 network_port = self.cfg.AllocatePort()
7629 if constants.ENABLE_FILE_STORAGE:
7630 # this is needed because os.path.join does not accept None arguments
7631 if self.op.file_storage_dir is None:
7632 string_file_storage_dir = ""
7634 string_file_storage_dir = self.op.file_storage_dir
7636 # build the full file storage dir path
7637 file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
7638 string_file_storage_dir, instance)
7640 file_storage_dir = ""
7642 disks = _GenerateDiskTemplate(self,
7643 self.op.disk_template,
7644 instance, pnode_name,
7648 self.op.file_driver,
7652 iobj = objects.Instance(name=instance, os=self.op.os_type,
7653 primary_node=pnode_name,
7654 nics=self.nics, disks=disks,
7655 disk_template=self.op.disk_template,
7657 network_port=network_port,
7658 beparams=self.op.beparams,
7659 hvparams=self.op.hvparams,
7660 hypervisor=self.op.hypervisor,
7661 osparams=self.op.osparams,
7664 if self.adopt_disks:
7665 # rename LVs to the newly-generated names; we need to construct
7666 # 'fake' LV disks with the old data, plus the new unique_id
7667 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7669 for t_dsk, a_dsk in zip (tmp_disks, self.disks):
7670 rename_to.append(t_dsk.logical_id)
7671 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7672 self.cfg.SetDiskID(t_dsk, pnode_name)
7673 result = self.rpc.call_blockdev_rename(pnode_name,
7674 zip(tmp_disks, rename_to))
7675 result.Raise("Failed to rename adopted LVs")
7677 feedback_fn("* creating instance disks...")
7679 _CreateDisks(self, iobj)
7680 except errors.OpExecError:
7681 self.LogWarning("Device creation failed, reverting...")
7683 _RemoveDisks(self, iobj)
7685 self.cfg.ReleaseDRBDMinors(instance)
7688 if self.cfg.GetClusterInfo().prealloc_wipe_disks:
7689 feedback_fn("* wiping instance disks...")
7691 _WipeDisks(self, iobj)
7692 except errors.OpExecError:
7693 self.LogWarning("Device wiping failed, reverting...")
7695 _RemoveDisks(self, iobj)
7697 self.cfg.ReleaseDRBDMinors(instance)
7700 feedback_fn("adding instance %s to cluster config" % instance)
7702 self.cfg.AddInstance(iobj, self.proc.GetECId())
7704 # Declare that we don't want to remove the instance lock anymore, as we've
7705 # added the instance to the config
7706 del self.remove_locks[locking.LEVEL_INSTANCE]
7707 # Unlock all the nodes
7708 if self.op.mode == constants.INSTANCE_IMPORT:
7709 nodes_keep = [self.op.src_node]
7710 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7711 if node != self.op.src_node]
7712 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7713 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7715 self.context.glm.release(locking.LEVEL_NODE)
7716 del self.acquired_locks[locking.LEVEL_NODE]
7718 if self.op.wait_for_sync:
7719 disk_abort = not _WaitForSync(self, iobj)
7720 elif iobj.disk_template in constants.DTS_NET_MIRROR:
7721 # make sure the disks are not degraded (still sync-ing is ok)
7723 feedback_fn("* checking mirrors status")
7724 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
7729 _RemoveDisks(self, iobj)
7730 self.cfg.RemoveInstance(iobj.name)
7731 # Make sure the instance lock gets removed
7732 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
7733 raise errors.OpExecError("There are some degraded disks for"
7736 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
7737 if self.op.mode == constants.INSTANCE_CREATE:
7738 if not self.op.no_install:
7739 feedback_fn("* running the instance OS create scripts...")
7740 # FIXME: pass debug option from opcode to backend
7741 result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
7742 self.op.debug_level)
7743 result.Raise("Could not add os for instance %s"
7744 " on node %s" % (instance, pnode_name))
7746 elif self.op.mode == constants.INSTANCE_IMPORT:
7747 feedback_fn("* running the instance OS import scripts...")
7751 for idx, image in enumerate(self.src_images):
7755 # FIXME: pass debug option from opcode to backend
7756 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
7757 constants.IEIO_FILE, (image, ),
7758 constants.IEIO_SCRIPT,
7759 (iobj.disks[idx], idx),
7761 transfers.append(dt)
7764 masterd.instance.TransferInstanceData(self, feedback_fn,
7765 self.op.src_node, pnode_name,
7766 self.pnode.secondary_ip,
7768 if not compat.all(import_result):
7769 self.LogWarning("Some disks for instance %s on node %s were not"
7770 " imported successfully" % (instance, pnode_name))
7772 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7773 feedback_fn("* preparing remote import...")
7774 # The source cluster will stop the instance before attempting to make a
7775 # connection. In some cases stopping an instance can take a long time,
7776 # hence the shutdown timeout is added to the connection timeout.
7777 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
7778 self.op.source_shutdown_timeout)
7779 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
7781 assert iobj.primary_node == self.pnode.name
7783 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
7784 self.source_x509_ca,
7785 self._cds, timeouts)
7786 if not compat.all(disk_results):
7787 # TODO: Should the instance still be started, even if some disks
7788 # failed to import (valid for local imports, too)?
7789 self.LogWarning("Some disks for instance %s on node %s were not"
7790 " imported successfully" % (instance, pnode_name))
7792 # Run rename script on newly imported instance
7793 assert iobj.name == instance
7794 feedback_fn("Running rename script for %s" % instance)
7795 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
7796 self.source_instance_name,
7797 self.op.debug_level)
7799 self.LogWarning("Failed to run rename script for %s on node"
7800 " %s: %s" % (instance, pnode_name, result.fail_msg))
7803 # also checked in the prereq part
7804 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
7808 iobj.admin_up = True
7809 self.cfg.Update(iobj, feedback_fn)
7810 logging.info("Starting instance %s on node %s", instance, pnode_name)
7811 feedback_fn("* starting instance...")
7812 result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
7813 result.Raise("Could not start instance")
7815 return list(iobj.all_nodes)
7818 class LUInstanceConsole(NoHooksLU):
7819 """Connect to an instance's console.
7821 This is somewhat special in that it returns the command line that
7822 you need to run on the master node in order to connect to the console.
7828 def ExpandNames(self):
7829 self._ExpandAndLockInstance()
7831 def CheckPrereq(self):
7832 """Check prerequisites.
7834 This checks that the instance is in the cluster.
7837 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7838 assert self.instance is not None, \
7839 "Cannot retrieve locked instance %s" % self.op.instance_name
7840 _CheckNodeOnline(self, self.instance.primary_node)
7842 def Exec(self, feedback_fn):
7843 """Connect to the console of an instance
7846 instance = self.instance
7847 node = instance.primary_node
7849 node_insts = self.rpc.call_instance_list([node],
7850 [instance.hypervisor])[node]
7851 node_insts.Raise("Can't get node information from %s" % node)
7853 if instance.name not in node_insts.payload:
7854 if instance.admin_up:
7855 state = "ERROR_down"
7857 state = "ADMIN_down"
7858 raise errors.OpExecError("Instance %s is not running (state %s)" %
7859 (instance.name, state))
7861 logging.debug("Connecting to console of %s on %s", instance.name, node)
7863 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
7866 def _GetInstanceConsole(cluster, instance):
7867 """Returns console information for an instance.
7869 @type cluster: L{objects.Cluster}
7870 @type instance: L{objects.Instance}
7874 hyper = hypervisor.GetHypervisor(instance.hypervisor)
7875 # beparams and hvparams are passed separately, to avoid editing the
7876 # instance and then saving the defaults in the instance itself.
7877 hvparams = cluster.FillHV(instance)
7878 beparams = cluster.FillBE(instance)
7879 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
7881 assert console.instance == instance.name
7882 assert console.Validate()
7884 return console.ToDict()
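# The dictionary returned here is consumed by the client side (e.g. the
# "gnt-instance console" command), which turns it into the actual command
# line to execute; this helper never opens the connection itself.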
7887 class LUInstanceReplaceDisks(LogicalUnit):
7888 """Replace the disks of an instance.
7891 HPATH = "mirrors-replace"
7892 HTYPE = constants.HTYPE_INSTANCE
7895 def CheckArguments(self):
7896 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
7899 def ExpandNames(self):
7900 self._ExpandAndLockInstance()
7902 if self.op.iallocator is not None:
7903 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7905 elif self.op.remote_node is not None:
7906 remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7907 self.op.remote_node = remote_node
7909 # Warning: do not remove the locking of the new secondary here
7910 # unless DRBD8.AddChildren is changed to work in parallel;
7911 # currently it doesn't since parallel invocations of
7912 # FindUnusedMinor will conflict
7913 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
7914 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7917 self.needed_locks[locking.LEVEL_NODE] = []
7918 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7920 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
7921 self.op.iallocator, self.op.remote_node,
7922 self.op.disks, False, self.op.early_release)
7924 self.tasklets = [self.replacer]
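# The actual work is delegated to the TLReplaceDisks tasklet defined below;
# the LU only sets up locking and hooks, while the tasklet provides its own
# CheckPrereq/Exec implementation.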
7926 def DeclareLocks(self, level):
7927 # If we're not already locking all nodes in the set we have to declare the
7928 # instance's primary/secondary nodes.
7929 if (level == locking.LEVEL_NODE and
7930 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7931 self._LockInstancesNodes()
7933 def BuildHooksEnv(self):
7936 This runs on the master, the primary and all the secondaries.
7939 instance = self.replacer.instance
7941 "MODE": self.op.mode,
7942 "NEW_SECONDARY": self.op.remote_node,
7943 "OLD_SECONDARY": instance.secondary_nodes[0],
7945 env.update(_BuildInstanceHookEnvByObject(self, instance))
7947 self.cfg.GetMasterNode(),
7948 instance.primary_node,
7950 if self.op.remote_node is not None:
7951 nl.append(self.op.remote_node)
7955 class TLReplaceDisks(Tasklet):
7956 """Replaces disks for an instance.
7958 Note: Locking is not within the scope of this class.
7961 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7962 disks, delay_iallocator, early_release):
7963 """Initializes this class.
7966 Tasklet.__init__(self, lu)
7969 self.instance_name = instance_name
7971 self.iallocator_name = iallocator_name
7972 self.remote_node = remote_node
7974 self.delay_iallocator = delay_iallocator
7975 self.early_release = early_release
7978 self.instance = None
7979 self.new_node = None
7980 self.target_node = None
7981 self.other_node = None
7982 self.remote_node_info = None
7983 self.node_secondary_ip = None
7986 def CheckArguments(mode, remote_node, iallocator):
7987 """Helper function for users of this class.
7990 # check for valid parameter combination
7991 if mode == constants.REPLACE_DISK_CHG:
7992 if remote_node is None and iallocator is None:
7993 raise errors.OpPrereqError("When changing the secondary either an"
7994 " iallocator script must be used or the"
7995 " new node given", errors.ECODE_INVAL)
7997 if remote_node is not None and iallocator is not None:
7998 raise errors.OpPrereqError("Give either the iallocator or the new"
7999 " secondary, not both", errors.ECODE_INVAL)
8001 elif remote_node is not None or iallocator is not None:
8002 # Not replacing the secondary
8003 raise errors.OpPrereqError("The iallocator and new node options can"
8004 " only be used when changing the"
8005 " secondary node", errors.ECODE_INVAL)
8008 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
8009 """Compute a new secondary node using an IAllocator.
8012 ial = IAllocator(lu.cfg, lu.rpc,
8013 mode=constants.IALLOCATOR_MODE_RELOC,
8015 relocate_from=relocate_from)
8017 ial.Run(iallocator_name)
8020 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
8021 " %s" % (iallocator_name, ial.info),
8024 if len(ial.result) != ial.required_nodes:
8025 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8026 " of nodes (%s), required %s" %
8028 len(ial.result), ial.required_nodes),
8031 remote_node_name = ial.result[0]
8033 lu.LogInfo("Selected new secondary for instance '%s': %s",
8034 instance_name, remote_node_name)
8036 return remote_node_name
8038 def _FindFaultyDisks(self, node_name):
8039 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
8042 def CheckPrereq(self):
8043 """Check prerequisites.
8045 This checks that the instance is in the cluster.
8048 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
8049 assert instance is not None, \
8050 "Cannot retrieve locked instance %s" % self.instance_name
8052 if instance.disk_template != constants.DT_DRBD8:
8053 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
8054 " instances", errors.ECODE_INVAL)
8056 if len(instance.secondary_nodes) != 1:
8057 raise errors.OpPrereqError("The instance has a strange layout,"
8058 " expected one secondary but found %d" %
8059 len(instance.secondary_nodes),
8062 if not self.delay_iallocator:
8063 self._CheckPrereq2()
8065 def _CheckPrereq2(self):
8066 """Check prerequisites, second part.
8068 This function should always be part of CheckPrereq. It was separated and is
8069 now called from Exec because during node evacuation the iallocator was only
8070 called with an unmodified cluster model, not taking planned changes into account.
8074 instance = self.instance
8075 secondary_node = instance.secondary_nodes[0]
8077 if self.iallocator_name is None:
8078 remote_node = self.remote_node
8080 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
8081 instance.name, instance.secondary_nodes)
8083 if remote_node is not None:
8084 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
8085 assert self.remote_node_info is not None, \
8086 "Cannot retrieve locked node %s" % remote_node
8088 self.remote_node_info = None
8090 if remote_node == self.instance.primary_node:
8091 raise errors.OpPrereqError("The specified node is the primary node of"
8092 " the instance.", errors.ECODE_INVAL)
8094 if remote_node == secondary_node:
8095 raise errors.OpPrereqError("The specified node is already the"
8096 " secondary node of the instance.",
8099 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
8100 constants.REPLACE_DISK_CHG):
8101 raise errors.OpPrereqError("Cannot specify disks to be replaced",
8104 if self.mode == constants.REPLACE_DISK_AUTO:
8105 faulty_primary = self._FindFaultyDisks(instance.primary_node)
8106 faulty_secondary = self._FindFaultyDisks(secondary_node)
8108 if faulty_primary and faulty_secondary:
8109 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
8110 " one node and can not be repaired"
8111 " automatically" % self.instance_name,
8115 self.disks = faulty_primary
8116 self.target_node = instance.primary_node
8117 self.other_node = secondary_node
8118 check_nodes = [self.target_node, self.other_node]
8119 elif faulty_secondary:
8120 self.disks = faulty_secondary
8121 self.target_node = secondary_node
8122 self.other_node = instance.primary_node
8123 check_nodes = [self.target_node, self.other_node]
8129 # Non-automatic modes
8130 if self.mode == constants.REPLACE_DISK_PRI:
8131 self.target_node = instance.primary_node
8132 self.other_node = secondary_node
8133 check_nodes = [self.target_node, self.other_node]
8135 elif self.mode == constants.REPLACE_DISK_SEC:
8136 self.target_node = secondary_node
8137 self.other_node = instance.primary_node
8138 check_nodes = [self.target_node, self.other_node]
8140 elif self.mode == constants.REPLACE_DISK_CHG:
8141 self.new_node = remote_node
8142 self.other_node = instance.primary_node
8143 self.target_node = secondary_node
8144 check_nodes = [self.new_node, self.other_node]
8146 _CheckNodeNotDrained(self.lu, remote_node)
8147 _CheckNodeVmCapable(self.lu, remote_node)
8149 old_node_info = self.cfg.GetNodeInfo(secondary_node)
8150 assert old_node_info is not None
8151 if old_node_info.offline and not self.early_release:
8152 # doesn't make sense to delay the release
8153 self.early_release = True
8154 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
8155 " early-release mode", secondary_node)
8158 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
8161 # If not specified all disks should be replaced
8163 self.disks = range(len(self.instance.disks))
8165 for node in check_nodes:
8166 _CheckNodeOnline(self.lu, node)
8168 # Check whether disks are valid
8169 for disk_idx in self.disks:
8170 instance.FindDisk(disk_idx)
8172 # Get secondary node IP addresses
8175 for node_name in [self.target_node, self.other_node, self.new_node]:
8176 if node_name is not None:
8177 node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
8179 self.node_secondary_ip = node_2nd_ip
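# The secondary IPs collected here are the addresses DRBD replication runs
# over; they are handed to the drbd_disconnect_net/drbd_attach_net RPCs in
# the secondary-replacement path below.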
8181 def Exec(self, feedback_fn):
8182 """Execute disk replacement.
8184 This dispatches the disk replacement to the appropriate handler.
8187 if self.delay_iallocator:
8188 self._CheckPrereq2()
8191 feedback_fn("No disks need replacement")
8194 feedback_fn("Replacing disk(s) %s for %s" %
8195 (utils.CommaJoin(self.disks), self.instance.name))
8197 activate_disks = (not self.instance.admin_up)
8199 # Activate the instance disks if we're replacing them on a down instance
8201 _StartInstanceDisks(self.lu, self.instance, True)
8204 # Should we replace the secondary node?
8205 if self.new_node is not None:
8206 fn = self._ExecDrbd8Secondary
8208 fn = self._ExecDrbd8DiskOnly
8210 return fn(feedback_fn)
8213 # Deactivate the instance disks if we're replacing them on a
8216 _SafeShutdownInstanceDisks(self.lu, self.instance)
8218 def _CheckVolumeGroup(self, nodes):
8219 self.lu.LogInfo("Checking volume groups")
8221 vgname = self.cfg.GetVGName()
8223 # Make sure volume group exists on all involved nodes
8224 results = self.rpc.call_vg_list(nodes)
8226 raise errors.OpExecError("Can't list volume groups on the nodes")
8230 res.Raise("Error checking node %s" % node)
8231 if vgname not in res.payload:
8232 raise errors.OpExecError("Volume group '%s' not found on node %s" %
8235 def _CheckDisksExistence(self, nodes):
8236 # Check disk existence
8237 for idx, dev in enumerate(self.instance.disks):
8238 if idx not in self.disks:
8242 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
8243 self.cfg.SetDiskID(dev, node)
8245 result = self.rpc.call_blockdev_find(node, dev)
8247 msg = result.fail_msg
8248 if msg or not result.payload:
8250 msg = "disk not found"
8251 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
8254 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
8255 for idx, dev in enumerate(self.instance.disks):
8256 if idx not in self.disks:
8259 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
8262 if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
8264 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
8265 " replace disks for instance %s" %
8266 (node_name, self.instance.name))
8268 def _CreateNewStorage(self, node_name):
8269 vgname = self.cfg.GetVGName()
8272 for idx, dev in enumerate(self.instance.disks):
8273 if idx not in self.disks:
8276 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
8278 self.cfg.SetDiskID(dev, node_name)
8280 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
8281 names = _GenerateUniqueNames(self.lu, lv_names)
8283 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
8284 logical_id=(vgname, names[0]))
8285 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
8286 logical_id=(vgname, names[1]))
8288 new_lvs = [lv_data, lv_meta]
8289 old_lvs = dev.children
8290 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
8292 # we pass force_create=True to force the LVM creation
8293 for new_lv in new_lvs:
8294 _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
8295 _GetInstanceInfoText(self.instance), False)
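# iv_names, built above, maps each device's iv_name to the tuple
# (drbd device, old child LVs, newly created LVs); the caller uses it to
# drive the detach/rename/attach sequence and the final cleanup of the old
# volumes.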
8299 def _CheckDevices(self, node_name, iv_names):
8300 for name, (dev, _, _) in iv_names.iteritems():
8301 self.cfg.SetDiskID(dev, node_name)
8303 result = self.rpc.call_blockdev_find(node_name, dev)
8305 msg = result.fail_msg
8306 if msg or not result.payload:
8308 msg = "disk not found"
8309 raise errors.OpExecError("Can't find DRBD device %s: %s" %
8312 if result.payload.is_degraded:
8313 raise errors.OpExecError("DRBD device %s is degraded!" % name)
8315 def _RemoveOldStorage(self, node_name, iv_names):
8316 for name, (_, old_lvs, _) in iv_names.iteritems():
8317 self.lu.LogInfo("Remove logical volumes for %s" % name)
8320 self.cfg.SetDiskID(lv, node_name)
8322 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
8324 self.lu.LogWarning("Can't remove old LV: %s" % msg,
8325 hint="remove unused LVs manually")
8327 def _ReleaseNodeLock(self, node_name):
8328 """Releases the lock for a given node."""
8329 self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
8331 def _ExecDrbd8DiskOnly(self, feedback_fn):
8332 """Replace a disk on the primary or secondary for DRBD 8.
8334 The algorithm for replace is quite complicated:
8336 1. for each disk to be replaced:
8338 1. create new LVs on the target node with unique names
8339 1. detach old LVs from the drbd device
8340 1. rename old LVs to name_replaced.<time_t>
8341 1. rename new LVs to old LVs
8342 1. attach the new LVs (with the old names now) to the drbd device
8344 1. wait for sync across all devices
8346 1. for each modified disk:
8348 1. remove old LVs (which have the name name_replaced.<time_t>)
8350 Failures are not very well handled.
8355 # Step: check device activation
8356 self.lu.LogStep(1, steps_total, "Check device existence")
8357 self._CheckDisksExistence([self.other_node, self.target_node])
8358 self._CheckVolumeGroup([self.target_node, self.other_node])
8360 # Step: check other node consistency
8361 self.lu.LogStep(2, steps_total, "Check peer consistency")
8362 self._CheckDisksConsistency(self.other_node,
8363 self.other_node == self.instance.primary_node,
8366 # Step: create new storage
8367 self.lu.LogStep(3, steps_total, "Allocate new storage")
8368 iv_names = self._CreateNewStorage(self.target_node)
8370 # Step: for each lv, detach+rename*2+attach
8371 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8372 for dev, old_lvs, new_lvs in iv_names.itervalues():
8373 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
8375 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
8377 result.Raise("Can't detach drbd from local storage on node"
8378 " %s for device %s" % (self.target_node, dev.iv_name))
8380 #cfg.Update(instance)
8382 # ok, we created the new LVs, so now we know we have the needed
8383 # storage; as such, we proceed on the target node to rename
8384 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
8385 # using the assumption that logical_id == physical_id (which in
8386 # turn is the unique_id on that node)
8388 # FIXME(iustin): use a better name for the replaced LVs
8389 temp_suffix = int(time.time())
8390 ren_fn = lambda d, suff: (d.physical_id[0],
8391 d.physical_id[1] + "_replaced-%s" % suff)
8393 # Build the rename list based on what LVs exist on the node
8394 rename_old_to_new = []
8395 for to_ren in old_lvs:
8396 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
8397 if not result.fail_msg and result.payload:
8399 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
8401 self.lu.LogInfo("Renaming the old LVs on the target node")
8402 result = self.rpc.call_blockdev_rename(self.target_node,
8404 result.Raise("Can't rename old LVs on node %s" % self.target_node)
8406 # Now we rename the new LVs to the old LVs
8407 self.lu.LogInfo("Renaming the new LVs on the target node")
8408 rename_new_to_old = [(new, old.physical_id)
8409 for old, new in zip(old_lvs, new_lvs)]
8410 result = self.rpc.call_blockdev_rename(self.target_node,
8412 result.Raise("Can't rename new LVs on node %s" % self.target_node)
8414 for old, new in zip(old_lvs, new_lvs):
8415 new.logical_id = old.logical_id
8416 self.cfg.SetDiskID(new, self.target_node)
8418 for disk in old_lvs:
8419 disk.logical_id = ren_fn(disk, temp_suffix)
8420 self.cfg.SetDiskID(disk, self.target_node)
8422 # Now that the new lvs have the old name, we can add them to the device
8423 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
8424 result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
8426 msg = result.fail_msg
8428 for new_lv in new_lvs:
8429 msg2 = self.rpc.call_blockdev_remove(self.target_node,
8432 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
8433 hint=("cleanup manually the unused logical"
8435 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
8437 dev.children = new_lvs
8439 self.cfg.Update(self.instance, feedback_fn)
8442 if self.early_release:
8443 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8445 self._RemoveOldStorage(self.target_node, iv_names)
8446 # WARNING: we release both node locks here, do not do other RPCs
8447 # than WaitForSync to the primary node
8448 self._ReleaseNodeLock([self.target_node, self.other_node])
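# Trade-off: with early_release the old LVs are removed and the node locks
# dropped before the (potentially long) resync, so other jobs can use these
# nodes sooner; without it, cleanup only happens after the sync has been
# checked.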
8451 # This can fail as the old devices are degraded and _WaitForSync
8452 # does a combined result over all disks, so we don't check its return value
8453 self.lu.LogStep(cstep, steps_total, "Sync devices")
8455 _WaitForSync(self.lu, self.instance)
8457 # Check all devices manually
8458 self._CheckDevices(self.instance.primary_node, iv_names)
8460 # Step: remove old storage
8461 if not self.early_release:
8462 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8464 self._RemoveOldStorage(self.target_node, iv_names)
8466 def _ExecDrbd8Secondary(self, feedback_fn):
8467 """Replace the secondary node for DRBD 8.
8469 The algorithm for replace is quite complicated:
8470 - for all disks of the instance:
8471 - create new LVs on the new node with same names
8472 - shutdown the drbd device on the old secondary
8473 - disconnect the drbd network on the primary
8474 - create the drbd device on the new secondary
8475 - network attach the drbd on the primary, using an artifice:
8476 the drbd code for Attach() will connect to the network if it
8477 finds a device which is connected to the good local disks but not to the network
8479 - wait for sync across all devices
8480 - remove all disks from the old secondary
8482 Failures are not very well handled.
8487 # Step: check device activation
8488 self.lu.LogStep(1, steps_total, "Check device existence")
8489 self._CheckDisksExistence([self.instance.primary_node])
8490 self._CheckVolumeGroup([self.instance.primary_node])
8492 # Step: check other node consistency
8493 self.lu.LogStep(2, steps_total, "Check peer consistency")
8494 self._CheckDisksConsistency(self.instance.primary_node, True, True)
8496 # Step: create new storage
8497 self.lu.LogStep(3, steps_total, "Allocate new storage")
8498 for idx, dev in enumerate(self.instance.disks):
8499 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8500 (self.new_node, idx))
8501 # we pass force_create=True to force LVM creation
8502 for new_lv in dev.children:
8503 _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8504 _GetInstanceInfoText(self.instance), False)
8506 # Step 4: drbd minors and drbd setup changes
8507 # after this, we must manually remove the drbd minors on both the
8508 # error and the success paths
8509 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8510 minors = self.cfg.AllocateDRBDMinor([self.new_node
8511 for dev in self.instance.disks],
8513 logging.debug("Allocated minors %r", minors)
8516 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8517 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
8518 (self.new_node, idx))
8519 # create new devices on new_node; note that we create two IDs:
8520 # one without port, so the drbd will be activated without
8521 # networking information on the new node at this stage, and one
8522 # with network, for the latter activation in step 4
8523 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8524 if self.instance.primary_node == o_node1:
8527 assert self.instance.primary_node == o_node2, "Three-node instance?"
8530 new_alone_id = (self.instance.primary_node, self.new_node, None,
8531 p_minor, new_minor, o_secret)
8532 new_net_id = (self.instance.primary_node, self.new_node, o_port,
8533 p_minor, new_minor, o_secret)
8535 iv_names[idx] = (dev, dev.children, new_net_id)
8536 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8538 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8539 logical_id=new_alone_id,
8540 children=dev.children,
8543 _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8544 _GetInstanceInfoText(self.instance), False)
8545 except errors.GenericError:
8546 self.cfg.ReleaseDRBDMinors(self.instance.name)
8549 # We have new devices, shutdown the drbd on the old secondary
8550 for idx, dev in enumerate(self.instance.disks):
8551 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8552 self.cfg.SetDiskID(dev, self.target_node)
8553 msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8555 self.lu.LogWarning("Failed to shut down drbd for disk/%d on old"
8556 " node: %s" % (idx, msg),
8557 hint=("Please cleanup this device manually as"
8558 " soon as possible"))
8560 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8561 result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8562 self.node_secondary_ip,
8563 self.instance.disks)\
8564 [self.instance.primary_node]
8566 msg = result.fail_msg
8568 # detaches didn't succeed (unlikely)
8569 self.cfg.ReleaseDRBDMinors(self.instance.name)
8570 raise errors.OpExecError("Can't detach the disks from the network on"
8571 " old node: %s" % (msg,))
8573 # if we managed to detach at least one, we update all the disks of
8574 # the instance to point to the new secondary
8575 self.lu.LogInfo("Updating instance configuration")
8576 for dev, _, new_logical_id in iv_names.itervalues():
8577 dev.logical_id = new_logical_id
8578 self.cfg.SetDiskID(dev, self.instance.primary_node)
8580 self.cfg.Update(self.instance, feedback_fn)
8582 # and now perform the drbd attach
8583 self.lu.LogInfo("Attaching primary drbds to new secondary"
8584 " (standalone => connected)")
8585 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8587 self.node_secondary_ip,
8588 self.instance.disks,
8591 for to_node, to_result in result.items():
8592 msg = to_result.fail_msg
8594 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8596 hint=("please do a gnt-instance info to see the"
8597 " status of disks"))
8599 if self.early_release:
8600 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8602 self._RemoveOldStorage(self.target_node, iv_names)
8603 # WARNING: we release all node locks here, do not do other RPCs
8604 # than WaitForSync to the primary node
8605 self._ReleaseNodeLock([self.instance.primary_node,
8610 # This can fail as the old devices are degraded and _WaitForSync
8611 # does a combined result over all disks, so we don't check its return value
8612 self.lu.LogStep(cstep, steps_total, "Sync devices")
8614 _WaitForSync(self.lu, self.instance)
8616 # Check all devices manually
8617 self._CheckDevices(self.instance.primary_node, iv_names)
8619 # Step: remove old storage
8620 if not self.early_release:
8621 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8622 self._RemoveOldStorage(self.target_node, iv_names)
8625 class LURepairNodeStorage(NoHooksLU):
8626 """Repairs the volume group on a node.
8631 def CheckArguments(self):
8632 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8634 storage_type = self.op.storage_type
8636 if (constants.SO_FIX_CONSISTENCY not in
8637 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
8638 raise errors.OpPrereqError("Storage units of type '%s' can not be"
8639 " repaired" % storage_type,
8642 def ExpandNames(self):
8643 self.needed_locks = {
8644 locking.LEVEL_NODE: [self.op.node_name],
8647 def _CheckFaultyDisks(self, instance, node_name):
8648 """Ensure faulty disks abort the opcode or at least warn."""
8650 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
8652 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
8653 " node '%s'" % (instance.name, node_name),
8655 except errors.OpPrereqError, err:
8656 if self.op.ignore_consistency:
8657 self.proc.LogWarning(str(err.args[0]))
8661 def CheckPrereq(self):
8662 """Check prerequisites.
8665 # Check whether any instance on this node has faulty disks
8666 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
8667 if not inst.admin_up:
8669 check_nodes = set(inst.all_nodes)
8670 check_nodes.discard(self.op.node_name)
8671 for inst_node_name in check_nodes:
8672 self._CheckFaultyDisks(inst, inst_node_name)
8674 def Exec(self, feedback_fn):
8675 feedback_fn("Repairing storage unit '%s' on %s ..." %
8676 (self.op.name, self.op.node_name))
8678 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
8679 result = self.rpc.call_storage_execute(self.op.node_name,
8680 self.op.storage_type, st_args,
8682 constants.SO_FIX_CONSISTENCY)
8683 result.Raise("Failed to repair storage unit '%s' on %s" %
8684 (self.op.name, self.op.node_name))
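# Typically reached via something like "gnt-node repair-storage NODE lvm-vg
# VOLUMEGROUP" (spelling assumed); only storage types whose
# VALID_STORAGE_OPERATIONS include SO_FIX_CONSISTENCY are accepted, as
# checked in CheckArguments above.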
8687 class LUNodeEvacStrategy(NoHooksLU):
8688 """Computes the node evacuation strategy.
8693 def CheckArguments(self):
8694 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
8696 def ExpandNames(self):
8697 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
8698 self.needed_locks = locks = {}
8699 if self.op.remote_node is None:
8700 locks[locking.LEVEL_NODE] = locking.ALL_SET
8702 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8703 locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
8705 def Exec(self, feedback_fn):
8706 if self.op.remote_node is not None:
8708 for node in self.op.nodes:
8709 instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
8712 if i.primary_node == self.op.remote_node:
8713 raise errors.OpPrereqError("Node %s is the primary node of"
8714 " instance %s, cannot use it as"
8716 (self.op.remote_node, i.name),
8718 result.append([i.name, self.op.remote_node])
8720 ial = IAllocator(self.cfg, self.rpc,
8721 mode=constants.IALLOCATOR_MODE_MEVAC,
8722 evac_nodes=self.op.nodes)
8723 ial.Run(self.op.iallocator, validate=True)
8725 raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
8731 class LUInstanceGrowDisk(LogicalUnit):
8732 """Grow a disk of an instance.
8736 HTYPE = constants.HTYPE_INSTANCE
8739 def ExpandNames(self):
8740 self._ExpandAndLockInstance()
8741 self.needed_locks[locking.LEVEL_NODE] = []
8742 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8744 def DeclareLocks(self, level):
8745 if level == locking.LEVEL_NODE:
8746 self._LockInstancesNodes()
8748 def BuildHooksEnv(self):
8751 This runs on the master, the primary and all the secondaries.
8755 "DISK": self.op.disk,
8756 "AMOUNT": self.op.amount,
8758 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8759 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8762 def CheckPrereq(self):
8763 """Check prerequisites.
8765 This checks that the instance is in the cluster.
8768 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8769 assert instance is not None, \
8770 "Cannot retrieve locked instance %s" % self.op.instance_name
8771 nodenames = list(instance.all_nodes)
8772 for node in nodenames:
8773 _CheckNodeOnline(self, node)
8775 self.instance = instance
8777 if instance.disk_template not in constants.DTS_GROWABLE:
8778 raise errors.OpPrereqError("Instance's disk layout does not support"
8779 " growing.", errors.ECODE_INVAL)
8781 self.disk = instance.FindDisk(self.op.disk)
8783 if instance.disk_template != constants.DT_FILE:
8784 # TODO: check the free disk space for file, when that feature
8786 _CheckNodesFreeDiskPerVG(self, nodenames,
8787 self.disk.ComputeGrowth(self.op.amount))
8789 def Exec(self, feedback_fn):
8790 """Execute disk grow.
8793 instance = self.instance
8796 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
8798 raise errors.OpExecError("Cannot activate block device to grow")
8800 for node in instance.all_nodes:
8801 self.cfg.SetDiskID(disk, node)
8802 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
8803 result.Raise("Grow request failed to node %s" % node)
8805 # TODO: Rewrite code to work properly
8806 # DRBD goes into sync mode for a short amount of time after executing the
8807 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
8808 # calling "resize" in sync mode fails. Sleeping for a short amount of
8809 # time is a work-around.
8812 disk.RecordGrow(self.op.amount)
8813 self.cfg.Update(instance, feedback_fn)
8814 if self.op.wait_for_sync:
8815 disk_abort = not _WaitForSync(self, instance, disks=[disk])
8817 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
8818 " status.\nPlease check the instance.")
8819 if not instance.admin_up:
8820 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
8821 elif not instance.admin_up:
8822 self.proc.LogWarning("Not shutting down the disk even though the instance is"
8823 " not supposed to be running, because wait-for-sync"
8824 " mode was not requested.")
8827 class LUInstanceQueryData(NoHooksLU):
8828 """Query runtime instance data.
8833 def ExpandNames(self):
8834 self.needed_locks = {}
8835 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
8837 if self.op.instances:
8838 self.wanted_names = []
8839 for name in self.op.instances:
8840 full_name = _ExpandInstanceName(self.cfg, name)
8841 self.wanted_names.append(full_name)
8842 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
8844 self.wanted_names = None
8845 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
8847 self.needed_locks[locking.LEVEL_NODE] = []
8848 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8850 def DeclareLocks(self, level):
8851 if level == locking.LEVEL_NODE:
8852 self._LockInstancesNodes()
8854 def CheckPrereq(self):
8855 """Check prerequisites.
8857 This only checks the optional instance list against the existing names.
8860 if self.wanted_names is None:
8861 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
8863 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
8864 in self.wanted_names]
8866 def _ComputeBlockdevStatus(self, node, instance_name, dev):
8867 """Returns the status of a block device
8870 if self.op.static or not node:
8873 self.cfg.SetDiskID(dev, node)
8875 result = self.rpc.call_blockdev_find(node, dev)
8879 result.Raise("Can't compute disk status for %s" % instance_name)
8881 status = result.payload
8885 return (status.dev_path, status.major, status.minor,
8886 status.sync_percent, status.estimated_time,
8887 status.is_degraded, status.ldisk_status)
8889 def _ComputeDiskStatus(self, instance, snode, dev):
8890 """Compute block device status.
8893 if dev.dev_type in constants.LDS_DRBD:
8894 # we change the snode then (otherwise we use the one passed in)
8895 if dev.logical_id[0] == instance.primary_node:
8896 snode = dev.logical_id[1]
8898 snode = dev.logical_id[0]
8900 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
8902 dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
8905 dev_children = [self._ComputeDiskStatus(instance, snode, child)
8906 for child in dev.children]
8911 "iv_name": dev.iv_name,
8912 "dev_type": dev.dev_type,
8913 "logical_id": dev.logical_id,
8914 "physical_id": dev.physical_id,
8915 "pstatus": dev_pstatus,
8916 "sstatus": dev_sstatus,
8917 "children": dev_children,
8924 def Exec(self, feedback_fn):
8925 """Gather and return data"""
8928 cluster = self.cfg.GetClusterInfo()
8930 for instance in self.wanted_instances:
8931 if not self.op.static:
8932 remote_info = self.rpc.call_instance_info(instance.primary_node,
8934 instance.hypervisor)
8935 remote_info.Raise("Error checking node %s" % instance.primary_node)
8936 remote_info = remote_info.payload
8937 if remote_info and "state" in remote_info:
8940 remote_state = "down"
8943 if instance.admin_up:
8946 config_state = "down"
8948 disks = [self._ComputeDiskStatus(instance, None, device)
8949 for device in instance.disks]
8952 "name": instance.name,
8953 "config_state": config_state,
8954 "run_state": remote_state,
8955 "pnode": instance.primary_node,
8956 "snodes": instance.secondary_nodes,
8958 # this happens to be the same format used for hooks
8959 "nics": _NICListToTuple(self, instance.nics),
8960 "disk_template": instance.disk_template,
8962 "hypervisor": instance.hypervisor,
8963 "network_port": instance.network_port,
8964 "hv_instance": instance.hvparams,
8965 "hv_actual": cluster.FillHV(instance, skip_globals=True),
8966 "be_instance": instance.beparams,
8967 "be_actual": cluster.FillBE(instance),
8968 "os_instance": instance.osparams,
8969 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
8970 "serial_no": instance.serial_no,
8971 "mtime": instance.mtime,
8972 "ctime": instance.ctime,
8973 "uuid": instance.uuid,
8976 result[instance.name] = idict
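# The result maps each instance name to the dict assembled above; clients
# such as "gnt-instance info" (assumed consumer) render it into the
# human-readable instance report.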
8981 class LUInstanceSetParams(LogicalUnit):
8982 """Modifies an instances's parameters.
8985 HPATH = "instance-modify"
8986 HTYPE = constants.HTYPE_INSTANCE
8989 def CheckArguments(self):
8990 if not (self.op.nics or self.op.disks or self.op.disk_template or
8991 self.op.hvparams or self.op.beparams or self.op.os_name):
8992 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
8994 if self.op.hvparams:
8995 _CheckGlobalHvParams(self.op.hvparams)
8999 for disk_op, disk_dict in self.op.disks:
9000 utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
9001 if disk_op == constants.DDM_REMOVE:
9004 elif disk_op == constants.DDM_ADD:
9007 if not isinstance(disk_op, int):
9008 raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
9009 if not isinstance(disk_dict, dict):
9010 msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
9011 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9013 if disk_op == constants.DDM_ADD:
9014 mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
9015 if mode not in constants.DISK_ACCESS_SET:
9016 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
9018 size = disk_dict.get('size', None)
9020 raise errors.OpPrereqError("Required disk parameter size missing",
9024 except (TypeError, ValueError), err:
9025 raise errors.OpPrereqError("Invalid disk size parameter: %s" %
9026 str(err), errors.ECODE_INVAL)
9027 disk_dict['size'] = size
9029 # modification of disk
9030 if 'size' in disk_dict:
9031 raise errors.OpPrereqError("Disk size change not possible, use"
9032 " grow-disk", errors.ECODE_INVAL)
9034 if disk_addremove > 1:
9035 raise errors.OpPrereqError("Only one disk add or remove operation"
9036 " supported at a time", errors.ECODE_INVAL)
9038 if self.op.disks and self.op.disk_template is not None:
9039 raise errors.OpPrereqError("Disk template conversion and other disk"
9040 " changes not supported at the same time",
9043 if (self.op.disk_template and
9044 self.op.disk_template in constants.DTS_NET_MIRROR and
9045 self.op.remote_node is None):
9046 raise errors.OpPrereqError("Changing the disk template to a mirrored"
9047 " one requires specifying a secondary node",
9052 for nic_op, nic_dict in self.op.nics:
9053 utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9054 if nic_op == constants.DDM_REMOVE:
9057 elif nic_op == constants.DDM_ADD:
9060 if not isinstance(nic_op, int):
9061 raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9062 if not isinstance(nic_dict, dict):
9063 msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9064 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9066 # nic_dict should be a dict
9067 nic_ip = nic_dict.get('ip', None)
9068 if nic_ip is not None:
9069 if nic_ip.lower() == constants.VALUE_NONE:
9070 nic_dict['ip'] = None
9072 if not netutils.IPAddress.IsValid(nic_ip):
9073 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9076 nic_bridge = nic_dict.get('bridge', None)
9077 nic_link = nic_dict.get('link', None)
9078 if nic_bridge and nic_link:
9079 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9080 " at the same time", errors.ECODE_INVAL)
9081 elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9082 nic_dict['bridge'] = None
9083 elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9084 nic_dict['link'] = None
9086 if nic_op == constants.DDM_ADD:
9087 nic_mac = nic_dict.get('mac', None)
9089 nic_dict['mac'] = constants.VALUE_AUTO
9091 if 'mac' in nic_dict:
9092 nic_mac = nic_dict['mac']
9093 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9094 nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9096 if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9097 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9098 " modifying an existing nic",
9101 if nic_addremove > 1:
9102 raise errors.OpPrereqError("Only one NIC add or remove operation"
9103 " supported at a time", errors.ECODE_INVAL)
9105 def ExpandNames(self):
9106 self._ExpandAndLockInstance()
9107 self.needed_locks[locking.LEVEL_NODE] = []
9108 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9110 def DeclareLocks(self, level):
9111 if level == locking.LEVEL_NODE:
9112 self._LockInstancesNodes()
9113 if self.op.disk_template and self.op.remote_node:
9114 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9115 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9117 def BuildHooksEnv(self):
9120 This runs on the master, primary and secondaries.
9124 if constants.BE_MEMORY in self.be_new:
9125 args['memory'] = self.be_new[constants.BE_MEMORY]
9126 if constants.BE_VCPUS in self.be_new:
9127 args['vcpus'] = self.be_new[constants.BE_VCPUS]
9128 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9129 # information at all.
9132 nic_override = dict(self.op.nics)
9133 for idx, nic in enumerate(self.instance.nics):
9134 if idx in nic_override:
9135 this_nic_override = nic_override[idx]
9137 this_nic_override = {}
9138 if 'ip' in this_nic_override:
9139 ip = this_nic_override['ip']
9142 if 'mac' in this_nic_override:
9143 mac = this_nic_override['mac']
9146 if idx in self.nic_pnew:
9147 nicparams = self.nic_pnew[idx]
9149 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
9150 mode = nicparams[constants.NIC_MODE]
9151 link = nicparams[constants.NIC_LINK]
9152 args['nics'].append((ip, mac, mode, link))
9153 if constants.DDM_ADD in nic_override:
9154 ip = nic_override[constants.DDM_ADD].get('ip', None)
9155 mac = nic_override[constants.DDM_ADD]['mac']
9156 nicparams = self.nic_pnew[constants.DDM_ADD]
9157 mode = nicparams[constants.NIC_MODE]
9158 link = nicparams[constants.NIC_LINK]
9159 args['nics'].append((ip, mac, mode, link))
9160 elif constants.DDM_REMOVE in nic_override:
9161 del args['nics'][-1]
9163 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
9164 if self.op.disk_template:
9165 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
9166 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9169 def CheckPrereq(self):
9170 """Check prerequisites.
9172 This only checks the instance list against the existing names.
9175 # checking the new params on the primary/secondary nodes
9177 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9178 cluster = self.cluster = self.cfg.GetClusterInfo()
9179 assert self.instance is not None, \
9180 "Cannot retrieve locked instance %s" % self.op.instance_name
9181 pnode = instance.primary_node
9182 nodelist = list(instance.all_nodes)
9185 if self.op.os_name and not self.op.force:
9186 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
9187 self.op.force_variant)
9188 instance_os = self.op.os_name
9190 instance_os = instance.os
9192 if self.op.disk_template:
9193 if instance.disk_template == self.op.disk_template:
9194 raise errors.OpPrereqError("Instance already has disk template %s" %
9195 instance.disk_template, errors.ECODE_INVAL)
9197 if (instance.disk_template,
9198 self.op.disk_template) not in self._DISK_CONVERSIONS:
9199 raise errors.OpPrereqError("Unsupported disk template conversion from"
9200 " %s to %s" % (instance.disk_template,
9201 self.op.disk_template),
9203 _CheckInstanceDown(self, instance, "cannot change disk template")
9204 if self.op.disk_template in constants.DTS_NET_MIRROR:
9205 if self.op.remote_node == pnode:
9206 raise errors.OpPrereqError("Given new secondary node %s is the same"
9207 " as the primary node of the instance" %
9208 self.op.remote_node, errors.ECODE_STATE)
9209 _CheckNodeOnline(self, self.op.remote_node)
9210 _CheckNodeNotDrained(self, self.op.remote_node)
9211 # FIXME: here we assume that the old instance type is DT_PLAIN
9212 assert instance.disk_template == constants.DT_PLAIN
9213 disks = [{"size": d.size, "vg": d.logical_id[0]}
9214 for d in instance.disks]
9215 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
9216 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
9218 # hvparams processing
9219 if self.op.hvparams:
9220 hv_type = instance.hypervisor
9221 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
9222 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
9223 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
9226 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
9227 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
9228 self.hv_new = hv_new # the new actual values
9229 self.hv_inst = i_hvdict # the new dict (without defaults)
9231 self.hv_new = self.hv_inst = {}
9233 # beparams processing
9234 if self.op.beparams:
9235 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
9237 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
9238 be_new = cluster.SimpleFillBE(i_bedict)
9239 self.be_new = be_new # the new actual values
9240 self.be_inst = i_bedict # the new dict (without defaults)
9242 self.be_new = self.be_inst = {}
9244 # osparams processing
9245 if self.op.osparams:
9246 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
9247 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
9248 self.os_inst = i_osdict # the new dict (without defaults)
9254 if constants.BE_MEMORY in self.op.beparams and not self.op.force:
9255 mem_check_list = [pnode]
9256 if be_new[constants.BE_AUTO_BALANCE]:
9257 # either we changed auto_balance to yes or it was from before
9258 mem_check_list.extend(instance.secondary_nodes)
9259 instance_info = self.rpc.call_instance_info(pnode, instance.name,
9260 instance.hypervisor)
9261 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
9262 instance.hypervisor)
9263 pninfo = nodeinfo[pnode]
9264 msg = pninfo.fail_msg
9266 # Assume the primary node is unreachable and go ahead
9267 self.warn.append("Can't get info from primary node %s: %s" %
9269 elif not isinstance(pninfo.payload.get('memory_free', None), int):
9270 self.warn.append("Node data from primary node %s doesn't contain"
9271 " free memory information" % pnode)
9272 elif instance_info.fail_msg:
9273 self.warn.append("Can't get instance runtime information: %s" %
9274 instance_info.fail_msg)
9276 if instance_info.payload:
9277 current_mem = int(instance_info.payload['memory'])
9279 # Assume instance not running
9280 # (there is a slight race condition here, but it's not very probable,
9281 # and we have no other way to check)
9283 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
9284 pninfo.payload['memory_free'])
9286 raise errors.OpPrereqError("This change will prevent the instance"
9287 " from starting, due to %d MB of memory"
9288 " missing on its primary node" % miss_mem,
9291 if be_new[constants.BE_AUTO_BALANCE]:
9292 for node, nres in nodeinfo.items():
9293 if node not in instance.secondary_nodes:
9297 self.warn.append("Can't get info from secondary node %s: %s" %
9299 elif not isinstance(nres.payload.get('memory_free', None), int):
9300 self.warn.append("Secondary node %s didn't return free"
9301 " memory information" % node)
9302 elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
9303 self.warn.append("Not enough memory to failover instance to"
9304 " secondary node %s" % node)
9309 for nic_op, nic_dict in self.op.nics:
9310 if nic_op == constants.DDM_REMOVE:
9311 if not instance.nics:
9312 raise errors.OpPrereqError("Instance has no NICs, cannot remove",
9315 if nic_op != constants.DDM_ADD:
9317 if not instance.nics:
9318 raise errors.OpPrereqError("Invalid NIC index %s, instance has"
9319 " no NICs" % nic_op,
9321 if nic_op < 0 or nic_op >= len(instance.nics):
9322 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
9324 (nic_op, len(instance.nics) - 1),
9326 old_nic_params = instance.nics[nic_op].nicparams
9327 old_nic_ip = instance.nics[nic_op].ip
9332 update_params_dict = dict([(key, nic_dict[key])
9333 for key in constants.NICS_PARAMETERS
9334 if key in nic_dict])
9336 if 'bridge' in nic_dict:
9337 update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
9339 new_nic_params = _GetUpdatedParams(old_nic_params,
9341 utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
9342 new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
9343 objects.NIC.CheckParameterSyntax(new_filled_nic_params)
9344 self.nic_pinst[nic_op] = new_nic_params
9345 self.nic_pnew[nic_op] = new_filled_nic_params
9346 new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
9348 if new_nic_mode == constants.NIC_MODE_BRIDGED:
9349 nic_bridge = new_filled_nic_params[constants.NIC_LINK]
9350 msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
9352 msg = "Error checking bridges on node %s: %s" % (pnode, msg)
9354 self.warn.append(msg)
9356 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
9357 if new_nic_mode == constants.NIC_MODE_ROUTED:
9358 if 'ip' in nic_dict:
9359 nic_ip = nic_dict['ip']
9363 raise errors.OpPrereqError('Cannot set the nic ip to None'
9364 ' on a routed nic', errors.ECODE_INVAL)
9365 if 'mac' in nic_dict:
9366 nic_mac = nic_dict['mac']
9368 raise errors.OpPrereqError('Cannot set the nic mac to None',
9370 elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9371 # otherwise generate the mac
9372 nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
9374 # or validate/reserve the current one
9376 self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
9377 except errors.ReservationError:
9378 raise errors.OpPrereqError("MAC address %s already in use"
9379 " in cluster" % nic_mac,
9380 errors.ECODE_NOTUNIQUE)
9383 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
9384 raise errors.OpPrereqError("Disk operations not supported for"
9385 " diskless instances",
9387 for disk_op, _ in self.op.disks:
9388 if disk_op == constants.DDM_REMOVE:
9389 if len(instance.disks) == 1:
9390 raise errors.OpPrereqError("Cannot remove the last disk of"
9391 " an instance", errors.ECODE_INVAL)
9392 _CheckInstanceDown(self, instance, "cannot remove disks")
9394 if (disk_op == constants.DDM_ADD and
9395 len(instance.disks) >= constants.MAX_DISKS):
9396 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
9397 " add more" % constants.MAX_DISKS,
9399 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
9401 if disk_op < 0 or disk_op >= len(instance.disks):
9402 raise errors.OpPrereqError("Invalid disk index %s, valid values"
9404 (disk_op, len(instance.disks)),
9409 def _ConvertPlainToDrbd(self, feedback_fn):
9410 """Converts an instance from plain to drbd.
9413 feedback_fn("Converting template to drbd")
9414 instance = self.instance
9415 pnode = instance.primary_node
9416 snode = self.op.remote_node
9418 # create a fake disk info for _GenerateDiskTemplate
9419 disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
9420 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9421 instance.name, pnode, [snode],
9422 disk_info, None, None, 0, feedback_fn)
9423 info = _GetInstanceInfoText(instance)
9424 feedback_fn("Creating aditional volumes...")
9425 # first, create the missing data and meta devices
9426 for disk in new_disks:
9427 # unfortunately this is... not too nice
9428 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9430 for child in disk.children:
9431 _CreateSingleBlockDev(self, snode, instance, child, info, True)
9432 # at this stage, all new LVs have been created, we can rename the old ones
9434 feedback_fn("Renaming original volumes...")
9435 rename_list = [(o, n.children[0].logical_id)
9436 for (o, n) in zip(instance.disks, new_disks)]
9437 result = self.rpc.call_blockdev_rename(pnode, rename_list)
9438 result.Raise("Failed to rename original LVs")
9440 feedback_fn("Initializing DRBD devices...")
9441 # all child devices are in place, we can now create the DRBD devices
9442 for disk in new_disks:
9443 for node in [pnode, snode]:
9444 f_create = node == pnode
9445 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9447 # at this point, the instance has been modified
9448 instance.disk_template = constants.DT_DRBD8
9449 instance.disks = new_disks
9450 self.cfg.Update(instance, feedback_fn)
9452 # disks are created, waiting for sync
9453 disk_abort = not _WaitForSync(self, instance)
9455 raise errors.OpExecError("There are some degraded disks for"
9456 " this instance, please cleanup manually")
9458 def _ConvertDrbdToPlain(self, feedback_fn):
9459 """Converts an instance from drbd to plain.
9462 instance = self.instance
9463 assert len(instance.secondary_nodes) == 1
9464 pnode = instance.primary_node
9465 snode = instance.secondary_nodes[0]
9466 feedback_fn("Converting template to plain")
9468 old_disks = instance.disks
9469 new_disks = [d.children[0] for d in old_disks]
9471 # copy over size and mode
9472 for parent, child in zip(old_disks, new_disks):
9473 child.size = parent.size
9474 child.mode = parent.mode
9476 # update instance structure
9477 instance.disks = new_disks
9478 instance.disk_template = constants.DT_PLAIN
9479 self.cfg.Update(instance, feedback_fn)
9481 feedback_fn("Removing volumes on the secondary node...")
9482 for disk in old_disks:
9483 self.cfg.SetDiskID(disk, snode)
9484 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9486 self.LogWarning("Could not remove block device %s on node %s,"
9487 " continuing anyway: %s", disk.iv_name, snode, msg)
9489 feedback_fn("Removing unneeded volumes on the primary node...")
9490 for idx, disk in enumerate(old_disks):
9491 meta = disk.children[1]
9492 self.cfg.SetDiskID(meta, pnode)
9493 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9495 self.LogWarning("Could not remove metadata for disk %d on node %s,"
9496 " continuing anyway: %s", idx, pnode, msg)
9498 def Exec(self, feedback_fn):
9499 """Modifies an instance.
9501 All parameters take effect only at the next restart of the instance.
9504 # Process here the warnings from CheckPrereq, as we don't have a
9505 # feedback_fn there.
9506 for warn in self.warn:
9507 feedback_fn("WARNING: %s" % warn)
9510 instance = self.instance
9512 for disk_op, disk_dict in self.op.disks:
9513 if disk_op == constants.DDM_REMOVE:
9514 # remove the last disk
9515 device = instance.disks.pop()
9516 device_idx = len(instance.disks)
9517 for node, disk in device.ComputeNodeTree(instance.primary_node):
9518 self.cfg.SetDiskID(disk, node)
9519 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9521 self.LogWarning("Could not remove disk/%d on node %s: %s,"
9522 " continuing anyway", device_idx, node, msg)
9523 result.append(("disk/%d" % device_idx, "remove"))
9524 elif disk_op == constants.DDM_ADD:
9526 if instance.disk_template == constants.DT_FILE:
9527 file_driver, file_path = instance.disks[0].logical_id
9528 file_path = os.path.dirname(file_path)
9530 file_driver = file_path = None
9531 disk_idx_base = len(instance.disks)
9532 new_disk = _GenerateDiskTemplate(self,
9533 instance.disk_template,
9534 instance.name, instance.primary_node,
9535 instance.secondary_nodes,
9539 disk_idx_base, feedback_fn)[0]
9540 instance.disks.append(new_disk)
9541 info = _GetInstanceInfoText(instance)
9543 logging.info("Creating volume %s for instance %s",
9544 new_disk.iv_name, instance.name)
9545 # Note: this needs to be kept in sync with _CreateDisks
9547 for node in instance.all_nodes:
9548 f_create = node == instance.primary_node
9550 _CreateBlockDev(self, node, instance, new_disk,
9551 f_create, info, f_create)
9552 except errors.OpExecError, err:
9553 self.LogWarning("Failed to create volume %s (%s) on"
9555 new_disk.iv_name, new_disk, node, err)
9556 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9557 (new_disk.size, new_disk.mode)))
9559 # change a given disk
9560 instance.disks[disk_op].mode = disk_dict['mode']
9561 result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9563 if self.op.disk_template:
9564 r_shut = _ShutdownInstanceDisks(self, instance)
9566 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
9567 " proceed with disk template conversion")
9568 mode = (instance.disk_template, self.op.disk_template)
9570 self._DISK_CONVERSIONS[mode](self, feedback_fn)
9572 self.cfg.ReleaseDRBDMinors(instance.name)
9574 result.append(("disk_template", self.op.disk_template))
9577 for nic_op, nic_dict in self.op.nics:
9578 if nic_op == constants.DDM_REMOVE:
9579 # remove the last nic
9580 del instance.nics[-1]
9581 result.append(("nic.%d" % len(instance.nics), "remove"))
9582 elif nic_op == constants.DDM_ADD:
9583 # mac and bridge should be set by now
9584 mac = nic_dict['mac']
9585 ip = nic_dict.get('ip', None)
9586 nicparams = self.nic_pinst[constants.DDM_ADD]
9587 new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9588 instance.nics.append(new_nic)
9589 result.append(("nic.%d" % (len(instance.nics) - 1),
9590 "add:mac=%s,ip=%s,mode=%s,link=%s" %
9591 (new_nic.mac, new_nic.ip,
9592 self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9593 self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9596 for key in 'mac', 'ip':
9598 setattr(instance.nics[nic_op], key, nic_dict[key])
9599 if nic_op in self.nic_pinst:
9600 instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9601 for key, val in nic_dict.iteritems():
9602 result.append(("nic.%s/%d" % (key, nic_op), val))
9605 if self.op.hvparams:
9606 instance.hvparams = self.hv_inst
9607 for key, val in self.op.hvparams.iteritems():
9608 result.append(("hv/%s" % key, val))
9611 if self.op.beparams:
9612 instance.beparams = self.be_inst
9613 for key, val in self.op.beparams.iteritems():
9614 result.append(("be/%s" % key, val))
9618 instance.os = self.op.os_name
9621 if self.op.osparams:
9622 instance.osparams = self.os_inst
9623 for key, val in self.op.osparams.iteritems():
9624 result.append(("os/%s" % key, val))
9626 self.cfg.Update(instance, feedback_fn)
9630 _DISK_CONVERSIONS = {
9631 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
9632 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
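# Illustrative note (editor's sketch, not part of the original flow): the
# _DISK_CONVERSIONS map above is keyed by (old_template, new_template)
# pairs, so Exec() can dispatch a conversion with a plain dict lookup:
#
#   mode = (instance.disk_template, self.op.disk_template)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)
#
# Any pair not present in the map is rejected earlier, in CheckPrereq.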
9636 class LUBackupQuery(NoHooksLU):
9637 """Query the exports list
9642 def ExpandNames(self):
9643 self.needed_locks = {}
9644 self.share_locks[locking.LEVEL_NODE] = 1
9645 if not self.op.nodes:
9646 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9648 self.needed_locks[locking.LEVEL_NODE] = \
9649 _GetWantedNodes(self, self.op.nodes)
9651 def Exec(self, feedback_fn):
9652 """Compute the list of all the exported system images.
9655 @return: a dictionary with the structure node->(export-list)
9656 where export-list is a list of the instances exported on
9660 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9661 rpcresult = self.rpc.call_export_list(self.nodes)
9663 for node in rpcresult:
9664 if rpcresult[node].fail_msg:
9665 result[node] = False
9667 result[node] = rpcresult[node].payload
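# Illustrative example (editor's sketch, hypothetical node and instance
# names): the dictionary built above maps each queried node either to its
# export list or to False when the RPC to that node failed, e.g.:
#
#   {"node1.example.com": ["instance1.example.com", "instance2.example.com"],
#    "node2.example.com": False}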
9672 class LUBackupPrepare(NoHooksLU):
9673 """Prepares an instance for an export and returns useful information.
9678 def ExpandNames(self):
9679 self._ExpandAndLockInstance()
9681 def CheckPrereq(self):
9682 """Check prerequisites.
9685 instance_name = self.op.instance_name
9687 self.instance = self.cfg.GetInstanceInfo(instance_name)
9688 assert self.instance is not None, \
9689 "Cannot retrieve locked instance %s" % self.op.instance_name
9690 _CheckNodeOnline(self, self.instance.primary_node)
9692 self._cds = _GetClusterDomainSecret()
9694 def Exec(self, feedback_fn):
9695 """Prepares an instance for an export.
9698 instance = self.instance
9700 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9701 salt = utils.GenerateSecret(8)
9703 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
9704 result = self.rpc.call_x509_cert_create(instance.primary_node,
9705 constants.RIE_CERT_VALIDITY)
9706 result.Raise("Can't create X509 key and certificate on %s" % result.node)
9708 (name, cert_pem) = result.payload
9710 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
9714 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
9715 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
9717 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
9723 class LUBackupExport(LogicalUnit):
9724 """Export an instance to an image in the cluster.
9727 HPATH = "instance-export"
9728 HTYPE = constants.HTYPE_INSTANCE
9731 def CheckArguments(self):
9732 """Check the arguments.
9735 self.x509_key_name = self.op.x509_key_name
9736 self.dest_x509_ca_pem = self.op.destination_x509_ca
9738 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9739 if not self.x509_key_name:
9740 raise errors.OpPrereqError("Missing X509 key name for encryption",
9743 if not self.dest_x509_ca_pem:
9744 raise errors.OpPrereqError("Missing destination X509 CA",
9747 def ExpandNames(self):
9748 self._ExpandAndLockInstance()
9750 # Lock all nodes for local exports
9751 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9752 # FIXME: lock only instance primary and destination node
9754 # Sad but true, for now we have to lock all nodes, as we don't know where
9755 # the previous export might be, and in this LU we search for it and
9756 # remove it from its current node. In the future we could fix this by:
9757 # - making a tasklet to search (share-lock all), then create the
9758 # new one, then one to remove, after
9759 # - removing the removal operation altogether
9760 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9762 def DeclareLocks(self, level):
9763 """Last minute lock declaration."""
9764 # All nodes are locked anyway, so nothing to do here.
9766 def BuildHooksEnv(self):
9769 This will run on the master, primary node and target node.
9773 "EXPORT_MODE": self.op.mode,
9774 "EXPORT_NODE": self.op.target_node,
9775 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
9776 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
9777 # TODO: Generic function for boolean env variables
9778 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
9781 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9783 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
9785 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9786 nl.append(self.op.target_node)
9790 def CheckPrereq(self):
9791 """Check prerequisites.
9793 This checks that the instance and node names are valid.
9796 instance_name = self.op.instance_name
9798 self.instance = self.cfg.GetInstanceInfo(instance_name)
9799 assert self.instance is not None, \
9800 "Cannot retrieve locked instance %s" % self.op.instance_name
9801 _CheckNodeOnline(self, self.instance.primary_node)
9803 if (self.op.remove_instance and self.instance.admin_up and
9804 not self.op.shutdown):
9805 raise errors.OpPrereqError("Can not remove instance without shutting it"
9808 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9809 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
9810 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
9811 assert self.dst_node is not None
9813 _CheckNodeOnline(self, self.dst_node.name)
9814 _CheckNodeNotDrained(self, self.dst_node.name)
9817 self.dest_disk_info = None
9818 self.dest_x509_ca = None
9820 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9821 self.dst_node = None
9823 if len(self.op.target_node) != len(self.instance.disks):
9824 raise errors.OpPrereqError(("Received destination information for %s"
9825 " disks, but instance %s has %s disks") %
9826 (len(self.op.target_node), instance_name,
9827 len(self.instance.disks)),
9830 cds = _GetClusterDomainSecret()
9832 # Check X509 key name
9834 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
9835 except (TypeError, ValueError), err:
9836 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
9838 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
9839 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
9842 # Load and verify CA
9844 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
9845 except OpenSSL.crypto.Error, err:
9846 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
9847 (err, ), errors.ECODE_INVAL)
9849 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9850 if errcode is not None:
9851 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
9852 (msg, ), errors.ECODE_INVAL)
9854 self.dest_x509_ca = cert
9856 # Verify target information
9858 for idx, disk_data in enumerate(self.op.target_node):
9860 (host, port, magic) = \
9861 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
9862 except errors.GenericError, err:
9863 raise errors.OpPrereqError("Target info for disk %s: %s" %
9864 (idx, err), errors.ECODE_INVAL)
9866 disk_info.append((host, port, magic))
9868 assert len(disk_info) == len(self.op.target_node)
9869 self.dest_disk_info = disk_info
9872 raise errors.ProgrammerError("Unhandled export mode %r" %
9875 # instance disk type verification
9876 # TODO: Implement export support for file-based disks
9877 for disk in self.instance.disks:
9878 if disk.dev_type == constants.LD_FILE:
9879 raise errors.OpPrereqError("Export not supported for instances with"
9880 " file-based disks", errors.ECODE_INVAL)
9882 def _CleanupExports(self, feedback_fn):
9883 """Removes exports of current instance from all other nodes.
9885 If an instance in a cluster with nodes A..D was exported to node C, its
9886 exports will be removed from the nodes A, B and D.
9889 assert self.op.mode != constants.EXPORT_MODE_REMOTE
9891 nodelist = self.cfg.GetNodeList()
9892 nodelist.remove(self.dst_node.name)
9894 # on one-node clusters nodelist will be empty after the removal
9895 # if we proceed, the backup would be removed because OpBackupQuery
9896 # substitutes an empty list with the full cluster node list.
9897 iname = self.instance.name
9899 feedback_fn("Removing old exports for instance %s" % iname)
9900 exportlist = self.rpc.call_export_list(nodelist)
9901 for node in exportlist:
9902 if exportlist[node].fail_msg:
9904 if iname in exportlist[node].payload:
9905 msg = self.rpc.call_export_remove(node, iname).fail_msg
9907 self.LogWarning("Could not remove older export for instance %s"
9908 " on node %s: %s", iname, node, msg)
9910 def Exec(self, feedback_fn):
9911 """Export an instance to an image in the cluster.
9914 assert self.op.mode in constants.EXPORT_MODES
9916 instance = self.instance
9917 src_node = instance.primary_node
9919 if self.op.shutdown:
9920 # shut down the instance, but not the disks
9921 feedback_fn("Shutting down instance %s" % instance.name)
9922 result = self.rpc.call_instance_shutdown(src_node, instance,
9923 self.op.shutdown_timeout)
9924 # TODO: Maybe ignore failures if ignore_remove_failures is set
9925 result.Raise("Could not shutdown instance %s on"
9926 " node %s" % (instance.name, src_node))
9928 # set the disk IDs correctly since call_instance_start needs the
9929 # correct drbd minor to create the symlinks
9930 for disk in instance.disks:
9931 self.cfg.SetDiskID(disk, src_node)
9933 activate_disks = (not instance.admin_up)
9936 # Activate the instance disks if we're exporting a stopped instance
9937 feedback_fn("Activating disks for %s" % instance.name)
9938 _StartInstanceDisks(self, instance, None)
9941 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
9944 helper.CreateSnapshots()
9946 if (self.op.shutdown and instance.admin_up and
9947 not self.op.remove_instance):
9948 assert not activate_disks
9949 feedback_fn("Starting instance %s" % instance.name)
9950 result = self.rpc.call_instance_start(src_node, instance, None, None)
9951 msg = result.fail_msg
9953 feedback_fn("Failed to start instance: %s" % msg)
9954 _ShutdownInstanceDisks(self, instance)
9955 raise errors.OpExecError("Could not start instance: %s" % msg)
9957 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9958 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
9959 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9960 connect_timeout = constants.RIE_CONNECT_TIMEOUT
9961 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
9963 (key_name, _, _) = self.x509_key_name
9966 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
9969 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
9970 key_name, dest_ca_pem,
9975 # Check for backwards compatibility
9976 assert len(dresults) == len(instance.disks)
9977 assert compat.all(isinstance(i, bool) for i in dresults), \
9978 "Not all results are boolean: %r" % dresults
9982 feedback_fn("Deactivating disks for %s" % instance.name)
9983 _ShutdownInstanceDisks(self, instance)
9985 if not (compat.all(dresults) and fin_resu):
9988 failures.append("export finalization")
9989 if not compat.all(dresults):
9990 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
9992 failures.append("disk export: disk(s) %s" % fdsk)
9994 raise errors.OpExecError("Export failed, errors in %s" %
9995 utils.CommaJoin(failures))
9997 # At this point, the export was successful, we can clean up and finish
9999 # Remove instance if requested
10000 if self.op.remove_instance:
10001 feedback_fn("Removing instance %s" % instance.name)
10002 _RemoveInstance(self, feedback_fn, instance,
10003 self.op.ignore_remove_failures)
10005 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10006 self._CleanupExports(feedback_fn)
10008 return fin_resu, dresults
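# Illustrative example (editor's sketch): Exec() returns the finalization
# status together with one boolean per exported disk, so a result such as
#
#   (True, [True, False])
#
# would mean "export finalized, but disk 1 failed to transfer"; the checks
# above raise OpExecError in that case before the value is ever returned.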
10011 class LUBackupRemove(NoHooksLU):
10012 """Remove exports related to the named instance.
10017 def ExpandNames(self):
10018 self.needed_locks = {}
10019 # We need all nodes to be locked in order for RemoveExport to work, but we
10020 # don't need to lock the instance itself, as nothing will happen to it (and
10021 # we can remove exports also for a removed instance)
10022 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10024 def Exec(self, feedback_fn):
10025 """Remove any export.
10028 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
10029 # If the instance was not found we'll try with the name that was passed in.
10030 # This will only work if it was an FQDN, though.
10032 if not instance_name:
10034 instance_name = self.op.instance_name
10036 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
10037 exportlist = self.rpc.call_export_list(locked_nodes)
10039 for node in exportlist:
10040 msg = exportlist[node].fail_msg
10042 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
10044 if instance_name in exportlist[node].payload:
10046 result = self.rpc.call_export_remove(node, instance_name)
10047 msg = result.fail_msg
10049 logging.error("Could not remove export for instance %s"
10050 " on node %s: %s", instance_name, node, msg)
10052 if fqdn_warn and not found:
10053 feedback_fn("Export not found. If trying to remove an export belonging"
10054 " to a deleted instance please use its Fully Qualified"
10058 class LUGroupAdd(LogicalUnit):
10059 """Logical unit for creating node groups.
10062 HPATH = "group-add"
10063 HTYPE = constants.HTYPE_GROUP
10066 def ExpandNames(self):
10067 # We need the new group's UUID here so that we can create and acquire the
10068 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
10069 # that it should not check whether the UUID exists in the configuration.
10070 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
10071 self.needed_locks = {}
10072 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10074 def CheckPrereq(self):
10075 """Check prerequisites.
10077 This checks that the given group name is not an existing node group
10082 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10083 except errors.OpPrereqError:
10086 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
10087 " node group (UUID: %s)" %
10088 (self.op.group_name, existing_uuid),
10089 errors.ECODE_EXISTS)
10091 if self.op.ndparams:
10092 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10094 def BuildHooksEnv(self):
10095 """Build hooks env.
10099 "GROUP_NAME": self.op.group_name,
10101 mn = self.cfg.GetMasterNode()
10102 return env, [mn], [mn]
10104 def Exec(self, feedback_fn):
10105 """Add the node group to the cluster.
10108 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
10109 uuid=self.group_uuid,
10110 alloc_policy=self.op.alloc_policy,
10111 ndparams=self.op.ndparams)
10113 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
10114 del self.remove_locks[locking.LEVEL_NODEGROUP]
10117 class LUGroupAssignNodes(NoHooksLU):
10118 """Logical unit for assigning nodes to groups.
10123 def ExpandNames(self):
10124 # These raise errors.OpPrereqError on their own:
10125 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10126 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
10128 # We want to lock all the affected nodes and groups. We have readily
10129 # available the list of nodes, and the *destination* group. To gather the
10130 # list of "source" groups, we need to fetch node information.
10131 self.node_data = self.cfg.GetAllNodesInfo()
10132 affected_groups = set(self.node_data[node].group for node in self.op.nodes)
10133 affected_groups.add(self.group_uuid)
10135 self.needed_locks = {
10136 locking.LEVEL_NODEGROUP: list(affected_groups),
10137 locking.LEVEL_NODE: self.op.nodes,
10140 def CheckPrereq(self):
10141 """Check prerequisites.
10144 self.group = self.cfg.GetNodeGroup(self.group_uuid)
10145 instance_data = self.cfg.GetAllInstancesInfo()
10147 if self.group is None:
10148 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10149 (self.op.group_name, self.group_uuid))
10151 (new_splits, previous_splits) = \
10152 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
10153 for node in self.op.nodes],
10154 self.node_data, instance_data)
10157 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
10159 if not self.op.force:
10160 raise errors.OpExecError("The following instances get split by this"
10161 " change and --force was not given: %s" %
10164 self.LogWarning("This operation will split the following instances: %s",
10167 if previous_splits:
10168 self.LogWarning("In addition, these already-split instances continue"
10169 " to be spit across groups: %s",
10170 utils.CommaJoin(utils.NiceSort(previous_splits)))
10172 def Exec(self, feedback_fn):
10173 """Assign nodes to a new group.
10176 for node in self.op.nodes:
10177 self.node_data[node].group = self.group_uuid
10179 self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
10182 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
10183 """Check for split instances after a node assignment.
10185 This method considers a series of node assignments as an atomic operation,
10186 and returns information about split instances after applying the set of
10189 In particular, it returns information about newly split instances, and
10190 instances that were already split, and remain so after the change.
10192 Only instances whose disk template is listed in constants.DTS_NET_MIRROR are considered.
10195 @type changes: list of (node_name, new_group_uuid) pairs.
10196 @param changes: list of node assignments to consider.
10197 @param node_data: a dict with data for all nodes
10198 @param instance_data: a dict with all instances to consider
10199 @rtype: a two-tuple
10200 @return: a list of instances that were previously okay and end up split as a
10201 consequence of this change, and a list of instances that were previously
10202 split and this change does not fix.
10205 changed_nodes = dict((node, group) for node, group in changes
10206 if node_data[node].group != group)
10208 all_split_instances = set()
10209 previously_split_instances = set()
10211 def InstanceNodes(instance):
10212 return [instance.primary_node] + list(instance.secondary_nodes)
10214 for inst in instance_data.values():
10215 if inst.disk_template not in constants.DTS_NET_MIRROR:
10218 instance_nodes = InstanceNodes(inst)
10220 if len(set(node_data[node].group for node in instance_nodes)) > 1:
10221 previously_split_instances.add(inst.name)
10223 if len(set(changed_nodes.get(node, node_data[node].group)
10224 for node in instance_nodes)) > 1:
10225 all_split_instances.add(inst.name)
10227 return (list(all_split_instances - previously_split_instances),
10228 list(previously_split_instances & all_split_instances))
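# Illustrative example (editor's sketch, hypothetical names): with a DRBD
# instance that already spans groups "g1" and "g2", and another instance
# fully inside "g1", moving one of the latter's nodes to "g2" would yield
# roughly:
#
#   new_splits      == [<instance that just became split>]
#   previous_splits == [<instance that was already split before the change>]
#
# Instances whose disk template is not in constants.DTS_NET_MIRROR are
# ignored by this check.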
10231 class _GroupQuery(_QueryBase):
10233 FIELDS = query.GROUP_FIELDS
10235 def ExpandNames(self, lu):
10236 lu.needed_locks = {}
10238 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
10239 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
10242 self.wanted = [name_to_uuid[name]
10243 for name in utils.NiceSort(name_to_uuid.keys())]
10245 # Accept names to be either names or UUIDs.
10248 all_uuid = frozenset(self._all_groups.keys())
10250 for name in self.names:
10251 if name in all_uuid:
10252 self.wanted.append(name)
10253 elif name in name_to_uuid:
10254 self.wanted.append(name_to_uuid[name])
10256 missing.append(name)
10259 raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
10260 errors.ECODE_NOENT)
10262 def DeclareLocks(self, lu, level):
10265 def _GetQueryData(self, lu):
10266 """Computes the list of node groups and their attributes.
10269 do_nodes = query.GQ_NODE in self.requested_data
10270 do_instances = query.GQ_INST in self.requested_data
10272 group_to_nodes = None
10273 group_to_instances = None
10275 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
10276 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
10277 # latter GetAllInstancesInfo() is not enough, for we have to go through
10278 # instance->node. Hence, we will need to process nodes even if we only need
10279 # instance information.
10280 if do_nodes or do_instances:
10281 all_nodes = lu.cfg.GetAllNodesInfo()
10282 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
10285 for node in all_nodes.values():
10286 if node.group in group_to_nodes:
10287 group_to_nodes[node.group].append(node.name)
10288 node_to_group[node.name] = node.group
10291 all_instances = lu.cfg.GetAllInstancesInfo()
10292 group_to_instances = dict((uuid, []) for uuid in self.wanted)
10294 for instance in all_instances.values():
10295 node = instance.primary_node
10296 if node in node_to_group:
10297 group_to_instances[node_to_group[node]].append(instance.name)
10300 # Do not pass on node information if it was not requested.
10301 group_to_nodes = None
10303 return query.GroupQueryData([self._all_groups[uuid]
10304 for uuid in self.wanted],
10305 group_to_nodes, group_to_instances)
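# Illustrative example (editor's sketch, hypothetical names): when GQ_NODE or
# GQ_INST data is requested, the helper above builds mappings keyed by group
# UUID, roughly of the form
#
#   group_to_nodes     == {"uuid-of-g1": ["node1", "node2"]}
#   group_to_instances == {"uuid-of-g1": ["instance1"]}
#
# where each instance is attributed to the group of its primary node.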
10308 class LUGroupQuery(NoHooksLU):
10309 """Logical unit for querying node groups.
10314 def CheckArguments(self):
10315 self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)
10317 def ExpandNames(self):
10318 self.gq.ExpandNames(self)
10320 def Exec(self, feedback_fn):
10321 return self.gq.OldStyleQuery(self)
10324 class LUGroupSetParams(LogicalUnit):
10325 """Modifies the parameters of a node group.
10328 HPATH = "group-modify"
10329 HTYPE = constants.HTYPE_GROUP
10332 def CheckArguments(self):
10335 self.op.alloc_policy,
10338 if all_changes.count(None) == len(all_changes):
10339 raise errors.OpPrereqError("Please pass at least one modification",
10340 errors.ECODE_INVAL)
10342 def ExpandNames(self):
10343 # This raises errors.OpPrereqError on its own:
10344 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10346 self.needed_locks = {
10347 locking.LEVEL_NODEGROUP: [self.group_uuid],
10350 def CheckPrereq(self):
10351 """Check prerequisites.
10354 self.group = self.cfg.GetNodeGroup(self.group_uuid)
10356 if self.group is None:
10357 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10358 (self.op.group_name, self.group_uuid))
10360 if self.op.ndparams:
10361 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
10362 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10363 self.new_ndparams = new_ndparams
10365 def BuildHooksEnv(self):
10366 """Build hooks env.
10370 "GROUP_NAME": self.op.group_name,
10371 "NEW_ALLOC_POLICY": self.op.alloc_policy,
10373 mn = self.cfg.GetMasterNode()
10374 return env, [mn], [mn]
10376 def Exec(self, feedback_fn):
10377 """Modifies the node group.
10382 if self.op.ndparams:
10383 self.group.ndparams = self.new_ndparams
10384 result.append(("ndparams", str(self.group.ndparams)))
10386 if self.op.alloc_policy:
10387 self.group.alloc_policy = self.op.alloc_policy
10389 self.cfg.Update(self.group, feedback_fn)
10394 class LUGroupRemove(LogicalUnit):
10395 HPATH = "group-remove"
10396 HTYPE = constants.HTYPE_GROUP
10399 def ExpandNames(self):
10400 # This will raise errors.OpPrereqError on its own:
10401 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10402 self.needed_locks = {
10403 locking.LEVEL_NODEGROUP: [self.group_uuid],
10406 def CheckPrereq(self):
10407 """Check prerequisites.
10409 This checks that the given group name exists as a node group, that it is
10410 empty (i.e., contains no nodes), and that it is not the last group of the cluster.
10414 # Verify that the group is empty.
10415 group_nodes = [node.name
10416 for node in self.cfg.GetAllNodesInfo().values()
10417 if node.group == self.group_uuid]
10420 raise errors.OpPrereqError("Group '%s' not empty, has the following"
10422 (self.op.group_name,
10423 utils.CommaJoin(utils.NiceSort(group_nodes))),
10424 errors.ECODE_STATE)
10426 # Verify the cluster would not be left group-less.
10427 if len(self.cfg.GetNodeGroupList()) == 1:
10428 raise errors.OpPrereqError("Group '%s' is the only group,"
10429 " cannot be removed" %
10430 self.op.group_name,
10431 errors.ECODE_STATE)
10433 def BuildHooksEnv(self):
10434 """Build hooks env.
10438 "GROUP_NAME": self.op.group_name,
10440 mn = self.cfg.GetMasterNode()
10441 return env, [mn], [mn]
10443 def Exec(self, feedback_fn):
10444 """Remove the node group.
10448 self.cfg.RemoveNodeGroup(self.group_uuid)
10449 except errors.ConfigurationError:
10450 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
10451 (self.op.group_name, self.group_uuid))
10453 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10456 class LUGroupRename(LogicalUnit):
10457 HPATH = "group-rename"
10458 HTYPE = constants.HTYPE_GROUP
10461 def ExpandNames(self):
10462 # This raises errors.OpPrereqError on its own:
10463 self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name)
10465 self.needed_locks = {
10466 locking.LEVEL_NODEGROUP: [self.group_uuid],
10469 def CheckPrereq(self):
10470 """Check prerequisites.
10472 This checks that the given old_name exists as a node group, and that the new name is not already in use.
10477 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
10478 except errors.OpPrereqError:
10481 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
10482 " node group (UUID: %s)" %
10483 (self.op.new_name, new_name_uuid),
10484 errors.ECODE_EXISTS)
10486 def BuildHooksEnv(self):
10487 """Build hooks env.
10491 "OLD_NAME": self.op.old_name,
10492 "NEW_NAME": self.op.new_name,
10495 mn = self.cfg.GetMasterNode()
10496 all_nodes = self.cfg.GetAllNodesInfo()
10498 all_nodes.pop(mn, None)
10500 for node in all_nodes.values():
10501 if node.group == self.group_uuid:
10502 run_nodes.append(node.name)
10504 return env, run_nodes, run_nodes
10506 def Exec(self, feedback_fn):
10507 """Rename the node group.
10510 group = self.cfg.GetNodeGroup(self.group_uuid)
10513 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10514 (self.op.old_name, self.group_uuid))
10516 group.name = self.op.new_name
10517 self.cfg.Update(group, feedback_fn)
10519 return self.op.new_name
10522 class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
10523 """Generic tags LU.
10525 This is an abstract class which is the parent of all the other tags LUs.
10529 def ExpandNames(self):
10530 self.needed_locks = {}
10531 if self.op.kind == constants.TAG_NODE:
10532 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
10533 self.needed_locks[locking.LEVEL_NODE] = self.op.name
10534 elif self.op.kind == constants.TAG_INSTANCE:
10535 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
10536 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
10538 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
10539 # not possible to acquire the BGL based on opcode parameters)
10541 def CheckPrereq(self):
10542 """Check prerequisites.
10545 if self.op.kind == constants.TAG_CLUSTER:
10546 self.target = self.cfg.GetClusterInfo()
10547 elif self.op.kind == constants.TAG_NODE:
10548 self.target = self.cfg.GetNodeInfo(self.op.name)
10549 elif self.op.kind == constants.TAG_INSTANCE:
10550 self.target = self.cfg.GetInstanceInfo(self.op.name)
10552 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
10553 str(self.op.kind), errors.ECODE_INVAL)
10556 class LUTagsGet(TagsLU):
10557 """Returns the tags of a given object.
10562 def ExpandNames(self):
10563 TagsLU.ExpandNames(self)
10565 # Share locks as this is only a read operation
10566 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
10568 def Exec(self, feedback_fn):
10569 """Returns the tag list.
10572 return list(self.target.GetTags())
10575 class LUTagsSearch(NoHooksLU):
10576 """Searches the tags for a given pattern.
10581 def ExpandNames(self):
10582 self.needed_locks = {}
10584 def CheckPrereq(self):
10585 """Check prerequisites.
10587 This checks the pattern passed for validity by compiling it.
10591 self.re = re.compile(self.op.pattern)
10592 except re.error, err:
10593 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
10594 (self.op.pattern, err), errors.ECODE_INVAL)
10596 def Exec(self, feedback_fn):
10597 """Returns the tag list.
10601 tgts = [("/cluster", cfg.GetClusterInfo())]
10602 ilist = cfg.GetAllInstancesInfo().values()
10603 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
10604 nlist = cfg.GetAllNodesInfo().values()
10605 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
10607 for path, target in tgts:
10608 for tag in target.GetTags():
10609 if self.re.search(tag):
10610 results.append((path, tag))
10614 class LUTagsSet(TagsLU):
10615 """Sets a tag on a given object.
10620 def CheckPrereq(self):
10621 """Check prerequisites.
10623 This checks the type and length of the tag name and value.
10626 TagsLU.CheckPrereq(self)
10627 for tag in self.op.tags:
10628 objects.TaggableObject.ValidateTag(tag)
10630 def Exec(self, feedback_fn):
10635 for tag in self.op.tags:
10636 self.target.AddTag(tag)
10637 except errors.TagError, err:
10638 raise errors.OpExecError("Error while setting tag: %s" % str(err))
10639 self.cfg.Update(self.target, feedback_fn)
10642 class LUTagsDel(TagsLU):
10643 """Delete a list of tags from a given object.
10648 def CheckPrereq(self):
10649 """Check prerequisites.
10651 This checks that we have the given tag.
10654 TagsLU.CheckPrereq(self)
10655 for tag in self.op.tags:
10656 objects.TaggableObject.ValidateTag(tag)
10657 del_tags = frozenset(self.op.tags)
10658 cur_tags = self.target.GetTags()
10660 diff_tags = del_tags - cur_tags
10662 diff_names = ("'%s'" % i for i in sorted(diff_tags))
10663 raise errors.OpPrereqError("Tag(s) %s not found" %
10664 (utils.CommaJoin(diff_names), ),
10665 errors.ECODE_NOENT)
10667 def Exec(self, feedback_fn):
10668 """Remove the tag from the object.
10671 for tag in self.op.tags:
10672 self.target.RemoveTag(tag)
10673 self.cfg.Update(self.target, feedback_fn)
10676 class LUTestDelay(NoHooksLU):
10677 """Sleep for a specified amount of time.
10679 This LU sleeps on the master and/or nodes for a specified amount of time.
10685 def ExpandNames(self):
10686 """Expand names and set required locks.
10688 This expands the node list, if any.
10691 self.needed_locks = {}
10692 if self.op.on_nodes:
10693 # _GetWantedNodes can be used here, but is not always appropriate to use
10694 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
10695 # more information.
10696 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
10697 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
10699 def _TestDelay(self):
10700 """Do the actual sleep.
10703 if self.op.on_master:
10704 if not utils.TestDelay(self.op.duration):
10705 raise errors.OpExecError("Error during master delay test")
10706 if self.op.on_nodes:
10707 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
10708 for node, node_result in result.items():
10709 node_result.Raise("Failure during rpc call to node %s" % node)
10711 def Exec(self, feedback_fn):
10712 """Execute the test delay opcode, with the wanted repetitions.
10715 if self.op.repeat == 0:
10718 top_value = self.op.repeat - 1
10719 for i in range(self.op.repeat):
10720 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
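# Illustrative example (editor's sketch, assuming the usual OpTestDelay
# opcode parameters): a three-second delay repeated twice on the master
# would be requested with something like
#
#   op = opcodes.OpTestDelay(duration=3.0, on_master=True,
#                            on_nodes=[], repeat=2)
#
# Each repetition logs "Test delay iteration i/top" via LogInfo as above.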
10724 class LUTestJqueue(NoHooksLU):
10725 """Utility LU to test some aspects of the job queue.
10730 # Must be lower than default timeout for WaitForJobChange to see whether it
10731 # notices changed jobs
10732 _CLIENT_CONNECT_TIMEOUT = 20.0
10733 _CLIENT_CONFIRM_TIMEOUT = 60.0
10736 def _NotifyUsingSocket(cls, cb, errcls):
10737 """Opens a Unix socket and waits for another program to connect.
10740 @param cb: Callback to send socket name to client
10741 @type errcls: class
10742 @param errcls: Exception class to use for errors
10745 # Using a temporary directory as there's no easy way to create temporary
10746 # sockets without writing a custom loop around tempfile.mktemp and socket.bind
10748 tmpdir = tempfile.mkdtemp()
10750 tmpsock = utils.PathJoin(tmpdir, "sock")
10752 logging.debug("Creating temporary socket at %s", tmpsock)
10753 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
10758 # Send details to client
10761 # Wait for client to connect before continuing
10762 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
10764 (conn, _) = sock.accept()
10765 except socket.error, err:
10766 raise errcls("Client didn't connect in time (%s)" % err)
10770 # Remove as soon as client is connected
10771 shutil.rmtree(tmpdir)
10773 # Wait for client to close
10776 # pylint: disable-msg=E1101
10777 # Instance of '_socketobject' has no ... member
10778 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
10780 except socket.error, err:
10781 raise errcls("Client failed to confirm notification (%s)" % err)
10785 def _SendNotification(self, test, arg, sockname):
10786 """Sends a notification to the client.
10789 @param test: Test name
10790 @param arg: Test argument (depends on test)
10791 @type sockname: string
10792 @param sockname: Socket path
10795 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
10797 def _Notify(self, prereq, test, arg):
10798 """Notifies the client of a test.
10801 @param prereq: Whether this is a prereq-phase test
10803 @param test: Test name
10804 @param arg: Test argument (depends on test)
10808 errcls = errors.OpPrereqError
10810 errcls = errors.OpExecError
10812 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
10816 def CheckArguments(self):
10817 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
10818 self.expandnames_calls = 0
10820 def ExpandNames(self):
10821 checkargs_calls = getattr(self, "checkargs_calls", 0)
10822 if checkargs_calls < 1:
10823 raise errors.ProgrammerError("CheckArguments was not called")
10825 self.expandnames_calls += 1
10827 if self.op.notify_waitlock:
10828 self._Notify(True, constants.JQT_EXPANDNAMES, None)
10830 self.LogInfo("Expanding names")
10832 # Get lock on master node (just to get a lock, not for a particular reason)
10833 self.needed_locks = {
10834 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
10837 def Exec(self, feedback_fn):
10838 if self.expandnames_calls < 1:
10839 raise errors.ProgrammerError("ExpandNames was not called")
10841 if self.op.notify_exec:
10842 self._Notify(False, constants.JQT_EXEC, None)
10844 self.LogInfo("Executing")
10846 if self.op.log_messages:
10847 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
10848 for idx, msg in enumerate(self.op.log_messages):
10849 self.LogInfo("Sending log message %s", idx + 1)
10850 feedback_fn(constants.JQT_MSGPREFIX + msg)
10851 # Report how many test messages have been sent
10852 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
10855 raise errors.OpExecError("Opcode failure was requested")
10860 class IAllocator(object):
10861 """IAllocator framework.
10863 An IAllocator instance has the following sets of attributes:
10864 - cfg that is needed to query the cluster
10865 - input data (all members of the _KEYS class attribute are required)
10866 - four buffer attributes (in|out_data|text), that represent the
10867 input (to the external script) in text and data structure format,
10868 and the output from it, again in two formats
10869 - the result variables from the script (success, info, nodes) for easy usage
10873 # pylint: disable-msg=R0902
10874 # lots of instance attributes
10876 "name", "mem_size", "disks", "disk_template",
10877 "os", "tags", "nics", "vcpus", "hypervisor",
10880 "name", "relocate_from",
10886 def __init__(self, cfg, rpc, mode, **kwargs):
10889 # init buffer variables
10890 self.in_text = self.out_text = self.in_data = self.out_data = None
10891 # init all input fields so that pylint is happy
10893 self.mem_size = self.disks = self.disk_template = None
10894 self.os = self.tags = self.nics = self.vcpus = None
10895 self.hypervisor = None
10896 self.relocate_from = None
10898 self.evac_nodes = None
10900 self.required_nodes = None
10901 # init result fields
10902 self.success = self.info = self.result = None
10903 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10904 keyset = self._ALLO_KEYS
10905 fn = self._AddNewInstance
10906 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10907 keyset = self._RELO_KEYS
10908 fn = self._AddRelocateInstance
10909 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10910 keyset = self._EVAC_KEYS
10911 fn = self._AddEvacuateNodes
10913 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
10914 " IAllocator" % self.mode)
10916 if key not in keyset:
10917 raise errors.ProgrammerError("Invalid input parameter '%s' to"
10918 " IAllocator" % key)
10919 setattr(self, key, kwargs[key])
10922 if key not in kwargs:
10923 raise errors.ProgrammerError("Missing input parameter '%s' to"
10924 " IAllocator" % key)
10925 self._BuildInputData(fn)
10927 def _ComputeClusterData(self):
10928 """Compute the generic allocator input data.
10930 This is the data that is independent of the actual operation.
10934 cluster_info = cfg.GetClusterInfo()
10937 "version": constants.IALLOCATOR_VERSION,
10938 "cluster_name": cfg.GetClusterName(),
10939 "cluster_tags": list(cluster_info.GetTags()),
10940 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
10941 # we don't have job IDs
10943 ninfo = cfg.GetAllNodesInfo()
10944 iinfo = cfg.GetAllInstancesInfo().values()
10945 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
10948 node_list = [n.name for n in ninfo.values() if n.vm_capable]
10950 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10951 hypervisor_name = self.hypervisor
10952 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10953 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
10954 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10955 hypervisor_name = cluster_info.enabled_hypervisors[0]
10957 node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
10960 self.rpc.call_all_instances_info(node_list,
10961 cluster_info.enabled_hypervisors)
10963 data["nodegroups"] = self._ComputeNodeGroupData(cfg)
10965 config_ndata = self._ComputeBasicNodeData(ninfo)
10966 data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
10967 i_list, config_ndata)
10968 assert len(data["nodes"]) == len(ninfo), \
10969 "Incomplete node data computed"
10971 data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
10973 self.in_data = data
10976 def _ComputeNodeGroupData(cfg):
10977 """Compute node groups data.
10981 for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
10983 "name": gdata.name,
10984 "alloc_policy": gdata.alloc_policy,
10989 def _ComputeBasicNodeData(node_cfg):
10990 """Compute global node data.
10993 @returns: a dict of name: (node dict, node config)
10997 for ninfo in node_cfg.values():
10998 # fill in static (config-based) values
11000 "tags": list(ninfo.GetTags()),
11001 "primary_ip": ninfo.primary_ip,
11002 "secondary_ip": ninfo.secondary_ip,
11003 "offline": ninfo.offline,
11004 "drained": ninfo.drained,
11005 "master_candidate": ninfo.master_candidate,
11006 "group": ninfo.group,
11007 "master_capable": ninfo.master_capable,
11008 "vm_capable": ninfo.vm_capable,
11011 node_results[ninfo.name] = pnr
11013 return node_results
11016 def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
11018 """Compute global node data.
11020 @param node_results: the basic node structures as filled from the config
11023 # make a copy of the current dict
11024 node_results = dict(node_results)
11025 for nname, nresult in node_data.items():
11026 assert nname in node_results, "Missing basic data for node %s" % nname
11027 ninfo = node_cfg[nname]
11029 if not (ninfo.offline or ninfo.drained):
11030 nresult.Raise("Can't get data for node %s" % nname)
11031 node_iinfo[nname].Raise("Can't get node instance info from node %s" %
11033 remote_info = nresult.payload
11035 for attr in ['memory_total', 'memory_free', 'memory_dom0',
11036 'vg_size', 'vg_free', 'cpu_total']:
11037 if attr not in remote_info:
11038 raise errors.OpExecError("Node '%s' didn't return attribute"
11039 " '%s'" % (nname, attr))
11040 if not isinstance(remote_info[attr], int):
11041 raise errors.OpExecError("Node '%s' returned invalid value"
11043 (nname, attr, remote_info[attr]))
11044 # compute memory used by primary instances
11045 i_p_mem = i_p_up_mem = 0
11046 for iinfo, beinfo in i_list:
11047 if iinfo.primary_node == nname:
11048 i_p_mem += beinfo[constants.BE_MEMORY]
11049 if iinfo.name not in node_iinfo[nname].payload:
11052 i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
11053 i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
11054 remote_info['memory_free'] -= max(0, i_mem_diff)
11057 i_p_up_mem += beinfo[constants.BE_MEMORY]
11059 # compute memory used by instances
11061 "total_memory": remote_info['memory_total'],
11062 "reserved_memory": remote_info['memory_dom0'],
11063 "free_memory": remote_info['memory_free'],
11064 "total_disk": remote_info['vg_size'],
11065 "free_disk": remote_info['vg_free'],
11066 "total_cpus": remote_info['cpu_total'],
11067 "i_pri_memory": i_p_mem,
11068 "i_pri_up_memory": i_p_up_mem,
11070 pnr_dyn.update(node_results[nname])
11071 node_results[nname] = pnr_dyn
11073 return node_results
11076 def _ComputeInstanceData(cluster_info, i_list):
11077 """Compute global instance data.
11081 for iinfo, beinfo in i_list:
11083 for nic in iinfo.nics:
11084 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
11085 nic_dict = {"mac": nic.mac,
11087 "mode": filled_params[constants.NIC_MODE],
11088 "link": filled_params[constants.NIC_LINK],
11090 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
11091 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
11092 nic_data.append(nic_dict)
11094 "tags": list(iinfo.GetTags()),
11095 "admin_up": iinfo.admin_up,
11096 "vcpus": beinfo[constants.BE_VCPUS],
11097 "memory": beinfo[constants.BE_MEMORY],
11099 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
11101 "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
11102 "disk_template": iinfo.disk_template,
11103 "hypervisor": iinfo.hypervisor,
11105 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
11107 instance_data[iinfo.name] = pir
11109 return instance_data
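# Illustrative example (editor's sketch, hypothetical values): each entry of
# the instance data computed above has roughly the shape
#
#   "instance1.example.com": {
#     "tags": [], "admin_up": True, "vcpus": 2, "memory": 1024,
#     "nics": [{"mac": "aa:00:00:11:22:33", "mode": "bridged",
#               "link": "xen-br0", "bridge": "xen-br0"}],
#     "nodes": ["node1", "node2"],
#     "disks": [{"size": 10240, "mode": "rw"}],
#     "disk_template": "drbd", "hypervisor": "xen-pvm",
#     "disk_space_total": 10368,
#   }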
11111 def _AddNewInstance(self):
11112 """Add new instance data to allocator structure.
11114 This in combination with _ComputeClusterData will create the
11115 correct structure needed as input for the allocator.
11117 The checks for the completeness of the opcode must have already been done.
11121 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
11123 if self.disk_template in constants.DTS_NET_MIRROR:
11124 self.required_nodes = 2
11126 self.required_nodes = 1
11129 "disk_template": self.disk_template,
11132 "vcpus": self.vcpus,
11133 "memory": self.mem_size,
11134 "disks": self.disks,
11135 "disk_space_total": disk_space,
11137 "required_nodes": self.required_nodes,
11141 def _AddRelocateInstance(self):
11142 """Add relocate instance data to allocator structure.
11144 This in combination with _ComputeClusterData will create the
11145 correct structure needed as input for the allocator.
11147 The checks for the completeness of the opcode must have already been done.
11151 instance = self.cfg.GetInstanceInfo(self.name)
11152 if instance is None:
11153 raise errors.ProgrammerError("Unknown instance '%s' passed to"
11154 " IAllocator" % self.name)
11156 if instance.disk_template not in constants.DTS_NET_MIRROR:
11157 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
11158 errors.ECODE_INVAL)
11160 if len(instance.secondary_nodes) != 1:
11161 raise errors.OpPrereqError("Instance has not exactly one secondary node",
11162 errors.ECODE_STATE)
11164 self.required_nodes = 1
11165 disk_sizes = [{'size': disk.size} for disk in instance.disks]
11166 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
11170 "disk_space_total": disk_space,
11171 "required_nodes": self.required_nodes,
11172 "relocate_from": self.relocate_from,
11176 def _AddEvacuateNodes(self):
11177 """Add evacuate nodes data to allocator structure.
11181 "evac_nodes": self.evac_nodes
11185 def _BuildInputData(self, fn):
11186 """Build input data structures.
11189 self._ComputeClusterData()
11192 request["type"] = self.mode
11193 self.in_data["request"] = request
11195 self.in_text = serializer.Dump(self.in_data)
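# Illustrative example (editor's sketch, hypothetical values): for an
# allocation request the "request" section serialized above looks roughly
# like
#
#   {"type": "allocate", "name": "instance1.example.com",
#    "disk_template": "drbd", "disks": [{"size": 10240, "mode": "rw"}],
#    "disk_space_total": 10368, "memory": 1024, "vcpus": 2,
#    "required_nodes": 2}
#
# alongside the cluster-wide "nodes", "instances" and "nodegroups" sections
# computed by _ComputeClusterData.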
11197 def Run(self, name, validate=True, call_fn=None):
11198 """Run an instance allocator and return the results.
11201 if call_fn is None:
11202 call_fn = self.rpc.call_iallocator_runner
11204 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
11205 result.Raise("Failure while running the iallocator script")
11207 self.out_text = result.payload
11209 self._ValidateResult()
11211 def _ValidateResult(self):
11212 """Process the allocator results.
11214 This will process and, if successful, save the result in
11215 self.out_data and the other parameters.
11219 rdict = serializer.Load(self.out_text)
11220 except Exception, err:
11221 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
11223 if not isinstance(rdict, dict):
11224 raise errors.OpExecError("Can't parse iallocator results: not a dict")
11226 # TODO: remove backwards compatibility in later versions
11227 if "nodes" in rdict and "result" not in rdict:
11228 rdict["result"] = rdict["nodes"]
11231 for key in "success", "info", "result":
11232 if key not in rdict:
11233 raise errors.OpExecError("Can't parse iallocator results:"
11234 " missing key '%s'" % key)
11235 setattr(self, key, rdict[key])
11237 if not isinstance(rdict["result"], list):
11238 raise errors.OpExecError("Can't parse iallocator results: 'result' key"
11240 self.out_data = rdict
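
  # Illustrative sketch only: a well-formed iallocator reply, once parsed,
  # is expected to look like
  #   {"success": True, "info": "allocation successful",
  #    "result": ["node1.example.com", "node2.example.com"]}
  # Anything missing one of "success", "info" or "result", or whose "result"
  # is not a list, is rejected above.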


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ["r", "w"]):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Uncaught mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
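
  # Illustrative sketch only (opcode name and fields assumed, check
  # opcodes.py): this LU is normally driven by a test opcode such as
  #   op = opcodes.OpTestAllocator(direction=constants.IALLOCATOR_DIR_IN,
  #                                mode=constants.IALLOCATOR_MODE_RELOC,
  #                                name="inst1.example.com",
  #                                allocator="hail")
  # With direction "in" it only returns the generated input text; with
  # direction "out" it actually runs the named allocator script.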


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  }


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_OP_QUERY}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
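
# Illustrative usage sketch (hypothetical caller): query opcodes dispatch on
# the resource type, roughly as in
#   impl = _GetQueryImplementation(constants.QR_NODE)   # -> _NodeQuery class
#   _GetQueryImplementation("no-such-resource")         # -> OpPrereqError
# so callers see OpPrereqError for unknown resources rather than KeyError.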