# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import copy
import logging
import re

import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes

import ganeti.masterd.instance # pylint: disable-msg=W0611


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node to check
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.Log = processor.Log # pylint: disable-msg=C0103
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()
142 """Returns the SshRunner object
146 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
149 ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer have to worry about missing parameters.

    """

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None
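      # Acquire all node locks in shared mode (a sketch: needed_locks is
      # paired with share_locks, as described above)
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
      }
      self.share_locks[locking.LEVEL_NODE] = 1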

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    "No nodes" should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused argument and "could
    # be a function" warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we really have been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLu.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"
404 """Tasklet base class.
406 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
407 they can mix legacy code with tasklets. Locking needs to be done in the LU,
408 tasklets know nothing about locks.
410 Subclasses must follow these rules:
411 - Implement CheckPrereq
415 def __init__(self, lu):
422 def CheckPrereq(self):
423 """Check prerequisites for this tasklets.
425 This method should check whether the prerequisites for the execution of
426 this tasklet are fulfilled. It can do internode communication, but it
427 should be idempotent - no cluster or system changes are allowed.
429 The method should raise errors.OpPrereqError in case something is not
430 fulfilled. Its return value is ignored.
432 This method should also update all parameters to their canonical form if it
433 hasn't been done before.
438 def Exec(self, feedback_fn):
439 """Execute the tasklet.
441 This method should implement the actual work. It should raise
442 errors.OpExecError for failures that are somewhat dealt with in code, or
446 raise NotImplementedError
450 """Base for query utility classes.
453 #: Attribute holding field definitions
456 def __init__(self, names, fields, use_locking):
457 """Initializes this class.
461 self.use_locking = use_locking
463 self.query = query.Query(self.FIELDS, fields)
464 self.requested_data = self.query.RequestedData()
466 self.do_locking = None
469 def _GetNames(self, lu, all_names, lock_level):
470 """Helper function to determine names asked for in the query.
474 names = lu.acquired_locks[lock_level]
478 if self.wanted == locking.ALL_SET:
479 assert not self.names
480 # caller didn't specify names, so ordering is not important
481 return utils.NiceSort(names)
483 # caller specified names and we must keep the same order
485 assert not self.do_locking or lu.acquired_locks[lock_level]
487 missing = set(self.wanted).difference(names)
489 raise errors.OpExecError("Some items were removed before retrieving"
490 " their data: %s" % missing)
492 # Return expanded names
496 def FieldsQuery(cls, fields):
497 """Returns list of available fields.
499 @return: List of L{objects.QueryFieldDefinition}
502 return query.QueryFields(cls.FIELDS, fields)
504 def ExpandNames(self, lu):
505 """Expand names for this query.
507 See L{LogicalUnit.ExpandNames}.
510 raise NotImplementedError()
512 def DeclareLocks(self, lu, level):
513 """Declare locks for this query.
515 See L{LogicalUnit.DeclareLocks}.
518 raise NotImplementedError()
520 def _GetQueryData(self, lu):
521 """Collects all data for this query.
523 @return: Query data object
526 raise NotImplementedError()
528 def NewStyleQuery(self, lu):
529 """Collect data and execute query.
532 return query.GetQueryResponse(self.query, self._GetQueryData(lu))
534 def OldStyleQuery(self, lu):
535 """Collect data and execute query.
538 return self.query.OldStyleQuery(self._GetQueryData(lu))
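
# Usage sketch for _QueryBase (names hypothetical): a concrete subclass sets
# FIELDS and implements ExpandNames, DeclareLocks and _GetQueryData; an LU
# then delegates to it, roughly:
#   self.nq = _SomeNodeQuery(self.op.names, self.op.output_fields, False)
#   self.nq.ExpandNames(self)           # from the LU's ExpandNames
#   return self.nq.OldStyleQuery(self)  # from the LU's Exec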


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
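
# A worked example for _GetUpdatedParams (values illustrative):
#   old = {"mem": 128, "vcpus": 1}
#   _GetUpdatedParams(old, {"mem": constants.VALUE_DEFAULT, "vcpus": 2})
# returns {"vcpus": 2}: "mem" is deleted so the cluster default applies again,
# while "vcpus" is overwritten; with use_none=True a None value behaves like
# VALUE_DEFAULT.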


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should
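
# Worked example (numbers illustrative): with candidate_pool_size = 10 and
# GetMasterCandidateStats returning mc_now = 3, mc_should = 3, the new node
# bumps mc_should to min(3 + 1, 10) = 4, so 3 < 4 holds and the node promotes
# itself; with candidate_pool_size = 3, mc_should stays 3 and it does not.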


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  variant = objects.OS.GetVariant(name)
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []
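
# For example (directory illustrative): _GetStorageTypeArgs(cfg,
# constants.ST_FILE) returns [["/srv/ganeti/file-storage"]], while any other
# storage type yields the empty argument list [].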


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty
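
# Result sketch (indices illustrative): if only the second disk of the
# instance reports LDS_FAULTY, the function returns [1] - the list of faulty
# disk indices, not the disk objects themselves.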


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both an iallocator and a node.",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found."
                                 " Please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator.")


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for LUClusterVerify.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUClusterVerify.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerify.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


class LUClusterVerify(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEOS = (TNODE, "ENODEOS")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  _HOOKS_INDENT_RE = re.compile("^", re.M)

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @ivar name: the node name to which this object refers
    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dictionary of {primary-node: list of instances} for all
        instances for which this node is secondary (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
    @ivar rpc_fail: whether the RPC verify call failed (overall, not
        whether the individual keys were correct) (runtime)
    @type lvm_fail: boolean
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
    @type hyp_fail: boolean
    @ivar hyp_fail: whether the RPC call didn't return the instance list
    @type ghost: boolean
    @ivar ghost: whether this is a known node or not (config)
    @type os_fail: boolean
    @ivar os_fail: whether the RPC call didn't return valid OS data
    @type oslist: list
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
    @type vm_capable: boolean
    @ivar vm_capable: whether the node can host instances

    """
    def __init__(self, offline=False, name=None, vm_capable=True):
      self.name = name
      self.volumes = {}
      self.instances = []
      self.pinst = []
      self.sinst = []
      self.sbp = {}
      self.mfree = 0
      self.dfree = 0
      self.offline = offline
      self.vm_capable = vm_capable
      self.rpc_fail = False
      self.lvm_fail = False
      self.hyp_fail = False
      self.ghost = False
      self.os_fail = False
      self.oslist = {}

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes:
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + str(item)
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn(" - %s" % msg)

  def _ErrorIf(self, cond, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = bool(cond) or self.op.debug_simulate_errors
    if cond:
      self._Error(*args, **kwargs)
    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond

  def _VerifyNode(self, ninfo, nresult):
    """Perform some basic validation on data returned from a node.

      - check the result data structure is well formed and has all the
        mandatory fields
      - check ganeti version

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the results from the node
    @rtype: boolean
    @return: whether overall this call was successful (and we can expect
        reasonable values in the response)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # main result, nresult should be a non-empty dict
    test = not nresult or not isinstance(nresult, dict)
    _ErrorIf(test, self.ENODERPC, node,
             "unable to verify node: no data returned")
    if test:
      return False

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = nresult.get("version", None)
    test = not (remote_version and
                isinstance(remote_version, (list, tuple)) and
                len(remote_version) == 2)
    _ErrorIf(test, self.ENODERPC, node,
             "connection to node returned invalid data")
    if test:
      return False

    test = local_version != remote_version[0]
    _ErrorIf(test, self.ENODEVERSION, node,
             "incompatible protocol versions: master %s,"
             " node %s", local_version, remote_version[0])
    if test:
      return False

    # node seems compatible, we can actually try to look into its results

    # full package version
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
                  self.ENODEVERSION, node,
                  "software version mismatch: master %s, node %s",
                  constants.RELEASE_VERSION, remote_version[1],
                  code=self.ETYPE_WARNING)

    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
    if ninfo.vm_capable and isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        test = hv_result is not None
        _ErrorIf(test, self.ENODEHV, node,
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)

    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
    if ninfo.vm_capable and isinstance(hvp_result, list):
      for item, hv_name, hv_result in hvp_result:
        _ErrorIf(True, self.ENODEHV, node,
                 "hypervisor %s parameter verify failure (source %s): %s",
                 hv_name, item, hv_result)

    test = nresult.get(constants.NV_NODESETUP,
                       ["Missing NODESETUP results"])
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
             "; ".join(test))

    return True

  def _VerifyNodeTime(self, ninfo, nresult,
                      nvinfo_starttime, nvinfo_endtime):
    """Check the node time.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    ntime = nresult.get(constants.NV_TIME, None)
    try:
      ntime_merged = utils.MergeTime(ntime)
    except (ValueError, TypeError):
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
      return

    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)

  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = vglist is None
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pvs
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
    """Check the node bridges.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param bridges: the expected list of bridges

    """
    if not bridges:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    missing = nresult.get(constants.NV_BRIDGES, None)
    test = not isinstance(missing, list)
    _ErrorIf(test, self.ENODENET, node,
             "did not return valid bridge information")
    if not test:
      _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
               utils.CommaJoin(sorted(missing)))

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)

  def _VerifyInstance(self, instance, instanceconfig, node_image,
                      diskstatus):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if node != node_current:
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

    diskdata = [(nname, success, status, idx)
                for (nname, disks) in diskstatus.items()
                for idx, (success, status) in enumerate(disks)]

    for nname, success, bdev_status, idx in diskdata:
      # the 'ghost node' construction in Exec() ensures that we have a
      # node here
      snode = node_image[nname]
      bad_snode = snode.ghost or snode.offline
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
               self.EINSTANCEFAULTYDISK, instance,
               "couldn't retrieve status for disk/%s on %s: %s",
               idx, nname, bdev_status)
      _ErrorIf((instanceconfig.admin_up and success and
                bdev_status.ldisk_status == constants.LDS_FAULTY),
               self.EINSTANCEFAULTYDISK, instance,
               "disk/%s on %s is faulty", idx, nname)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @type reserved: L{ganeti.utils.FieldSet}
    @param reserved: a FieldSet of reserved volume names

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = ((node not in node_vol_should or
                 volume not in node_vol_should[node]) and
                not reserved.Matches(volume))
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      if n_img.offline:
        # we're skipping offline nodes from the N+1 warning, since
        # most likely we don't have good memory information from them;
        # we already list instances living on such nodes, and that's
        # enough warning
        continue
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate instance failovers"
                      " should node %s fail (%dMiB needed, %dMiB available)",
                      prinode, needed_mem, n_img.mfree)
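
    # Worked example (numbers illustrative): if this node is secondary for
    # two auto-balanced instances of primary node "node3" with BE_MEMORY of
    # 512 each, needed_mem is 1024; mfree of 900 would then flag ENODEN1 for
    # the (node, node3) pair.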

  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
                       master_files):
    """Verifies and computes the node required file checksums.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param master_files: list of files that only masters should have

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_cksum = nresult.get(constants.NV_FILELIST, None)
    test = not isinstance(remote_cksum, dict)
    _ErrorIf(test, self.ENODEFILECHECK, node,
             "node hasn't returned file checksum data")
    if test:
      return

    for file_name in file_list:
      node_is_mc = ninfo.master_candidate
      must_have = (file_name not in master_files) or node_is_mc
      # file missing on the remote node
      test1 = file_name not in remote_cksum
      # file present but with a wrong checksum
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
      # file present with the right checksum
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
               "file '%s' missing", file_name)
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
               "file '%s' has wrong checksum", file_name)
      # not candidate and this is not a must-have file
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist on non master"
               " candidates (and the file is outdated)", file_name)
      # all good, except non-master/non-must have combination
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist"
               " on non master candidates", file_name)

  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                      drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_helper: the configured DRBD usermode helper
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    if drbd_helper:
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
      test = (helper_result is None)
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
               "no drbd usermode helper returned")
      if helper_result:
        status, payload = helper_result
        test = not status
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "drbd usermode helper check unsuccessful: %s", payload)
        test = status and (payload != drbd_helper)
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "wrong drbd usermode helper: %s", payload)

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeOS(self, ninfo, nresult, nimg):
    """Builds the node OS structures.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_os = nresult.get(constants.NV_OSLIST, None)
    test = (not isinstance(remote_os, list) or
            not compat.all(isinstance(v, list) and len(v) == 7
                           for v in remote_os))

    _ErrorIf(test, self.ENODEOS, node,
             "node hasn't returned valid OS data")

    nimg.os_fail = test
    if test:
      return

    os_dict = {}

    for (name, os_path, status, diagnose,
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:

      if name not in os_dict:
        os_dict[name] = []

      # parameters is a list of lists instead of list of tuples due to
      # JSON lacking a real tuple type, fix it:
      parameters = [tuple(v) for v in parameters]
      os_dict[name].append((os_path, status, diagnose,
                            set(variants), set(parameters), set(api_ver)))

    nimg.oslist = os_dict

  def _VerifyNodeOS(self, ninfo, nimg, base):
    """Verifies the node OS list.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nimg: the node image object
    @param base: the 'template' node we match against (e.g. from the master)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"

    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
    for os_name, os_data in nimg.oslist.items():
      assert os_data, "Empty OS status for OS %s?!" % os_name
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
      _ErrorIf(not f_status, self.ENODEOS, node,
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
               "OS '%s' has multiple entries (first one shadows the rest): %s",
               os_name, utils.CommaJoin([v[0] for v in os_data]))
      # this will be caught in backend too
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
               and not f_var, self.ENODEOS, node,
               "OS %s with API at least %d does not declare any variant",
               os_name, constants.OS_API_V15)
      # comparisons with the 'base' image
      test = os_name not in base.oslist
      _ErrorIf(test, self.ENODEOS, node,
               "Extra OS %s not present on reference node (%s)",
               os_name, base.name)
      if test:
        continue

      assert base.oslist[os_name], "Base node has empty OS status?"
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
      if not b_status:
        # base OS is invalid, skipping
        continue

      for kind, a, b in [("API version", f_api, b_api),
                         ("variants list", f_var, b_var),
                         ("parameters", beautify_params(f_param),
                          beautify_params(b_param))]:
        _ErrorIf(a != b, self.ENODEOS, node,
                 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
                 kind, os_name, base.name,
                 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))

    # check any missing OSes
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
    _ErrorIf(missing, self.ENODEOS, node,
             "OSes present on reference node %s but missing on this node: %s",
             base.name, utils.CommaJoin(missing))

  def _VerifyOob(self, ninfo, nresult):
    """Verifies out of band functionality of a node.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    # We just have to verify the paths on master and/or master candidates
    # as the oob helper is invoked on the master
    if ((ninfo.master_candidate or ninfo.master_capable) and
        constants.NV_OOB_PATHS in nresult):
      for path_result in nresult[constants.NV_OOB_PATHS]:
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
1889 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1890 """Verifies and updates the node volume data.
1892 This function will update a L{NodeImage}'s internal structures
1893 with data from the remote call.
1895 @type ninfo: L{objects.Node}
1896 @param ninfo: the node to check
1897 @param nresult: the remote results for the node
1898 @param nimg: the node image object
1899 @param vg_name: the configured VG name
1903 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1905 nimg.lvm_fail = True
1906 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1909 elif isinstance(lvdata, basestring):
1910 _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1911 utils.SafeEncode(lvdata))
1912 elif not isinstance(lvdata, dict):
1913 _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1915 nimg.volumes = lvdata
1916 nimg.lvm_fail = False
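# Sketch of the payload shapes handled above (hypothetical values): a string
# payload such as "Missing LV data" signals an LVM problem on the node, any
# other non-dict payload is treated as a failed RPC, and only a dict such as
# {"xenvg/disk0.data": ...} is stored as nimg.volumes and clears
# nimg.lvm_fail.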
1918 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1919 """Verifies and updates the node instance list.
1921 If the listing was successful, then updates this node's instance
1922 list. Otherwise, it marks the RPC call as failed for the instance list key.
1925 @type ninfo: L{objects.Node}
1926 @param ninfo: the node to check
1927 @param nresult: the remote results for the node
1928 @param nimg: the node image object
1931 idata = nresult.get(constants.NV_INSTANCELIST, None)
1932 test = not isinstance(idata, list)
1933 self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1934 " (instancelist): %s", utils.SafeEncode(str(idata)))
1936 nimg.hyp_fail = True
1938 nimg.instances = idata
1940 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1941 """Verifies and computes a node information map
1943 @type ninfo: L{objects.Node}
1944 @param ninfo: the node to check
1945 @param nresult: the remote results for the node
1946 @param nimg: the node image object
1947 @param vg_name: the configured VG name
1951 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1953 # try to read free memory (from the hypervisor)
1954 hv_info = nresult.get(constants.NV_HVINFO, None)
1955 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1956 _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1959 nimg.mfree = int(hv_info["memory_free"])
1960 except (ValueError, TypeError):
1961 _ErrorIf(True, self.ENODERPC, node,
1962 "node returned invalid nodeinfo, check hypervisor")
1964 # FIXME: devise a free space model for file based instances as well
1965 if vg_name is not None:
1966 test = (constants.NV_VGLIST not in nresult or
1967 vg_name not in nresult[constants.NV_VGLIST])
1968 _ErrorIf(test, self.ENODELVM, node,
1969 "node didn't return data for the volume group '%s'"
1970 " - it is either missing or broken", vg_name)
1973 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1974 except (ValueError, TypeError):
1975 _ErrorIf(True, self.ENODERPC, node,
1976 "node returned invalid LVM info, check LVM status")
1978 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
1979 """Gets per-disk status information for all instances.
1981 @type nodelist: list of strings
1982 @param nodelist: Node names
1983 @type node_image: dict of (name, L{objects.Node})
1984 @param node_image: Node objects
1985 @type instanceinfo: dict of (name, L{objects.Instance})
1986 @param instanceinfo: Instance objects
1987 @rtype: {instance: {node: [(success, payload)]}}
1988 @return: a dictionary of per-instance dictionaries with nodes as
1989 keys and disk information as values; the disk information is a
1990 list of tuples (success, payload)
1993 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1996 node_disks_devonly = {}
1997 diskless_instances = set()
1998 diskless = constants.DT_DISKLESS
2000 for nname in nodelist:
2001 node_instances = list(itertools.chain(node_image[nname].pinst,
2002 node_image[nname].sinst))
2003 diskless_instances.update(inst for inst in node_instances
2004 if instanceinfo[inst].disk_template == diskless)
2005 disks = [(inst, disk)
2006 for inst in node_instances
2007 for disk in instanceinfo[inst].disks]
2010 # No need to collect data
2013 node_disks[nname] = disks
2015 # Creating copies as SetDiskID below will modify the objects and that can
2016 # lead to incorrect data returned from nodes
2017 devonly = [dev.Copy() for (_, dev) in disks]
2020 self.cfg.SetDiskID(dev, nname)
2022 node_disks_devonly[nname] = devonly
2024 assert len(node_disks) == len(node_disks_devonly)
2026 # Collect data from all nodes with disks
2027 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2030 assert len(result) == len(node_disks)
2034 for (nname, nres) in result.items():
2035 disks = node_disks[nname]
2038 # No data from this node
2039 data = len(disks) * [(False, "node offline")]
2042 _ErrorIf(msg, self.ENODERPC, nname,
2043 "while getting disk information: %s", msg)
2045 # No data from this node
2046 data = len(disks) * [(False, msg)]
2049 for idx, i in enumerate(nres.payload):
2050 if isinstance(i, (tuple, list)) and len(i) == 2:
2053 logging.warning("Invalid result from node %s, entry %d: %s",
2055 data.append((False, "Invalid result from the remote node"))
2057 for ((inst, _), status) in zip(disks, data):
2058 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2060 # Add empty entries for diskless instances.
2061 for inst in diskless_instances:
2062 assert inst not in instdisk
2065 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2066 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2067 compat.all(isinstance(s, (tuple, list)) and
2068 len(s) == 2 for s in statuses)
2069 for inst, nnames in instdisk.items()
2070 for nname, statuses in nnames.items())
2071 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
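# Sketch of the return value with hypothetical names:
#   {"inst1": {"node1": [(True, payload0), (True, payload1)],
#              "node2": [(False, "node offline")]},
#    "diskless1": {}}
# i.e. one (success, payload) tuple per disk per node, plus an empty dict
# for every diskless instance.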
2075 def _VerifyHVP(self, hvp_data):
2076 """Verifies locally the syntax of the hypervisor parameters.
2079 for item, hv_name, hv_params in hvp_data:
2080 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2083 hv_class = hypervisor.GetHypervisor(hv_name)
2084 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2085 hv_class.CheckParameterSyntax(hv_params)
2086 except errors.GenericError, err:
2087 self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
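# Each hvp_data item is a (source, hypervisor, params) triple, e.g. the
# hypothetical ("instance inst1", "xen-pvm", {"kernel_path": "/boot/vmlinuz"});
# a failed type or syntax check becomes an ECLUSTERCFG error naming that
# source instead of aborting the verification outright.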
2090 def BuildHooksEnv(self):
2093 Cluster-Verify hooks run only in the post phase; when they fail, their
2094 output is logged in the verify output and the verification is marked as failed.
2097 all_nodes = self.cfg.GetNodeList()
2099 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2101 for node in self.cfg.GetAllNodesInfo().values():
2102 env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2104 return env, [], all_nodes
2106 def Exec(self, feedback_fn):
2107 """Verify integrity of cluster, performing various test on nodes.
2110 # This method has too many local variables. pylint: disable-msg=R0914
2112 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2113 verbose = self.op.verbose
2114 self._feedback_fn = feedback_fn
2115 feedback_fn("* Verifying global settings")
2116 for msg in self.cfg.VerifyConfig():
2117 _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2119 # Check the cluster certificates
2120 for cert_filename in constants.ALL_CERT_FILES:
2121 (errcode, msg) = _VerifyCertificate(cert_filename)
2122 _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2124 vg_name = self.cfg.GetVGName()
2125 drbd_helper = self.cfg.GetDRBDHelper()
2126 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2127 cluster = self.cfg.GetClusterInfo()
2128 nodeinfo_byname = self.cfg.GetAllNodesInfo()
2129 nodelist = utils.NiceSort(nodeinfo_byname.keys())
2130 nodeinfo = [nodeinfo_byname[nname] for nname in nodelist]
2131 instanceinfo = self.cfg.GetAllInstancesInfo()
2132 instancelist = utils.NiceSort(instanceinfo.keys())
2133 groupinfo = self.cfg.GetAllNodeGroupsInfo()
2134 i_non_redundant = [] # Non redundant instances
2135 i_non_a_balanced = [] # Non auto-balanced instances
2136 n_offline = 0 # Count of offline nodes
2137 n_drained = 0 # Count of nodes being drained
2138 node_vol_should = {}
2140 # FIXME: verify OS list
2141 # do local checksums
2142 master_files = [constants.CLUSTER_CONF_FILE]
2143 master_node = self.master_node = self.cfg.GetMasterNode()
2144 master_ip = self.cfg.GetMasterIP()
2146 file_names = ssconf.SimpleStore().GetFileList()
2147 file_names.extend(constants.ALL_CERT_FILES)
2148 file_names.extend(master_files)
2149 if cluster.modify_etc_hosts:
2150 file_names.append(constants.ETC_HOSTS)
2152 local_checksums = utils.FingerprintFiles(file_names)
2154 # Compute the set of hypervisor parameters
2156 for hv_name in hypervisors:
2157 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
2158 for os_name, os_hvp in cluster.os_hvp.items():
2159 for hv_name, hv_params in os_hvp.items():
2162 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
2163 hvp_data.append(("os %s" % os_name, hv_name, full_params))
2164 # TODO: collapse identical parameter values in a single one
2165 for instance in instanceinfo.values():
2166 if not instance.hvparams:
2168 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
2169 cluster.FillHV(instance)))
2170 # and verify them locally
2171 self._VerifyHVP(hvp_data)
2173 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2174 node_verify_param = {
2175 constants.NV_FILELIST: file_names,
2176 constants.NV_NODELIST: [node.name for node in nodeinfo
2177 if not node.offline],
2178 constants.NV_HYPERVISOR: hypervisors,
2179 constants.NV_HVPARAMS: hvp_data,
2180 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2181 node.secondary_ip) for node in nodeinfo
2182 if not node.offline],
2183 constants.NV_INSTANCELIST: hypervisors,
2184 constants.NV_VERSION: None,
2185 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2186 constants.NV_NODESETUP: None,
2187 constants.NV_TIME: None,
2188 constants.NV_MASTERIP: (master_node, master_ip),
2189 constants.NV_OSLIST: None,
2190 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2193 if vg_name is not None:
2194 node_verify_param[constants.NV_VGLIST] = None
2195 node_verify_param[constants.NV_LVLIST] = vg_name
2196 node_verify_param[constants.NV_PVLIST] = [vg_name]
2197 node_verify_param[constants.NV_DRBDLIST] = None
2200 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2203 # FIXME: this needs to be changed per node-group, not cluster-wide
2205 default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
2206 if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2207 bridges.add(default_nicpp[constants.NIC_LINK])
2208 for instance in instanceinfo.values():
2209 for nic in instance.nics:
2210 full_nic = cluster.SimpleFillNIC(nic.nicparams)
2211 if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2212 bridges.add(full_nic[constants.NIC_LINK])
2215 node_verify_param[constants.NV_BRIDGES] = list(bridges)
2217 # Build our expected cluster state
2218 node_image = dict((node.name, self.NodeImage(offline=node.offline,
2220 vm_capable=node.vm_capable))
2221 for node in nodeinfo)
2225 for node in nodeinfo:
2226 path = _SupportsOob(self.cfg, node)
2227 if path and path not in oob_paths:
2228 oob_paths.append(path)
2231 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2233 for instance in instancelist:
2234 inst_config = instanceinfo[instance]
2236 for nname in inst_config.all_nodes:
2237 if nname not in node_image:
2239 gnode = self.NodeImage(name=nname)
2241 node_image[nname] = gnode
2243 inst_config.MapLVsByNode(node_vol_should)
2245 pnode = inst_config.primary_node
2246 node_image[pnode].pinst.append(instance)
2248 for snode in inst_config.secondary_nodes:
2249 nimg = node_image[snode]
2250 nimg.sinst.append(instance)
2251 if pnode not in nimg.sbp:
2252 nimg.sbp[pnode] = []
2253 nimg.sbp[pnode].append(instance)
2255 # At this point, we have the in-memory data structures complete,
2256 # except for the runtime information, which we'll gather next
2258 # Due to the way our RPC system works, exact response times cannot be
2259 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2260 # time before and after executing the request, we can at least have a time window.
2262 nvinfo_starttime = time.time()
2263 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2264 self.cfg.GetClusterName())
2265 nvinfo_endtime = time.time()
2267 all_drbd_map = self.cfg.ComputeDRBDMap()
2269 feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
2270 instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
2272 feedback_fn("* Verifying node status")
2276 for node_i in nodeinfo:
2278 nimg = node_image[node]
2282 feedback_fn("* Skipping offline node %s" % (node,))
2286 if node == master_node:
2288 elif node_i.master_candidate:
2289 ntype = "master candidate"
2290 elif node_i.drained:
2296 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2298 msg = all_nvinfo[node].fail_msg
2299 _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2301 nimg.rpc_fail = True
2304 nresult = all_nvinfo[node].payload
2306 nimg.call_ok = self._VerifyNode(node_i, nresult)
2307 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2308 self._VerifyNodeNetwork(node_i, nresult)
2309 self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2312 self._VerifyOob(node_i, nresult)
2315 self._VerifyNodeLVM(node_i, nresult, vg_name)
2316 self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2319 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2320 self._UpdateNodeInstances(node_i, nresult, nimg)
2321 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2322 self._UpdateNodeOS(node_i, nresult, nimg)
2323 if not nimg.os_fail:
2324 if refos_img is None:
2326 self._VerifyNodeOS(node_i, nimg, refos_img)
2327 self._VerifyNodeBridges(node_i, nresult, bridges)
2329 feedback_fn("* Verifying instance status")
2330 for instance in instancelist:
2332 feedback_fn("* Verifying instance %s" % instance)
2333 inst_config = instanceinfo[instance]
2334 self._VerifyInstance(instance, inst_config, node_image,
2336 inst_nodes_offline = []
2338 pnode = inst_config.primary_node
2339 pnode_img = node_image[pnode]
2340 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2341 self.ENODERPC, pnode, "instance %s, connection to"
2342 " primary node failed", instance)
2344 _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
2345 "instance lives on offline node %s", inst_config.primary_node)
2347 # If the instance is non-redundant we cannot survive losing its primary
2348 # node, so we are not N+1 compliant. On the other hand we have no disk
2349 # templates with more than one secondary, so that situation is not well supported either.
2351 # FIXME: does not support file-backed instances
2352 if not inst_config.secondary_nodes:
2353 i_non_redundant.append(instance)
2355 _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2356 instance, "instance has multiple secondary nodes: %s",
2357 utils.CommaJoin(inst_config.secondary_nodes),
2358 code=self.ETYPE_WARNING)
2360 if inst_config.disk_template in constants.DTS_NET_MIRROR:
2361 pnode = inst_config.primary_node
2362 instance_nodes = utils.NiceSort(inst_config.all_nodes)
2363 instance_groups = {}
2365 for node in instance_nodes:
2366 instance_groups.setdefault(nodeinfo_byname[node].group,
2370 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2371 # Sort so that we always list the primary node first.
2372 for group, nodes in sorted(instance_groups.items(),
2373 key=lambda (_, nodes): pnode in nodes,
2376 self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2377 instance, "instance has primary and secondary nodes in"
2378 " different groups: %s", utils.CommaJoin(pretty_list),
2379 code=self.ETYPE_WARNING)
2381 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2382 i_non_a_balanced.append(instance)
2384 for snode in inst_config.secondary_nodes:
2385 s_img = node_image[snode]
2386 _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2387 "instance %s, connection to secondary node failed", instance)
2390 inst_nodes_offline.append(snode)
2392 # warn that the instance lives on offline nodes
2393 _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2394 "instance has offline secondary node(s) %s",
2395 utils.CommaJoin(inst_nodes_offline))
2396 # ... or ghost/non-vm_capable nodes
2397 for node in inst_config.all_nodes:
2398 _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2399 "instance lives on ghost node %s", node)
2400 _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2401 instance, "instance lives on non-vm_capable node %s", node)
2403 feedback_fn("* Verifying orphan volumes")
2404 reserved = utils.FieldSet(*cluster.reserved_lvs)
2405 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2407 feedback_fn("* Verifying orphan instances")
2408 self._VerifyOrphanInstances(instancelist, node_image)
2410 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2411 feedback_fn("* Verifying N+1 Memory redundancy")
2412 self._VerifyNPlusOneMemory(node_image, instanceinfo)
2414 feedback_fn("* Other Notes")
2416 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
2417 % len(i_non_redundant))
2419 if i_non_a_balanced:
2420 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
2421 % len(i_non_a_balanced))
2424 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
2427 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
2431 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2432 """Analyze the post-hooks' result
2434 This method analyses the hook result, handles it, and sends some
2435 nicely-formatted feedback back to the user.
2437 @param phase: one of L{constants.HOOKS_PHASE_POST} or
2438 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2439 @param hooks_results: the results of the multi-node hooks rpc call
2440 @param feedback_fn: function used to send feedback back to the caller
2441 @param lu_result: previous Exec result
2442 @return: the new Exec result, based on the previous result
2446 # We only really run POST phase hooks, and are only interested in their results.
2448 if phase == constants.HOOKS_PHASE_POST:
2449 # Used to change hooks' output to proper indentation
2450 feedback_fn("* Hooks Results")
2451 assert hooks_results, "invalid result from hooks"
2453 for node_name in hooks_results:
2454 res = hooks_results[node_name]
2456 test = msg and not res.offline
2457 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2458 "Communication failure in hooks execution: %s", msg)
2459 if res.offline or msg:
2460 # No need to investigate payload if node is offline or gave an error.
2461 # override manually lu_result here as _ErrorIf only
2462 # overrides self.bad
2465 for script, hkr, output in res.payload:
2466 test = hkr == constants.HKR_FAIL
2467 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2468 "Script %s failed, output:", script)
2470 output = self._HOOKS_INDENT_RE.sub(' ', output)
2471 feedback_fn("%s" % output)
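# The payload walked above is a list of (script, status, output) tuples; a
# hypothetical entry ("99-check-foo", constants.HKR_FAIL, "foo is broken")
# is reported as "Script 99-check-foo failed, output:" followed by the
# script's re-indented output.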
2477 class LUClusterVerifyDisks(NoHooksLU):
2478 """Verifies the cluster disks status.
2483 def ExpandNames(self):
2484 self.needed_locks = {
2485 locking.LEVEL_NODE: locking.ALL_SET,
2486 locking.LEVEL_INSTANCE: locking.ALL_SET,
2488 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2490 def Exec(self, feedback_fn):
2491 """Verify integrity of cluster disks.
2493 @rtype: tuple of three items
2494 @return: a tuple of (dict of node-to-node_error, list of instances
2495 which need activate-disks, dict of instance: (node, volume) for missing volumes
2499 result = res_nodes, res_instances, res_missing = {}, [], {}
2501 nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
2502 instances = self.cfg.GetAllInstancesInfo().values()
2505 for inst in instances:
2507 if not inst.admin_up:
2509 inst.MapLVsByNode(inst_lvs)
2510 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2511 for node, vol_list in inst_lvs.iteritems():
2512 for vol in vol_list:
2513 nv_dict[(node, vol)] = inst
2518 node_lvs = self.rpc.call_lv_list(nodes, [])
2519 for node, node_res in node_lvs.items():
2520 if node_res.offline:
2522 msg = node_res.fail_msg
2524 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2525 res_nodes[node] = msg
2528 lvs = node_res.payload
2529 for lv_name, (_, _, lv_online) in lvs.items():
2530 inst = nv_dict.pop((node, lv_name), None)
2531 if (not lv_online and inst is not None
2532 and inst.name not in res_instances):
2533 res_instances.append(inst.name)
2535 # any leftover items in nv_dict are missing LVs, let's arrange the data better
2537 for key, inst in nv_dict.iteritems():
2538 if inst.name not in res_missing:
2539 res_missing[inst.name] = []
2540 res_missing[inst.name].append(key)
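# Sketch of a possible return value, using hypothetical names:
#   ({"node2": "error message"},             # nodes that failed LV enumeration
#    ["inst1"],                              # instances needing activate-disks
#    {"inst2": [("node3", "xenvg/disk0")]})  # instance -> missing (node, LV)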
2545 class LUClusterRepairDiskSizes(NoHooksLU):
2546 """Verifies the cluster disks sizes.
2551 def ExpandNames(self):
2552 if self.op.instances:
2553 self.wanted_names = []
2554 for name in self.op.instances:
2555 full_name = _ExpandInstanceName(self.cfg, name)
2556 self.wanted_names.append(full_name)
2557 self.needed_locks = {
2558 locking.LEVEL_NODE: [],
2559 locking.LEVEL_INSTANCE: self.wanted_names,
2561 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2563 self.wanted_names = None
2564 self.needed_locks = {
2565 locking.LEVEL_NODE: locking.ALL_SET,
2566 locking.LEVEL_INSTANCE: locking.ALL_SET,
2568 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2570 def DeclareLocks(self, level):
2571 if level == locking.LEVEL_NODE and self.wanted_names is not None:
2572 self._LockInstancesNodes(primary_only=True)
2574 def CheckPrereq(self):
2575 """Check prerequisites.
2577 This only checks the optional instance list against the existing names.
2580 if self.wanted_names is None:
2581 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2583 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2584 in self.wanted_names]
2586 def _EnsureChildSizes(self, disk):
2587 """Ensure children of the disk have the needed disk size.
2589 This is valid mainly for DRBD8 and fixes an issue where the
2590 children have a smaller disk size.
2592 @param disk: an L{ganeti.objects.Disk} object
2595 if disk.dev_type == constants.LD_DRBD8:
2596 assert disk.children, "Empty children for DRBD8?"
2597 fchild = disk.children[0]
2598 mismatch = fchild.size < disk.size
2600 self.LogInfo("Child disk has size %d, parent %d, fixing",
2601 fchild.size, disk.size)
2602 fchild.size = disk.size
2604 # and we recurse on this child only, not on the metadev
2605 return self._EnsureChildSizes(fchild) or mismatch
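# Hedged example: for a hypothetical DRBD8 disk of 10240 MiB whose first
# (data) child reports 10230 MiB, the code above grows fchild.size to 10240
# and returns True, telling the caller the configuration must be written
# back.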
2609 def Exec(self, feedback_fn):
2610 """Verify the size of cluster disks.
2613 # TODO: check child disks too
2614 # TODO: check differences in size between primary/secondary nodes
2616 for instance in self.wanted_instances:
2617 pnode = instance.primary_node
2618 if pnode not in per_node_disks:
2619 per_node_disks[pnode] = []
2620 for idx, disk in enumerate(instance.disks):
2621 per_node_disks[pnode].append((instance, idx, disk))
2624 for node, dskl in per_node_disks.items():
2625 newl = [v[2].Copy() for v in dskl]
2627 self.cfg.SetDiskID(dsk, node)
2628 result = self.rpc.call_blockdev_getsize(node, newl)
2630 self.LogWarning("Failure in blockdev_getsize call to node"
2631 " %s, ignoring", node)
2633 if len(result.payload) != len(dskl):
2634 logging.warning("Invalid result from node %s: len(dskl)=%d,"
2635 " result.payload=%s", node, len(dskl), result.payload)
2636 self.LogWarning("Invalid result from node %s, ignoring node results",
2639 for ((instance, idx, disk), size) in zip(dskl, result.payload):
2641 self.LogWarning("Disk %d of instance %s did not return size"
2642 " information, ignoring", idx, instance.name)
2644 if not isinstance(size, (int, long)):
2645 self.LogWarning("Disk %d of instance %s did not return valid"
2646 " size information, ignoring", idx, instance.name)
2649 if size != disk.size:
2650 self.LogInfo("Disk %d of instance %s has mismatched size,"
2651 " correcting: recorded %d, actual %d", idx,
2652 instance.name, disk.size, size)
2654 self.cfg.Update(instance, feedback_fn)
2655 changed.append((instance.name, idx, size))
2656 if self._EnsureChildSizes(disk):
2657 self.cfg.Update(instance, feedback_fn)
2658 changed.append((instance.name, idx, disk.size))
2662 class LUClusterRename(LogicalUnit):
2663 """Rename the cluster.
2666 HPATH = "cluster-rename"
2667 HTYPE = constants.HTYPE_CLUSTER
2669 def BuildHooksEnv(self):
2674 "OP_TARGET": self.cfg.GetClusterName(),
2675 "NEW_NAME": self.op.name,
2677 mn = self.cfg.GetMasterNode()
2678 all_nodes = self.cfg.GetNodeList()
2679 return env, [mn], all_nodes
2681 def CheckPrereq(self):
2682 """Verify that the passed name is a valid one.
2685 hostname = netutils.GetHostname(name=self.op.name,
2686 family=self.cfg.GetPrimaryIPFamily())
2688 new_name = hostname.name
2689 self.ip = new_ip = hostname.ip
2690 old_name = self.cfg.GetClusterName()
2691 old_ip = self.cfg.GetMasterIP()
2692 if new_name == old_name and new_ip == old_ip:
2693 raise errors.OpPrereqError("Neither the name nor the IP address of the"
2694 " cluster has changed",
2696 if new_ip != old_ip:
2697 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2698 raise errors.OpPrereqError("The given cluster IP address (%s) is"
2699 " reachable on the network" %
2700 new_ip, errors.ECODE_NOTUNIQUE)
2702 self.op.name = new_name
2704 def Exec(self, feedback_fn):
2705 """Rename the cluster.
2708 clustername = self.op.name
ip = self.ip
2711 # shutdown the master IP
2712 master = self.cfg.GetMasterNode()
2713 result = self.rpc.call_node_stop_master(master, False)
2714 result.Raise("Could not disable the master role")
2717 cluster = self.cfg.GetClusterInfo()
2718 cluster.cluster_name = clustername
2719 cluster.master_ip = ip
2720 self.cfg.Update(cluster, feedback_fn)
2722 # update the known hosts file
2723 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2724 node_list = self.cfg.GetOnlineNodeList()
2726 node_list.remove(master)
2729 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
2731 result = self.rpc.call_node_start_master(master, False, False)
2732 msg = result.fail_msg
2734 self.LogWarning("Could not re-enable the master role on"
2735 " the master, please restart manually: %s", msg)
2740 class LUClusterSetParams(LogicalUnit):
2741 """Change the parameters of the cluster.
2744 HPATH = "cluster-modify"
2745 HTYPE = constants.HTYPE_CLUSTER
2748 def CheckArguments(self):
2752 if self.op.uid_pool:
2753 uidpool.CheckUidPool(self.op.uid_pool)
2755 if self.op.add_uids:
2756 uidpool.CheckUidPool(self.op.add_uids)
2758 if self.op.remove_uids:
2759 uidpool.CheckUidPool(self.op.remove_uids)
2761 def ExpandNames(self):
2762 # FIXME: in the future maybe other cluster params won't require checking on
2763 # all nodes to be modified.
2764 self.needed_locks = {
2765 locking.LEVEL_NODE: locking.ALL_SET,
2767 self.share_locks[locking.LEVEL_NODE] = 1
2769 def BuildHooksEnv(self):
2774 "OP_TARGET": self.cfg.GetClusterName(),
2775 "NEW_VG_NAME": self.op.vg_name,
2777 mn = self.cfg.GetMasterNode()
2778 return env, [mn], [mn]
2780 def CheckPrereq(self):
2781 """Check prerequisites.
2783 This checks whether the given params don't conflict and
2784 if the given volume group is valid.
2787 if self.op.vg_name is not None and not self.op.vg_name:
2788 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2789 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2790 " instances exist", errors.ECODE_INVAL)
2792 if self.op.drbd_helper is not None and not self.op.drbd_helper:
2793 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2794 raise errors.OpPrereqError("Cannot disable drbd helper while"
2795 " drbd-based instances exist",
2798 node_list = self.acquired_locks[locking.LEVEL_NODE]
2800 # if vg_name not None, checks given volume group on all nodes
2802 vglist = self.rpc.call_vg_list(node_list)
2803 for node in node_list:
2804 msg = vglist[node].fail_msg
2806 # ignoring down node
2807 self.LogWarning("Error while gathering data on node %s"
2808 " (ignoring node): %s", node, msg)
2810 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2812 constants.MIN_VG_SIZE)
2814 raise errors.OpPrereqError("Error on node '%s': %s" %
2815 (node, vgstatus), errors.ECODE_ENVIRON)
2817 if self.op.drbd_helper:
2818 # checks given drbd helper on all nodes
2819 helpers = self.rpc.call_drbd_helper(node_list)
2820 for node in node_list:
2821 ninfo = self.cfg.GetNodeInfo(node)
2823 self.LogInfo("Not checking drbd helper on offline node %s", node)
2825 msg = helpers[node].fail_msg
2827 raise errors.OpPrereqError("Error checking drbd helper on node"
2828 " '%s': %s" % (node, msg),
2829 errors.ECODE_ENVIRON)
2830 node_helper = helpers[node].payload
2831 if node_helper != self.op.drbd_helper:
2832 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2833 (node, node_helper), errors.ECODE_ENVIRON)
2835 self.cluster = cluster = self.cfg.GetClusterInfo()
2836 # validate params changes
2837 if self.op.beparams:
2838 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2839 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2841 if self.op.ndparams:
2842 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2843 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2845 # TODO: we need a more general way to handle resetting
2846 # cluster-level parameters to default values
2847 if self.new_ndparams["oob_program"] == "":
2848 self.new_ndparams["oob_program"] = \
2849 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
2851 if self.op.nicparams:
2852 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2853 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2854 objects.NIC.CheckParameterSyntax(self.new_nicparams)
2857 # check all instances for consistency
2858 for instance in self.cfg.GetAllInstancesInfo().values():
2859 for nic_idx, nic in enumerate(instance.nics):
2860 params_copy = copy.deepcopy(nic.nicparams)
2861 params_filled = objects.FillDict(self.new_nicparams, params_copy)
2863 # check parameter syntax
2865 objects.NIC.CheckParameterSyntax(params_filled)
2866 except errors.ConfigurationError, err:
2867 nic_errors.append("Instance %s, nic/%d: %s" %
2868 (instance.name, nic_idx, err))
2870 # if we're moving instances to routed, check that they have an ip
2871 target_mode = params_filled[constants.NIC_MODE]
2872 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2873 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
2874 " address" % (instance.name, nic_idx))
2876 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2877 "\n".join(nic_errors))
2879 # hypervisor list/parameters
2880 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2881 if self.op.hvparams:
2882 for hv_name, hv_dict in self.op.hvparams.items():
2883 if hv_name not in self.new_hvparams:
2884 self.new_hvparams[hv_name] = hv_dict
2886 self.new_hvparams[hv_name].update(hv_dict)
2888 # os hypervisor parameters
2889 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2891 for os_name, hvs in self.op.os_hvp.items():
2892 if os_name not in self.new_os_hvp:
2893 self.new_os_hvp[os_name] = hvs
2895 for hv_name, hv_dict in hvs.items():
2896 if hv_name not in self.new_os_hvp[os_name]:
2897 self.new_os_hvp[os_name][hv_name] = hv_dict
2899 self.new_os_hvp[os_name][hv_name].update(hv_dict)
2902 self.new_osp = objects.FillDict(cluster.osparams, {})
2903 if self.op.osparams:
2904 for os_name, osp in self.op.osparams.items():
2905 if os_name not in self.new_osp:
2906 self.new_osp[os_name] = {}
2908 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2911 if not self.new_osp[os_name]:
2912 # we removed all parameters
2913 del self.new_osp[os_name]
2915 # check the parameter validity (remote check)
2916 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2917 os_name, self.new_osp[os_name])
2919 # changes to the hypervisor list
2920 if self.op.enabled_hypervisors is not None:
2921 self.hv_list = self.op.enabled_hypervisors
2922 for hv in self.hv_list:
2923 # if the hypervisor doesn't already exist in the cluster
2924 # hvparams, we initialize it to empty, and then (in both
2925 # cases) we make sure to fill the defaults, as we might not
2926 # have a complete defaults list if the hypervisor wasn't enabled before.
2928 if hv not in new_hvp:
new_hvp[hv] = {}
2930 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2931 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2933 self.hv_list = cluster.enabled_hypervisors
2935 if self.op.hvparams or self.op.enabled_hypervisors is not None:
2936 # either the enabled list has changed, or the parameters have, validate
2937 for hv_name, hv_params in self.new_hvparams.items():
2938 if ((self.op.hvparams and hv_name in self.op.hvparams) or
2939 (self.op.enabled_hypervisors and
2940 hv_name in self.op.enabled_hypervisors)):
2941 # either this is a new hypervisor, or its parameters have changed
2942 hv_class = hypervisor.GetHypervisor(hv_name)
2943 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2944 hv_class.CheckParameterSyntax(hv_params)
2945 _CheckHVParams(self, node_list, hv_name, hv_params)
2948 # no need to check any newly-enabled hypervisors, since the
2949 # defaults have already been checked in the above code-block
2950 for os_name, os_hvp in self.new_os_hvp.items():
2951 for hv_name, hv_params in os_hvp.items():
2952 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2953 # we need to fill in the new os_hvp on top of the actual hv_p
2954 cluster_defaults = self.new_hvparams.get(hv_name, {})
2955 new_osp = objects.FillDict(cluster_defaults, hv_params)
2956 hv_class = hypervisor.GetHypervisor(hv_name)
2957 hv_class.CheckParameterSyntax(new_osp)
2958 _CheckHVParams(self, node_list, hv_name, new_osp)
2960 if self.op.default_iallocator:
2961 alloc_script = utils.FindFile(self.op.default_iallocator,
2962 constants.IALLOCATOR_SEARCH_PATH,
2964 if alloc_script is None:
2965 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2966 " specified" % self.op.default_iallocator,
2969 def Exec(self, feedback_fn):
2970 """Change the parameters of the cluster.
2973 if self.op.vg_name is not None:
2974 new_volume = self.op.vg_name
2977 if new_volume != self.cfg.GetVGName():
2978 self.cfg.SetVGName(new_volume)
2980 feedback_fn("Cluster LVM configuration already in desired"
2981 " state, not changing")
2982 if self.op.drbd_helper is not None:
2983 new_helper = self.op.drbd_helper
2986 if new_helper != self.cfg.GetDRBDHelper():
2987 self.cfg.SetDRBDHelper(new_helper)
2989 feedback_fn("Cluster DRBD helper already in desired state,"
2991 if self.op.hvparams:
2992 self.cluster.hvparams = self.new_hvparams
2994 self.cluster.os_hvp = self.new_os_hvp
2995 if self.op.enabled_hypervisors is not None:
2996 self.cluster.hvparams = self.new_hvparams
2997 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2998 if self.op.beparams:
2999 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
3000 if self.op.nicparams:
3001 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
3002 if self.op.osparams:
3003 self.cluster.osparams = self.new_osp
3004 if self.op.ndparams:
3005 self.cluster.ndparams = self.new_ndparams
3007 if self.op.candidate_pool_size is not None:
3008 self.cluster.candidate_pool_size = self.op.candidate_pool_size
3009 # we need to update the pool size here, otherwise the save will fail
3010 _AdjustCandidatePool(self, [])
3012 if self.op.maintain_node_health is not None:
3013 self.cluster.maintain_node_health = self.op.maintain_node_health
3015 if self.op.prealloc_wipe_disks is not None:
3016 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
3018 if self.op.add_uids is not None:
3019 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
3021 if self.op.remove_uids is not None:
3022 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
3024 if self.op.uid_pool is not None:
3025 self.cluster.uid_pool = self.op.uid_pool
3027 if self.op.default_iallocator is not None:
3028 self.cluster.default_iallocator = self.op.default_iallocator
3030 if self.op.reserved_lvs is not None:
3031 self.cluster.reserved_lvs = self.op.reserved_lvs
3033 def helper_os(aname, mods, desc):
3035 lst = getattr(self.cluster, aname)
3036 for key, val in mods:
3037 if key == constants.DDM_ADD:
3039 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
3042 elif key == constants.DDM_REMOVE:
3046 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
3048 raise errors.ProgrammerError("Invalid modification '%s'" % key)
3050 if self.op.hidden_os:
3051 helper_os("hidden_os", self.op.hidden_os, "hidden")
3053 if self.op.blacklisted_os:
3054 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
3056 if self.op.master_netdev:
3057 master = self.cfg.GetMasterNode()
3058 feedback_fn("Shutting down master ip on the current netdev (%s)" %
3059 self.cluster.master_netdev)
3060 result = self.rpc.call_node_stop_master(master, False)
3061 result.Raise("Could not disable the master ip")
3062 feedback_fn("Changing master_netdev from %s to %s" %
3063 (self.cluster.master_netdev, self.op.master_netdev))
3064 self.cluster.master_netdev = self.op.master_netdev
3066 self.cfg.Update(self.cluster, feedback_fn)
3068 if self.op.master_netdev:
3069 feedback_fn("Starting the master ip on the new master netdev (%s)" %
3070 self.op.master_netdev)
3071 result = self.rpc.call_node_start_master(master, False, False)
3073 self.LogWarning("Could not re-enable the master ip on"
3074 " the master, please restart manually: %s",
3078 def _UploadHelper(lu, nodes, fname):
3079 """Helper for uploading a file and showing warnings.
3082 if os.path.exists(fname):
3083 result = lu.rpc.call_upload_file(nodes, fname)
3084 for to_node, to_result in result.items():
3085 msg = to_result.fail_msg
3087 msg = ("Copy of file %s to node %s failed: %s" %
3088 (fname, to_node, msg))
3089 lu.proc.LogWarning(msg)
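# Minimal usage sketch (hypothetical arguments):
#   _UploadHelper(lu, ["node1", "node2"], constants.ETC_HOSTS)
# A file missing locally is skipped silently and per-node copy failures are
# only logged as warnings, so distribution is strictly best-effort.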
3092 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
3093 """Distribute additional files which are part of the cluster configuration.
3095 ConfigWriter takes care of distributing the config and ssconf files, but
3096 there are more files which should be distributed to all nodes. This function
3097 makes sure those are copied.
3099 @param lu: calling logical unit
3100 @param additional_nodes: list of nodes not in the config to distribute to
3101 @type additional_vm: boolean
3102 @param additional_vm: whether the additional nodes are vm-capable or not
3105 # 1. Gather target nodes
3106 myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
3107 dist_nodes = lu.cfg.GetOnlineNodeList()
3108 nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
3109 vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
3110 if additional_nodes is not None:
3111 dist_nodes.extend(additional_nodes)
3113 vm_nodes.extend(additional_nodes)
3114 if myself.name in dist_nodes:
3115 dist_nodes.remove(myself.name)
3116 if myself.name in vm_nodes:
3117 vm_nodes.remove(myself.name)
3119 # 2. Gather files to distribute
3120 dist_files = set([constants.ETC_HOSTS,
3121 constants.SSH_KNOWN_HOSTS_FILE,
3122 constants.RAPI_CERT_FILE,
3123 constants.RAPI_USERS_FILE,
3124 constants.CONFD_HMAC_KEY,
3125 constants.CLUSTER_DOMAIN_SECRET_FILE,
3129 enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
3130 for hv_name in enabled_hypervisors:
3131 hv_class = hypervisor.GetHypervisor(hv_name)
3132 vm_files.update(hv_class.GetAncillaryFiles())
3134 # 3. Perform the files upload
3135 for fname in dist_files:
3136 _UploadHelper(lu, dist_nodes, fname)
3137 for fname in vm_files:
3138 _UploadHelper(lu, vm_nodes, fname)
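# Hedged usage sketch, with new_node_name being hypothetical: a node-add
# would call
#   _RedistributeAncillaryFiles(lu, additional_nodes=[new_node_name],
#                               additional_vm=True)
# so the not-yet-registered node also receives /etc/hosts, the certificates
# and the hypervisor ancillary files.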
3141 class LUClusterRedistConf(NoHooksLU):
3142 """Force the redistribution of cluster configuration.
3144 This is a very simple LU.
3149 def ExpandNames(self):
3150 self.needed_locks = {
3151 locking.LEVEL_NODE: locking.ALL_SET,
3153 self.share_locks[locking.LEVEL_NODE] = 1
3155 def Exec(self, feedback_fn):
3156 """Redistribute the configuration.
3159 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
3160 _RedistributeAncillaryFiles(self)
3163 def _WaitForSync(lu, instance, disks=None, oneshot=False):
3164 """Sleep and poll for an instance's disk to sync.
3167 if not instance.disks or disks is not None and not disks:
3170 disks = _ExpandCheckDisks(instance, disks)
3173 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3175 node = instance.primary_node
3178 lu.cfg.SetDiskID(dev, node)
3180 # TODO: Convert to utils.Retry
3183 degr_retries = 10 # in seconds, as we sleep 1 second each time
3187 cumul_degraded = False
3188 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3189 msg = rstats.fail_msg
3191 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3194 raise errors.RemoteError("Can't contact node %s for mirror data,"
3195 " aborting." % node)
3198 rstats = rstats.payload
3200 for i, mstat in enumerate(rstats):
3202 lu.LogWarning("Can't compute data for node %s/%s",
3203 node, disks[i].iv_name)
3206 cumul_degraded = (cumul_degraded or
3207 (mstat.is_degraded and mstat.sync_percent is None))
3208 if mstat.sync_percent is not None:
3210 if mstat.estimated_time is not None:
3211 rem_time = ("%s remaining (estimated)" %
3212 utils.FormatSeconds(mstat.estimated_time))
3213 max_time = mstat.estimated_time
3215 rem_time = "no time estimate"
3216 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3217 (disks[i].iv_name, mstat.sync_percent, rem_time))
3219 # if we're done but degraded, let's do a few small retries, to
3220 # make sure we see a stable and not transient situation; therefore
3221 # we force restart of the loop
3222 if (done or oneshot) and cumul_degraded and degr_retries > 0:
3223 logging.info("Degraded disks found, %d retries left", degr_retries)
3231 time.sleep(min(60, max_time))
3234 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3235 return not cumul_degraded
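# Sketch of one polling round with a hypothetical mirror status: a device
# reporting mstat.sync_percent == 42.0 and mstat.estimated_time == 300 is
# logged roughly as "- device disk/0: 42.00% done, 5m 0s remaining
# (estimated)", after which the loop sleeps min(60, max_time) seconds and
# polls again.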
3238 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3239 """Check that mirrors are not degraded.
3241 The ldisk parameter, if True, will change the test from the
3242 is_degraded attribute (which represents overall non-ok status for
3243 the device(s)) to the ldisk (representing the local storage status).
3246 lu.cfg.SetDiskID(dev, node)
3250 if on_primary or dev.AssembleOnSecondary():
3251 rstats = lu.rpc.call_blockdev_find(node, dev)
3252 msg = rstats.fail_msg
3254 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3256 elif not rstats.payload:
3257 lu.LogWarning("Can't find disk on node %s", node)
3261 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3263 result = result and not rstats.payload.is_degraded
3266 for child in dev.children:
3267 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
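# Hedged summary: with ldisk=True the device counts as healthy only if the
# payload's ldisk_status equals constants.LDS_OKAY (the local storage view),
# while the default mode merely requires is_degraded to be False; the check
# also recurses into any child devices.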
3272 class LUOobCommand(NoHooksLU):
3273 """Logical unit for OOB handling.
3278 def CheckPrereq(self):
3279 """Check prerequisites.
3282 - the node exists in the configuration
3285 Any errors are signaled by raising errors.OpPrereqError.
3289 for node_name in self.op.node_names:
3290 node = self.cfg.GetNodeInfo(node_name)
3293 raise errors.OpPrereqError("Node %s not found" % node_name,
3296 self.nodes.append(node)
3298 if (self.op.command == constants.OOB_POWER_OFF and not node.offline):
3299 raise errors.OpPrereqError(("Cannot power off node %s because it is"
3300 " not marked offline") % node_name,
3303 def ExpandNames(self):
3304 """Gather locks we need.
3307 if self.op.node_names:
3308 self.op.node_names = [_ExpandNodeName(self.cfg, name)
3309 for name in self.op.node_names]
3311 self.op.node_names = self.cfg.GetNodeList()
3313 self.needed_locks = {
3314 locking.LEVEL_NODE: self.op.node_names,
3317 def Exec(self, feedback_fn):
3318 """Execute OOB and return result if we expect any.
3321 master_node = self.cfg.GetMasterNode()
3324 for node in self.nodes:
3325 node_entry = [(constants.RS_NORMAL, node.name)]
3326 ret.append(node_entry)
3328 oob_program = _SupportsOob(self.cfg, node)
3331 node_entry.append((constants.RS_UNAVAIL, None))
3334 logging.info("Executing out-of-band command '%s' using '%s' on %s",
3335 self.op.command, oob_program, node.name)
3336 result = self.rpc.call_run_oob(master_node, oob_program,
3337 self.op.command, node.name,
3341 self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
3342 node.name, result.fail_msg)
3343 node_entry.append((constants.RS_NODATA, None))
3346 self._CheckPayload(result)
3347 except errors.OpExecError, err:
3348 self.LogWarning("The payload returned by '%s' is not valid: %s",
3350 node_entry.append((constants.RS_NODATA, None))
3352 if self.op.command == constants.OOB_HEALTH:
3353 # For health we should log important events
3354 for item, status in result.payload:
3355 if status in [constants.OOB_STATUS_WARNING,
3356 constants.OOB_STATUS_CRITICAL]:
3357 self.LogWarning("On node '%s' item '%s' has status '%s'",
3358 node.name, item, status)
3360 if self.op.command == constants.OOB_POWER_ON:
3362 elif self.op.command == constants.OOB_POWER_OFF:
3363 node.powered = False
3364 elif self.op.command == constants.OOB_POWER_STATUS:
3365 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
3366 if powered != node.powered:
3367 logging.warning(("Recorded power state (%s) of node '%s' does not"
3368 " match actual power state (%s)"), node.powered,
3371 # For configuration changing commands we should update the node
3372 if self.op.command in (constants.OOB_POWER_ON,
3373 constants.OOB_POWER_OFF):
3374 self.cfg.Update(node, feedback_fn)
3376 node_entry.append((constants.RS_NORMAL, result.payload))
3380 def _CheckPayload(self, result):
3381 """Checks if the payload is valid.
3383 @param result: RPC result
3384 @raises errors.OpExecError: If payload is not valid
3388 if self.op.command == constants.OOB_HEALTH:
3389 if not isinstance(result.payload, list):
3390 errs.append("command 'health' is expected to return a list but got %s" %
3391 type(result.payload))
3393 for item, status in result.payload:
3394 if status not in constants.OOB_STATUSES:
3395 errs.append("health item '%s' has invalid status '%s'" %
3398 if self.op.command == constants.OOB_POWER_STATUS:
3399 if not isinstance(result.payload, dict):
3400 errs.append("power-status is expected to return a dict but got %s" %
3401 type(result.payload))
3403 if self.op.command in [
3404 constants.OOB_POWER_ON,
3405 constants.OOB_POWER_OFF,
3406 constants.OOB_POWER_CYCLE,
3408 if result.payload is not None:
3409 errs.append("%s is expected to not return payload but got '%s'" %
3410 (self.op.command, result.payload))
3413 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
3414 utils.CommaJoin(errs))
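# Expected payload shapes per command, with hypothetical values:
#   OOB_HEALTH       -> [("PSU0", constants.OOB_STATUS_OK), ...]
#   OOB_POWER_STATUS -> {constants.OOB_POWER_STATUS_POWERED: True}
#   OOB_POWER_ON / OOB_POWER_OFF / OOB_POWER_CYCLE -> None (no payload)
# Any deviation is collected in errs and raised as a single OpExecError.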
3418 class LUOsDiagnose(NoHooksLU):
3419 """Logical unit for OS diagnose/query.
3424 _BLK = "blacklisted"
3426 _FIELDS_STATIC = utils.FieldSet()
3427 _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
3428 "parameters", "api_versions", _HID, _BLK)
3430 def CheckArguments(self):
3432 raise errors.OpPrereqError("Selective OS query not supported",
3435 _CheckOutputFields(static=self._FIELDS_STATIC,
3436 dynamic=self._FIELDS_DYNAMIC,
3437 selected=self.op.output_fields)
3439 def ExpandNames(self):
3440 # Lock all nodes, in shared mode
3441 # Temporary removal of locks, should be reverted later
3442 # TODO: reintroduce locks when they are lighter-weight
3443 self.needed_locks = {}
3444 #self.share_locks[locking.LEVEL_NODE] = 1
3445 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3448 def _DiagnoseByOS(rlist):
3449 """Remaps a per-node return list into an a per-os per-node dictionary
3451 @param rlist: a map with node names as keys and OS objects as values
3454 @return: a dictionary with osnames as keys and as value another
3455 map, with nodes as keys and tuples of (path, status, diagnose,
3456 variants, parameters, api_versions) as values, eg::
3458 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3459 (/srv/..., False, "invalid api")],
3460 "node2": [(/srv/..., True, "", [], [])]}
3465 # we build here the list of nodes that didn't fail the RPC (at RPC
3466 # level), so that nodes with a non-responding node daemon don't
3467 # make all OSes invalid
3468 good_nodes = [node_name for node_name in rlist
3469 if not rlist[node_name].fail_msg]
3470 for node_name, nr in rlist.items():
3471 if nr.fail_msg or not nr.payload:
3473 for (name, path, status, diagnose, variants,
3474 params, api_versions) in nr.payload:
3475 if name not in all_os:
3476 # build a list of nodes for this os containing empty lists
3477 # for each node in node_list
3479 for nname in good_nodes:
3480 all_os[name][nname] = []
3481 # convert params from [name, help] to (name, help)
3482 params = [tuple(v) for v in params]
3483 all_os[name][node_name].append((path, status, diagnose,
3484 variants, params, api_versions))
3487 def Exec(self, feedback_fn):
3488 """Compute the list of OSes.
3491 valid_nodes = [node.name
3492 for node in self.cfg.GetAllNodesInfo().values()
3493 if not node.offline and node.vm_capable]
3494 node_data = self.rpc.call_os_diagnose(valid_nodes)
3495 pol = self._DiagnoseByOS(node_data)
3497 cluster = self.cfg.GetClusterInfo()
3499 for os_name in utils.NiceSort(pol.keys()):
3500 os_data = pol[os_name]
3503 (variants, params, api_versions) = null_state = (set(), set(), set())
3504 for idx, osl in enumerate(os_data.values()):
3505 valid = bool(valid and osl and osl[0][1])
3507 (variants, params, api_versions) = null_state
3509 node_variants, node_params, node_api = osl[0][3:6]
3510 if idx == 0: # first entry
3511 variants = set(node_variants)
3512 params = set(node_params)
3513 api_versions = set(node_api)
3514 else: # keep consistency
3515 variants.intersection_update(node_variants)
3516 params.intersection_update(node_params)
3517 api_versions.intersection_update(node_api)
3519 is_hid = os_name in cluster.hidden_os
3520 is_blk = os_name in cluster.blacklisted_os
3521 if ((self._HID not in self.op.output_fields and is_hid) or
3522 (self._BLK not in self.op.output_fields and is_blk) or
3523 (self._VLD not in self.op.output_fields and not valid)):
3526 for field in self.op.output_fields:
3529 elif field == self._VLD:
3531 elif field == "node_status":
3532 # this is just a copy of the dict
3534 for node_name, nos_list in os_data.items():
3535 val[node_name] = nos_list
3536 elif field == "variants":
3537 val = utils.NiceSort(list(variants))
3538 elif field == "parameters":
3540 elif field == "api_versions":
3541 val = list(api_versions)
3542 elif field == self._HID:
3544 elif field == self._BLK:
3547 raise errors.ParameterError(field)
3554 class LUNodeRemove(LogicalUnit):
3555 """Logical unit for removing a node.
3558 HPATH = "node-remove"
3559 HTYPE = constants.HTYPE_NODE
3561 def BuildHooksEnv(self):
3564 This doesn't run on the target node in the pre phase as a failed
3565 node would then be impossible to remove.
3569 "OP_TARGET": self.op.node_name,
3570 "NODE_NAME": self.op.node_name,
3572 all_nodes = self.cfg.GetNodeList()
3574 all_nodes.remove(self.op.node_name)
3576 logging.warning("Node %s which is about to be removed not found"
3577 " in the all nodes list", self.op.node_name)
3578 return env, all_nodes, all_nodes
3580 def CheckPrereq(self):
3581 """Check prerequisites.
3584 - the node exists in the configuration
3585 - it does not have primary or secondary instances
3586 - it's not the master
3588 Any errors are signaled by raising errors.OpPrereqError.
3591 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3592 node = self.cfg.GetNodeInfo(self.op.node_name)
3593 assert node is not None
3595 instance_list = self.cfg.GetInstanceList()
3597 masternode = self.cfg.GetMasterNode()
3598 if node.name == masternode:
3599 raise errors.OpPrereqError("Node is the master node,"
3600 " you need to failover first.",
3603 for instance_name in instance_list:
3604 instance = self.cfg.GetInstanceInfo(instance_name)
3605 if node.name in instance.all_nodes:
3606 raise errors.OpPrereqError("Instance %s is still running on the node,"
3607 " please remove first." % instance_name,
3609 self.op.node_name = node.name
3612 def Exec(self, feedback_fn):
3613 """Removes the node from the cluster.
3617 logging.info("Stopping the node daemon and removing configs from node %s",
3620 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3622 # Promote nodes to master candidate as needed
3623 _AdjustCandidatePool(self, exceptions=[node.name])
3624 self.context.RemoveNode(node.name)
3626 # Run post hooks on the node before it's removed
3627 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3629 hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3631 # pylint: disable-msg=W0702
3632 self.LogWarning("Errors occurred running hooks on %s" % node.name)
3634 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3635 msg = result.fail_msg
3637 self.LogWarning("Errors encountered on the remote node while leaving"
3638 " the cluster: %s", msg)
3640 # Remove node from our /etc/hosts
3641 if self.cfg.GetClusterInfo().modify_etc_hosts:
3642 master_node = self.cfg.GetMasterNode()
3643 result = self.rpc.call_etc_hosts_modify(master_node,
3644 constants.ETC_HOSTS_REMOVE,
3646 result.Raise("Can't update hosts file with new host data")
3647 _RedistributeAncillaryFiles(self)
3650 class _NodeQuery(_QueryBase):
3651 FIELDS = query.NODE_FIELDS
3653 def ExpandNames(self, lu):
3654 lu.needed_locks = {}
3655 lu.share_locks[locking.LEVEL_NODE] = 1
3658 self.wanted = _GetWantedNodes(lu, self.names)
3660 self.wanted = locking.ALL_SET
3662 self.do_locking = (self.use_locking and
3663 query.NQ_LIVE in self.requested_data)
3666 # if we don't request only static fields, we need to lock the nodes
3667 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
3669 def DeclareLocks(self, lu, level):
3672 def _GetQueryData(self, lu):
3673 """Computes the list of nodes and their attributes.
3676 all_info = lu.cfg.GetAllNodesInfo()
3678 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
3680 # Gather data as requested
3681 if query.NQ_LIVE in self.requested_data:
3682 # filter out non-vm_capable nodes
3683 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
3685 node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
3686 lu.cfg.GetHypervisorType())
3687 live_data = dict((name, nresult.payload)
3688 for (name, nresult) in node_data.items()
3689 if not nresult.fail_msg and nresult.payload)
3693 if query.NQ_INST in self.requested_data:
3694 node_to_primary = dict([(name, set()) for name in nodenames])
3695 node_to_secondary = dict([(name, set()) for name in nodenames])
3697 inst_data = lu.cfg.GetAllInstancesInfo()
3699 for inst in inst_data.values():
3700 if inst.primary_node in node_to_primary:
3701 node_to_primary[inst.primary_node].add(inst.name)
3702 for secnode in inst.secondary_nodes:
3703 if secnode in node_to_secondary:
3704 node_to_secondary[secnode].add(inst.name)
3706 node_to_primary = None
3707 node_to_secondary = None
3709 if query.NQ_OOB in self.requested_data:
3710 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
3711 for name, node in all_info.iteritems())
3715 if query.NQ_GROUP in self.requested_data:
3716 groups = lu.cfg.GetAllNodeGroupsInfo()
3720 return query.NodeQueryData([all_info[name] for name in nodenames],
3721 live_data, lu.cfg.GetMasterNode(),
3722 node_to_primary, node_to_secondary, groups,
3723 oob_support, lu.cfg.GetClusterInfo())
3726 class LUNodeQuery(NoHooksLU):
3727 """Logical unit for querying nodes.
3730 # pylint: disable-msg=W0142
3733 def CheckArguments(self):
3734 self.nq = _NodeQuery(self.op.names, self.op.output_fields,
3735 self.op.use_locking)
3737 def ExpandNames(self):
3738 self.nq.ExpandNames(self)
3740 def Exec(self, feedback_fn):
3741 return self.nq.OldStyleQuery(self)


class LUNodeQueryvols(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.acquired_locks[locking.LEVEL_NODE]
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
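

# Example of the result shape (illustrative values only): with
# output_fields=["node", "name", "size"], Exec returns one row per
# logical volume, e.g. [["node1", "disk0", "10240"], ...]; every value
# is stringified before being appended to its row.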


class LUNodeQueryStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class _InstanceQuery(_QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_INSTANCE] = 1
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, lu, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      lu._LockInstancesNodes() # pylint: disable-msg=W0212

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{"size": disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo)


class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)
    names = qlang.ReadSimpleFilter("name", self.op.filter)

    self.impl = qcls(names, self.op.fields, False)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)


class LUQueryFields(NoHooksLU):
  """Query for the available fields of resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return self.qcls.FieldsQuery(self.op.fields)


class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Modifies the given storage unit on the node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))
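

# A minimal submission sketch (assuming the matching opcode wrapper
# exists in this tree; the node and PV names are illustrative): marking
# an LVM physical volume as allocatable on one node.
#
#   op = opcodes.OpNodeModifyStorage(node_name="node1.example.com",
#                                    storage_type=constants.ST_LVM_PV,
#                                    name="/dev/sda3",
#                                    changes={constants.SF_ALLOCATABLE: True})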


class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name

    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
      raise errors.OpPrereqError("Cannot readd the master node",
                                 errors.ECODE_STATE)

    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                   " IPv4 address must be given as secondary",
                                   errors.ECODE_INVAL)
      self.op.secondary_ip = primary_ip

    secondary_ip = self.op.secondary_ip
    if not netutils.IP4Address.IsValid(secondary_ip):
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                 " address" % secondary_ip, errors.ECODE_INVAL)

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                              source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to node daemon port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # We are adding a new node, so we assume it's powered
    new_node.powered = True

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    if self.op.ndparams:
      new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                               False)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
      }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
                                  additional_vm=self.op.vm_capable)
      self.context.AddNode(new_node, self.proc.GetECId())
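

# Usage sketch (assuming the matching opcode wrapper in this tree; the
# node name is illustrative): a plain node addition. With readd=True the
# flag/IP checks in CheckPrereq take the re-add branches instead.
#
#   op = opcodes.OpNodeAdd(node_name="node2.example.com", readd=False)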


class LUNodeSetParams(LogicalUnit):
  """Modifies the parameters of a node.

  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
      to the node role (as _ROLE_*)
  @cvar _R2F: a dictionary from node role to tuples of flags
  @cvar _FLAGS: a list of attribute names corresponding to the flags

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
  _F2R = {
    (True, False, False): _ROLE_CANDIDATE,
    (False, True, False): _ROLE_DRAINED,
    (False, False, True): _ROLE_OFFLINE,
    (False, False, False): _ROLE_REGULAR,
    }
  _R2F = dict((v, k) for k, v in _F2R.items())
  _FLAGS = ["master_candidate", "drained", "offline"]

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                self.op.master_capable, self.op.vm_capable,
                self.op.secondary_ip, self.op.ndparams]
    if all_mods.count(None) == len(all_mods):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we might be demoting from MC
    self.might_demote = (self.op.master_candidate == False or
                         self.op.offline == True or
                         self.op.drained == True or
                         self.op.master_capable == False)

    if self.op.secondary_ip:
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                   " address" % self.op.secondary_ip,
                                   errors.ECODE_INVAL)

    self.lock_all = self.op.auto_promote and self.might_demote
    self.lock_instances = self.op.secondary_ip is not None

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

    if self.lock_instances:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

  def DeclareLocks(self, level):
    # If we have locked all instances, before waiting to lock nodes, release
    # all the ones living on nodes unrelated to the current operation.
    if level == locking.LEVEL_NODE and self.lock_instances:
      instances_release = []
      instances_keep = []
      self.affected_instances = []
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
          instance = self.context.cfg.GetInstanceInfo(instance_name)
          i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
          if i_mirrored and self.op.node_name in instance.all_nodes:
            instances_keep.append(instance_name)
            self.affected_instances.append(instance)
          else:
            instances_release.append(instance_name)
        if instances_release:
          self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
          self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via master-failover",
                                   errors.ECODE_INVAL)

    if self.op.master_candidate and not node.master_capable:
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
                                 " it a master candidate" % node.name,
                                 errors.ECODE_STATE)

    if self.op.vm_capable == False:
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto promote option to allow"
                                   " promotion", errors.ECODE_STATE)

    self.old_flags = old_flags = (node.master_candidate,
                                  node.drained, node.offline)
    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
    self.old_role = old_role = self._F2R[old_flags]

    # Check for ineffective changes
    for attr in self._FLAGS:
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
        setattr(self.op, attr, None)

    # Past this point, any flag change to False means a transition
    # away from the respective state, as only real changes are kept

    # TODO: We might query the real power state if it supports OOB
    if _SupportsOob(self.cfg, node):
      if self.op.offline is False and not (node.powered or
                                           self.op.powered == True):
        raise errors.OpPrereqError(("Please power on node %s first before you"
                                    " can reset offline state") %
                                   self.op.node_name)
    elif self.op.powered is not None:
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
                                  " which does not support out-of-band"
                                  " handling") % self.op.node_name)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.op.drained == False or self.op.offline == False or
        (self.op.master_capable and not node.master_capable)):
      if _DecideSelfPromotion(self):
        self.op.master_candidate = True
        self.LogInfo("Auto-promoting node to master candidate")

    # If we're no longer master capable, we'll demote ourselves from MC
    if self.op.master_capable == False and node.master_candidate:
      self.LogInfo("Demoting from master candidate")
      self.op.master_candidate = False

    # Compute new role
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
    if self.op.master_candidate:
      new_role = self._ROLE_CANDIDATE
    elif self.op.drained:
      new_role = self._ROLE_DRAINED
    elif self.op.offline:
      new_role = self._ROLE_OFFLINE
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role

    self.new_role = new_role

    if old_role == self._ROLE_OFFLINE and new_role != old_role:
      # Trying to transition out of offline status
      result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_STATE)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")

    if self.op.secondary_ip:
      # Ok even without locking, because this can't be changed by any LU
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
      master_singlehomed = master.secondary_ip == master.primary_ip
      if master_singlehomed and self.op.secondary_ip:
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
                                   " homed cluster", errors.ECODE_INVAL)

      if node.offline:
        if self.affected_instances:
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
                                     " node has instances (%s) configured"
                                     " to use it" % self.affected_instances)
      else:
        # On online nodes, check that no instances are running, and that
        # the node has the new ip and we can reach it.
        for instance in self.affected_instances:
          _CheckInstanceDown(self, instance, "cannot change secondary ip")

        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
        if master.name != node.name:
          # check reachability from master secondary ip to new secondary ip
          if not netutils.TcpPing(self.op.secondary_ip,
                                  constants.DEFAULT_NODED_PORT,
                                  source=master.secondary_ip):
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                       " based ping to node daemon port",
                                       errors.ECODE_ENVIRON)

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

    result = []

    if self.op.ndparams:
      node.ndparams = self.new_ndparams

    if self.op.powered is not None:
      node.powered = self.op.powered

    for attr in ["master_capable", "vm_capable"]:
      val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
        result.append((attr, str(val)))

    if new_role != old_role:
      # Tell the node to demote itself, if no longer MC and not offline
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)

      new_flags = self._R2F[new_role]
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
        if of != nf:
          result.append((desc, str(nf)))
      (node.master_candidate, node.drained, node.offline) = new_flags

      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        _AdjustCandidatePool(self, [node.name])

    if self.op.secondary_ip:
      node.secondary_ip = self.op.secondary_ip
      result.append(("secondary_ip", self.op.secondary_ip))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result
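

# Example of the role translation above: a node with flags
# (master_candidate=True, drained=False, offline=False) maps via _F2R
# to _ROLE_CANDIDATE, and _R2F inverts the mapping when the new role is
# written back onto the node object in Exec.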


class LUNodePowercycle(NoHooksLU):
  """Powercycles a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUClusterQuery(NoHooksLU):
  """Query cluster configuration.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    # Convert ip_family to ip_version
    primary_ip_version = constants.IP4_VERSION
    if cluster.primary_ip_family == netutils.IP6Address.family:
      primary_ip_version = constants.IP6_VERSION

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "osparams": cluster.osparams,
      "nicparams": cluster.nicparams,
      "ndparams": cluster.ndparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
      "file_storage_dir": cluster.file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      "default_iallocator": cluster.default_iallocator,
      "reserved_lvs": cluster.reserved_lvs,
      "primary_ip_version": primary_ip_version,
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
      "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      }

    return result


class LUClusterConfigQuery(NoHooksLU):
  """Return configuration values.

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause", "volume_group_name")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      elif field == "volume_group_name":
        entry = self.cfg.GetVGName()
      else:
        raise errors.ParameterError(field)
      values.append(entry)

    return values


class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
      _AssembleInstanceDisks(self, self.instance,
                             ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  disks = _ExpandCheckDisks(instance, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
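

# Minimal usage sketch (assumes it runs inside a logical unit, as in
# LUInstanceActivateDisks above): assemble all disks while ignoring the
# recorded sizes, e.g. after an out-of-band resize.
#
#   disks_ok, device_info = _AssembleInstanceDisks(self, self.instance,
#                                                  ignore_size=True)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")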


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    if self.op.force:
      _ShutdownInstanceDisks(self, instance)
    else:
      _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks that the instance is down before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance, disks=disks)


def _ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list.

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks


def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is false, errors on the primary node make the
  result False; otherwise they are ignored.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False
  return all_result
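

# Note on the return value (derived from the loop above): errors on an
# offline secondary do not flip the aggregated result, so callers that
# must not lose data typically check it, e.g.:
#
#   if not _ShutdownInstanceDisks(lu, instance):
#     raise errors.OpExecError("Can't shut down the instance's disks")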


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)


def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
  """Checks if nodes have enough free disk space in all the VGs.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  for vg, req_size in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)


def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
  """Checks if nodes have enough free disk space in the specified VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type vg: C{str}
  @param vg: the volume group to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    vg_free = info.payload.get("vg_free", None)
    if not isinstance(vg_free, int):
      raise errors.OpPrereqError("Can't compute free disk space on node"
                                 " %s for vg %s, result was '%s'" %
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
    if requested > vg_free:
      raise errors.OpPrereqError("Not enough disk space on target node %s"
                                 " vg %s: required %d MiB, available %d MiB" %
                                 (node, vg, requested, vg_free),
                                 errors.ECODE_NORES)
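

# Example (illustrative names and MiB sizes): checking two volume groups
# on a set of nodes in one call via the per-VG helper above.
#
#   _CheckNodesFreeDiskPerVG(lu, ["node1", "node2"],
#                            {"xenvg": 10240, "backupvg": 2048})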


class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)

    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")

      if self.op.hvparams or self.op.beparams:
        self.proc.LogWarning("Overridden parameters are ignored")
    else:
      _CheckNodeOnline(self, instance.primary_node)

      bep = self.cfg.GetClusterInfo().FillBE(instance)

      # check bridges existence
      _CheckInstanceBridgesExist(self, instance)

      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      if not remote_info.payload: # not running already
        _CheckNodeFreeMemory(self, instance.primary_node,
                             "starting instance %s" % instance.name,
                             bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    if not self.op.no_remember:
      self.cfg.MarkInstanceUp(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as started")
    else:
      node_current = instance.primary_node

      _StartInstanceDisks(self, instance, force)

      result = self.rpc.call_instance_start(node_current, instance,
                                            self.op.hvparams, self.op.beparams)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)
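

# Usage sketch (assuming the matching opcode wrapper in this tree; the
# instance name and memory value are illustrative): a one-off memory
# override for a single start, mirroring the beparams handling above.
#
#   op = opcodes.OpInstanceStartup(instance_name="inst1.example.com",
#                                  beparams={constants.BE_MEMORY: 512})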


class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.op.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance,
                                               self.op.shutdown_timeout)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
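

# The reboot types map onto the two branches above: INSTANCE_REBOOT_SOFT
# and INSTANCE_REBOOT_HARD are delegated to the node daemon in a single
# RPC, while a full reboot is emulated as shutdown, disk cycle and start.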


class LUInstanceShutdown(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.op.timeout
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")
    else:
      _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    timeout = self.op.timeout

    if not self.op.no_remember:
      self.cfg.MarkInstanceDown(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
      msg = result.fail_msg
      if msg:
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)

      _ShutdownInstanceDisks(self, instance)


class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                     " offline, cannot reinstall")
    for node in instance.secondary_nodes:
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
                       " cannot reinstall")

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot reinstall")

    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os

    nodelist = list(instance.all_nodes)

    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      # Write to configuration
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level,
                                             osparams=self.os_inst)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # normalise the disk list
    self.op.disks = sorted(frozenset(self.op.disks))

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    if self.op.nodes:
      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = []

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # if we replace the nodes, we only need to lock the old primary,
      # otherwise we need to lock all nodes for disk re-creation
      primary_only = bool(self.op.nodes)
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    if self.op.nodes:
      if len(self.op.nodes) != len(instance.all_nodes):
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                   " %d replacement nodes were specified" %
                                   (instance.name, len(instance.all_nodes),
                                    len(self.op.nodes)),
                                   errors.ECODE_INVAL)
      assert instance.disk_template != constants.DT_DRBD8 or \
          len(self.op.nodes) == 2
      assert instance.disk_template != constants.DT_PLAIN or \
          len(self.op.nodes) == 1
      primary_node = self.op.nodes[0]
    else:
      primary_node = instance.primary_node
    _CheckNodeOnline(self, primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)
    # if we replace nodes *and* the old primary is offline, we don't
    # check the instance state
    assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
    if not (self.op.nodes and old_pnode.offline):
      _CheckInstanceDown(self, instance, "cannot recreate disks")

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
                                     errors.ECODE_INVAL)
    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
      raise errors.OpPrereqError("Can't recreate disks partially and"
                                 " change the nodes at the same time",
                                 errors.ECODE_INVAL)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    # change primary node, if needed
    if self.op.nodes:
      self.instance.primary_node = self.op.nodes[0]
      self.LogWarning("Changing the instance's nodes, you will have to"
                      " remove any disks left on the older nodes manually")

    to_skip = []
    for idx, disk in enumerate(self.instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue
      # update secondaries for disks, if needed
      if self.op.nodes:
        if disk.dev_type == constants.LD_DRBD8:
          # need to update the nodes
          assert len(self.op.nodes) == 2
          logical_id = list(disk.logical_id)
          logical_id[0] = self.op.nodes[0]
          logical_id[1] = self.op.nodes[1]
          disk.logical_id = tuple(logical_id)

    if self.op.nodes:
      self.cfg.Update(self.instance, feedback_fn)

    _CreateDisks(self, self.instance, to_skip=to_skip)
5554 class LUInstanceRename(LogicalUnit):
5555 """Rename an instance.
5558 HPATH = "instance-rename"
5559 HTYPE = constants.HTYPE_INSTANCE
5561 def CheckArguments(self):
5565 if self.op.ip_check and not self.op.name_check:
5566 # TODO: make the ip check more flexible and not depend on the name check
5567 raise errors.OpPrereqError("Cannot do ip check without a name check",
5570 def BuildHooksEnv(self):
5573 This runs on master, primary and secondary nodes of the instance.
5576 env = _BuildInstanceHookEnvByObject(self, self.instance)
5577 env["INSTANCE_NEW_NAME"] = self.op.new_name
5578 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5581 def CheckPrereq(self):
5582 """Check prerequisites.
5584 This checks that the instance is in the cluster and is not running.
5587 self.op.instance_name = _ExpandInstanceName(self.cfg,
5588 self.op.instance_name)
5589 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5590 assert instance is not None
5591 _CheckNodeOnline(self, instance.primary_node)
5592 _CheckInstanceDown(self, instance, "cannot rename")
5593 self.instance = instance
5595 new_name = self.op.new_name
5596 if self.op.name_check:
5597 hostname = netutils.GetHostname(name=new_name)
5598 if hostname != new_name:
5599 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
5601 new_name = self.op.new_name = hostname.name
5602 if (self.op.ip_check and
5603 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5604 raise errors.OpPrereqError("IP %s of instance %s already in use" %
5605 (hostname.ip, new_name),
5606 errors.ECODE_NOTUNIQUE)
5608 instance_list = self.cfg.GetInstanceList()
5609 if new_name in instance_list and new_name != instance.name:
5610 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5611 new_name, errors.ECODE_EXISTS)
5613 def Exec(self, feedback_fn):
5614 """Rename the instance.
5617 inst = self.instance
5618 old_name = inst.name
5620 rename_file_storage = False
5621 if (inst.disk_template == constants.DT_FILE and
5622 self.op.new_name != inst.name):
5623 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5624 rename_file_storage = True
5626 self.cfg.RenameInstance(inst.name, self.op.new_name)
5627 # Change the instance lock. This is definitely safe while we hold the BGL
5628 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
5629 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5631 # re-read the instance from the configuration after rename
5632 inst = self.cfg.GetInstanceInfo(self.op.new_name)
5634 if rename_file_storage:
5635 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5636 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5637 old_file_storage_dir,
5638 new_file_storage_dir)
5639 result.Raise("Could not rename on node %s directory '%s' to '%s'"
5640 " (but the instance has been renamed in Ganeti)" %
5641 (inst.primary_node, old_file_storage_dir,
5642 new_file_storage_dir))
5644 _StartInstanceDisks(self, inst, None)
5645 try:
5646 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
5647 old_name, self.op.debug_level)
5648 msg = result.fail_msg
5649 if msg:
5650 msg = ("Could not run OS rename script for instance %s on node %s"
5651 " (but the instance has been renamed in Ganeti): %s" %
5652 (inst.name, inst.primary_node, msg))
5653 self.proc.LogWarning(msg)
5654 finally:
5655 _ShutdownInstanceDisks(self, inst)
5657 return inst.name
5660 class LUInstanceRemove(LogicalUnit):
5661 """Remove an instance.
5663 """
5664 HPATH = "instance-remove"
5665 HTYPE = constants.HTYPE_INSTANCE
5666 REQ_BGL = False
5668 def ExpandNames(self):
5669 self._ExpandAndLockInstance()
5670 self.needed_locks[locking.LEVEL_NODE] = []
5671 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5673 def DeclareLocks(self, level):
5674 if level == locking.LEVEL_NODE:
5675 self._LockInstancesNodes()
5677 def BuildHooksEnv(self):
5678 """Build hooks env.
5680 This runs on master, primary and secondary nodes of the instance.
5682 """
5683 env = _BuildInstanceHookEnvByObject(self, self.instance)
5684 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
5685 nl = [self.cfg.GetMasterNode()]
5686 nl_post = list(self.instance.all_nodes) + nl
5687 return env, nl, nl_post
5689 def CheckPrereq(self):
5690 """Check prerequisites.
5692 This checks that the instance is in the cluster.
5694 """
5695 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5696 assert self.instance is not None, \
5697 "Cannot retrieve locked instance %s" % self.op.instance_name
5699 def Exec(self, feedback_fn):
5700 """Remove the instance.
5702 """
5703 instance = self.instance
5704 logging.info("Shutting down instance %s on node %s",
5705 instance.name, instance.primary_node)
5707 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5708 self.op.shutdown_timeout)
5709 msg = result.fail_msg
5710 if msg:
5711 if self.op.ignore_failures:
5712 feedback_fn("Warning: can't shutdown instance: %s" % msg)
5713 else:
5714 raise errors.OpExecError("Could not shutdown instance %s on"
5715 " node %s: %s" %
5716 (instance.name, instance.primary_node, msg))
5718 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5721 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5722 """Utility function to remove an instance.
5724 """
5725 logging.info("Removing block devices for instance %s", instance.name)
5727 if not _RemoveDisks(lu, instance):
5728 if not ignore_failures:
5729 raise errors.OpExecError("Can't remove instance's disks")
5730 feedback_fn("Warning: can't remove instance's disks")
5732 logging.info("Removing instance %s out of cluster config", instance.name)
5734 lu.cfg.RemoveInstance(instance.name)
5736 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5737 "Instance lock removal conflict"
5739 # Remove lock for the instance
5740 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5743 class LUInstanceQuery(NoHooksLU):
5744 """Logical unit for querying instances.
5746 """
5747 # pylint: disable-msg=W0142
5748 REQ_BGL = False
5750 def CheckArguments(self):
5751 self.iq = _InstanceQuery(self.op.names, self.op.output_fields,
5752 self.op.use_locking)
5754 def ExpandNames(self):
5755 self.iq.ExpandNames(self)
5757 def DeclareLocks(self, level):
5758 self.iq.DeclareLocks(self, level)
5760 def Exec(self, feedback_fn):
5761 return self.iq.OldStyleQuery(self)
5764 class LUInstanceFailover(LogicalUnit):
5765 """Failover an instance.
5767 """
5768 HPATH = "instance-failover"
5769 HTYPE = constants.HTYPE_INSTANCE
5770 REQ_BGL = False
5772 def ExpandNames(self):
5773 self._ExpandAndLockInstance()
5774 self.needed_locks[locking.LEVEL_NODE] = []
5775 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5777 def DeclareLocks(self, level):
5778 if level == locking.LEVEL_NODE:
5779 self._LockInstancesNodes()
5781 def BuildHooksEnv(self):
5782 """Build hooks env.
5784 This runs on master, primary and secondary nodes of the instance.
5786 """
5787 instance = self.instance
5788 source_node = instance.primary_node
5789 target_node = instance.secondary_nodes[0]
5790 env = {
5791 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5792 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5793 "OLD_PRIMARY": source_node,
5794 "OLD_SECONDARY": target_node,
5795 "NEW_PRIMARY": target_node,
5796 "NEW_SECONDARY": source_node,
5797 }
5798 env.update(_BuildInstanceHookEnvByObject(self, instance))
5799 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5800 nl_post = list(nl)
5801 nl_post.append(source_node)
5802 return env, nl, nl_post
5804 def CheckPrereq(self):
5805 """Check prerequisites.
5807 This checks that the instance is in the cluster.
5809 """
5810 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5811 assert self.instance is not None, \
5812 "Cannot retrieve locked instance %s" % self.op.instance_name
5814 bep = self.cfg.GetClusterInfo().FillBE(instance)
5815 if instance.disk_template not in constants.DTS_NET_MIRROR:
5816 raise errors.OpPrereqError("Instance's disk layout is not"
5817 " network mirrored, cannot failover.",
5818 errors.ECODE_STATE)
5820 secondary_nodes = instance.secondary_nodes
5821 if not secondary_nodes:
5822 raise errors.ProgrammerError("no secondary node but using "
5823 "a mirrored disk template")
5825 target_node = secondary_nodes[0]
5826 _CheckNodeOnline(self, target_node)
5827 _CheckNodeNotDrained(self, target_node)
5828 if instance.admin_up:
5829 # check memory requirements on the secondary node
5830 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5831 instance.name, bep[constants.BE_MEMORY],
5832 instance.hypervisor)
5833 else:
5834 self.LogInfo("Not checking memory on the secondary node as"
5835 " instance will not be started")
5837 # check bridge existence
5838 _CheckInstanceBridgesExist(self, instance, node=target_node)
5840 def Exec(self, feedback_fn):
5841 """Failover an instance.
5843 The failover is done by shutting it down on its present node and
5844 starting it on the secondary.
5846 """
5847 instance = self.instance
5848 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
5850 source_node = instance.primary_node
5851 target_node = instance.secondary_nodes[0]
5853 if instance.admin_up:
5854 feedback_fn("* checking disk consistency between source and target")
5855 for dev in instance.disks:
5856 # for drbd, these are drbd over lvm
5857 if not _CheckDiskConsistency(self, dev, target_node, False):
5858 if not self.op.ignore_consistency:
5859 raise errors.OpExecError("Disk %s is degraded on target node,"
5860 " aborting failover." % dev.iv_name)
5862 feedback_fn("* not checking disk consistency as instance is not running")
5864 feedback_fn("* shutting down instance on source node")
5865 logging.info("Shutting down instance %s on node %s",
5866 instance.name, source_node)
5868 result = self.rpc.call_instance_shutdown(source_node, instance,
5869 self.op.shutdown_timeout)
5870 msg = result.fail_msg
5871 if msg:
5872 if self.op.ignore_consistency or primary_node.offline:
5873 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5874 " Proceeding anyway. Please make sure node"
5875 " %s is down. Error details: %s",
5876 instance.name, source_node, source_node, msg)
5877 else:
5878 raise errors.OpExecError("Could not shutdown instance %s on"
5879 " node %s: %s" %
5880 (instance.name, source_node, msg))
5882 feedback_fn("* deactivating the instance's disks on source node")
5883 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5884 raise errors.OpExecError("Can't shut down the instance's disks.")
5886 instance.primary_node = target_node
5887 # distribute new instance config to the other nodes
5888 self.cfg.Update(instance, feedback_fn)
5890 # Only start the instance if it's marked as up
5891 if instance.admin_up:
5892 feedback_fn("* activating the instance's disks on target node")
5893 logging.info("Starting instance %s on node %s",
5894 instance.name, target_node)
5896 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5897 ignore_secondaries=True)
5898 if not disks_ok:
5899 _ShutdownInstanceDisks(self, instance)
5900 raise errors.OpExecError("Can't activate the instance's disks")
5902 feedback_fn("* starting the instance on the target node")
5903 result = self.rpc.call_instance_start(target_node, instance, None, None)
5904 msg = result.fail_msg
5905 if msg:
5906 _ShutdownInstanceDisks(self, instance)
5907 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5908 (instance.name, target_node, msg))
5911 class LUInstanceMigrate(LogicalUnit):
5912 """Migrate an instance.
5914 This is migration without shutting down, compared to the failover,
5915 which is done with shutdown.
5917 """
5918 HPATH = "instance-migrate"
5919 HTYPE = constants.HTYPE_INSTANCE
5920 REQ_BGL = False
5922 def ExpandNames(self):
5923 self._ExpandAndLockInstance()
5925 self.needed_locks[locking.LEVEL_NODE] = []
5926 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5928 self._migrater = TLMigrateInstance(self, self.op.instance_name,
5929 self.op.cleanup)
5930 self.tasklets = [self._migrater]
5932 def DeclareLocks(self, level):
5933 if level == locking.LEVEL_NODE:
5934 self._LockInstancesNodes()
5936 def BuildHooksEnv(self):
5937 """Build hooks env.
5939 This runs on master, primary and secondary nodes of the instance.
5941 """
5942 instance = self._migrater.instance
5943 source_node = instance.primary_node
5944 target_node = instance.secondary_nodes[0]
5945 env = _BuildInstanceHookEnvByObject(self, instance)
5946 env["MIGRATE_LIVE"] = self._migrater.live
5947 env["MIGRATE_CLEANUP"] = self.op.cleanup
5949 "OLD_PRIMARY": source_node,
5950 "OLD_SECONDARY": target_node,
5951 "NEW_PRIMARY": target_node,
5952 "NEW_SECONDARY": source_node,
5954 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5956 nl_post.append(source_node)
5957 return env, nl, nl_post
5960 class LUInstanceMove(LogicalUnit):
5961 """Move an instance by data-copying.
5963 """
5964 HPATH = "instance-move"
5965 HTYPE = constants.HTYPE_INSTANCE
5966 REQ_BGL = False
5968 def ExpandNames(self):
5969 self._ExpandAndLockInstance()
5970 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5971 self.op.target_node = target_node
5972 self.needed_locks[locking.LEVEL_NODE] = [target_node]
5973 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5975 def DeclareLocks(self, level):
5976 if level == locking.LEVEL_NODE:
5977 self._LockInstancesNodes(primary_only=True)
5979 def BuildHooksEnv(self):
5980 """Build hooks env.
5982 This runs on master, primary and secondary nodes of the instance.
5984 """
5985 env = {
5986 "TARGET_NODE": self.op.target_node,
5987 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5988 }
5989 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5990 nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5991 self.op.target_node]
5992 return env, nl, nl
5994 def CheckPrereq(self):
5995 """Check prerequisites.
5997 This checks that the instance is in the cluster.
5999 """
6000 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6001 assert self.instance is not None, \
6002 "Cannot retrieve locked instance %s" % self.op.instance_name
6004 node = self.cfg.GetNodeInfo(self.op.target_node)
6005 assert node is not None, \
6006 "Cannot retrieve locked node %s" % self.op.target_node
6008 self.target_node = target_node = node.name
6010 if target_node == instance.primary_node:
6011 raise errors.OpPrereqError("Instance %s is already on the node %s" %
6012 (instance.name, target_node),
6013 errors.ECODE_STATE)
6015 bep = self.cfg.GetClusterInfo().FillBE(instance)
6017 for idx, dsk in enumerate(instance.disks):
6018 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
6019 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
6020 " cannot copy" % idx, errors.ECODE_STATE)
6022 _CheckNodeOnline(self, target_node)
6023 _CheckNodeNotDrained(self, target_node)
6024 _CheckNodeVmCapable(self, target_node)
6026 if instance.admin_up:
6027 # check memory requirements on the secondary node
6028 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
6029 instance.name, bep[constants.BE_MEMORY],
6030 instance.hypervisor)
6031 else:
6032 self.LogInfo("Not checking memory on the secondary node as"
6033 " instance will not be started")
6035 # check bridge existence
6036 _CheckInstanceBridgesExist(self, instance, node=target_node)
6038 def Exec(self, feedback_fn):
6039 """Move an instance.
6041 The move is done by shutting it down on its present node, copying
6042 the data over (slow) and starting it on the new node.
6044 """
6045 instance = self.instance
6047 source_node = instance.primary_node
6048 target_node = self.target_node
6050 self.LogInfo("Shutting down instance %s on source node %s",
6051 instance.name, source_node)
6053 result = self.rpc.call_instance_shutdown(source_node, instance,
6054 self.op.shutdown_timeout)
6055 msg = result.fail_msg
6056 if msg:
6057 if self.op.ignore_consistency:
6058 self.proc.LogWarning("Could not shutdown instance %s on node %s."
6059 " Proceeding anyway. Please make sure node"
6060 " %s is down. Error details: %s",
6061 instance.name, source_node, source_node, msg)
6062 else:
6063 raise errors.OpExecError("Could not shutdown instance %s on"
6064 " node %s: %s" %
6065 (instance.name, source_node, msg))
6067 # create the target disks
6068 try:
6069 _CreateDisks(self, instance, target_node=target_node)
6070 except errors.OpExecError:
6071 self.LogWarning("Device creation failed, reverting...")
6072 try:
6073 _RemoveDisks(self, instance, target_node=target_node)
6074 finally:
6075 self.cfg.ReleaseDRBDMinors(instance.name)
6076 raise
6078 cluster_name = self.cfg.GetClusterInfo().cluster_name
6080 errs = []
6081 # activate, get path, copy the data over
6082 for idx, disk in enumerate(instance.disks):
6083 self.LogInfo("Copying data for disk %d", idx)
6084 result = self.rpc.call_blockdev_assemble(target_node, disk,
6085 instance.name, True, idx)
6086 if result.fail_msg:
6087 self.LogWarning("Can't assemble newly created disk %d: %s",
6088 idx, result.fail_msg)
6089 errs.append(result.fail_msg)
6090 break
6091 dev_path = result.payload
6092 result = self.rpc.call_blockdev_export(source_node, disk,
6093 target_node, dev_path,
6094 cluster_name)
6095 if result.fail_msg:
6096 self.LogWarning("Can't copy data over for disk %d: %s",
6097 idx, result.fail_msg)
6098 errs.append(result.fail_msg)
6099 break
6101 if errs:
6102 self.LogWarning("Some disks failed to copy, aborting")
6103 try:
6104 _RemoveDisks(self, instance, target_node=target_node)
6105 finally:
6106 self.cfg.ReleaseDRBDMinors(instance.name)
6107 raise errors.OpExecError("Errors during disk copy: %s" %
6108 (",".join(errs),))
6110 instance.primary_node = target_node
6111 self.cfg.Update(instance, feedback_fn)
6113 self.LogInfo("Removing the disks on the original node")
6114 _RemoveDisks(self, instance, target_node=source_node)
6116 # Only start the instance if it's marked as up
6117 if instance.admin_up:
6118 self.LogInfo("Starting instance %s on node %s",
6119 instance.name, target_node)
6121 disks_ok, _ = _AssembleInstanceDisks(self, instance,
6122 ignore_secondaries=True)
6123 if not disks_ok:
6124 _ShutdownInstanceDisks(self, instance)
6125 raise errors.OpExecError("Can't activate the instance's disks")
6127 result = self.rpc.call_instance_start(target_node, instance, None, None)
6128 msg = result.fail_msg
6129 if msg:
6130 _ShutdownInstanceDisks(self, instance)
6131 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6132 (instance.name, target_node, msg))
6135 class LUNodeMigrate(LogicalUnit):
6136 """Migrate all instances from a node.
6138 """
6139 HPATH = "node-migrate"
6140 HTYPE = constants.HTYPE_NODE
6141 REQ_BGL = False
6143 def ExpandNames(self):
6144 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6146 self.needed_locks = {
6147 locking.LEVEL_NODE: [self.op.node_name],
6148 }
6150 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6152 # Create tasklets for migrating instances for all instances on this node
6153 names = []
6154 tasklets = []
6156 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
6157 logging.debug("Migrating instance %s", inst.name)
6158 names.append(inst.name)
6160 tasklets.append(TLMigrateInstance(self, inst.name, False))
6162 self.tasklets = tasklets
6164 # Declare instance locks
6165 self.needed_locks[locking.LEVEL_INSTANCE] = names
6167 def DeclareLocks(self, level):
6168 if level == locking.LEVEL_NODE:
6169 self._LockInstancesNodes()
6171 def BuildHooksEnv(self):
6172 """Build hooks env.
6174 This runs on the master, the primary and all the secondaries.
6176 """
6177 env = {
6178 "NODE_NAME": self.op.node_name,
6179 }
6181 nl = [self.cfg.GetMasterNode()]
6183 return (env, nl, nl)
6186 class TLMigrateInstance(Tasklet):
6187 """Tasklet class for instance migration.
6190 @ivar live: whether the migration will be done live or non-live;
6191 this variable is initialized only after CheckPrereq has run
6193 """
6194 def __init__(self, lu, instance_name, cleanup):
6195 """Initializes this class.
6198 Tasklet.__init__(self, lu)
6201 self.instance_name = instance_name
6202 self.cleanup = cleanup
6203 self.live = False # will be overridden later
6205 def CheckPrereq(self):
6206 """Check prerequisites.
6208 This checks that the instance is in the cluster.
6210 """
6211 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
6212 instance = self.cfg.GetInstanceInfo(instance_name)
6213 assert instance is not None
6215 if instance.disk_template != constants.DT_DRBD8:
6216 raise errors.OpPrereqError("Instance's disk layout is not"
6217 " drbd8, cannot migrate.", errors.ECODE_STATE)
6219 secondary_nodes = instance.secondary_nodes
6220 if not secondary_nodes:
6221 raise errors.ConfigurationError("No secondary node but using"
6222 " drbd8 disk template")
6224 i_be = self.cfg.GetClusterInfo().FillBE(instance)
6226 target_node = secondary_nodes[0]
6227 # check memory requirements on the secondary node
6228 _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
6229 instance.name, i_be[constants.BE_MEMORY],
6230 instance.hypervisor)
6232 # check bridge existance
6233 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
6235 if not self.cleanup:
6236 _CheckNodeNotDrained(self.lu, target_node)
6237 result = self.rpc.call_instance_migratable(instance.primary_node,
6238 instance)
6239 result.Raise("Can't migrate, please use failover",
6240 prereq=True, ecode=errors.ECODE_STATE)
6240 prereq=True, ecode=errors.ECODE_STATE)
6242 self.instance = instance
6244 if self.lu.op.live is not None and self.lu.op.mode is not None:
6245 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
6246 " parameters are accepted",
6247 errors.ECODE_INVAL)
6248 if self.lu.op.live is not None:
6249 if self.lu.op.live:
6250 self.lu.op.mode = constants.HT_MIGRATION_LIVE
6251 else:
6252 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
6253 # reset the 'live' parameter to None so that repeated
6254 # invocations of CheckPrereq do not raise an exception
6255 self.lu.op.live = None
6256 elif self.lu.op.mode is None:
6257 # read the default value from the hypervisor
6258 i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
6259 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
6261 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
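# Editorial note: the block above maps the legacy boolean "live" opcode
# parameter onto the newer "mode" parameter (live=True ->
# constants.HT_MIGRATION_LIVE, live=False -> constants.HT_MIGRATION_NONLIVE);
# when neither is given, the hypervisor's HV_MIGRATION_MODE default decides.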
6263 def _WaitUntilSync(self):
6264 """Poll with custom rpc for disk sync.
6266 This uses our own step-based rpc call.
6268 """
6269 self.feedback_fn("* wait until resync is done")
6270 all_done = False
6271 while not all_done:
6272 all_done = True
6273 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
6274 self.nodes_ip,
6275 self.instance.disks)
6276 min_percent = 100
6277 for node, nres in result.items():
6278 nres.Raise("Cannot resync disks on node %s" % node)
6279 node_done, node_percent = nres.payload
6280 all_done = all_done and node_done
6281 if node_percent is not None:
6282 min_percent = min(min_percent, node_percent)
6283 if not all_done:
6284 if min_percent < 100:
6285 self.feedback_fn(" - progress: %.1f%%" % min_percent)
6286 time.sleep(2)
6288 def _EnsureSecondary(self, node):
6289 """Demote a node to secondary.
6291 """
6292 self.feedback_fn("* switching node %s to secondary mode" % node)
6294 for dev in self.instance.disks:
6295 self.cfg.SetDiskID(dev, node)
6297 result = self.rpc.call_blockdev_close(node, self.instance.name,
6298 self.instance.disks)
6299 result.Raise("Cannot change disk to secondary on node %s" % node)
6301 def _GoStandalone(self):
6302 """Disconnect from the network.
6304 """
6305 self.feedback_fn("* changing into standalone mode")
6306 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
6307 self.instance.disks)
6308 for node, nres in result.items():
6309 nres.Raise("Cannot disconnect disks node %s" % node)
6311 def _GoReconnect(self, multimaster):
6312 """Reconnect to the network.
6314 """
6315 if multimaster:
6316 msg = "dual-master"
6317 else:
6318 msg = "single-master"
6319 self.feedback_fn("* changing disks into %s mode" % msg)
6320 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
6321 self.instance.disks,
6322 self.instance.name, multimaster)
6323 for node, nres in result.items():
6324 nres.Raise("Cannot change disks config on node %s" % node)
6326 def _ExecCleanup(self):
6327 """Try to cleanup after a failed migration.
6329 The cleanup is done by:
6330 - check that the instance is running only on one node
6331 (and update the config if needed)
6332 - change disks on its secondary node to secondary
6333 - wait until disks are fully synchronized
6334 - disconnect from the network
6335 - change disks into single-master mode
6336 - wait again until disks are fully synchronized
6338 """
6339 instance = self.instance
6340 target_node = self.target_node
6341 source_node = self.source_node
6343 # check running on only one node
6344 self.feedback_fn("* checking where the instance actually runs"
6345 " (if this hangs, the hypervisor might be in"
6346 " a bad state)")
6347 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
6348 for node, result in ins_l.items():
6349 result.Raise("Can't contact node %s" % node)
6351 runningon_source = instance.name in ins_l[source_node].payload
6352 runningon_target = instance.name in ins_l[target_node].payload
6354 if runningon_source and runningon_target:
6355 raise errors.OpExecError("Instance seems to be running on two nodes,"
6356 " or the hypervisor is confused. You will have"
6357 " to ensure manually that it runs only on one"
6358 " and restart this operation.")
6360 if not (runningon_source or runningon_target):
6361 raise errors.OpExecError("Instance does not seem to be running at all."
6362 " In this case, it's safer to repair by"
6363 " running 'gnt-instance stop' to ensure disk"
6364 " shutdown, and then restarting it.")
6366 if runningon_target:
6367 # the migration has actually succeeded, we need to update the config
6368 self.feedback_fn("* instance running on secondary node (%s),"
6369 " updating config" % target_node)
6370 instance.primary_node = target_node
6371 self.cfg.Update(instance, self.feedback_fn)
6372 demoted_node = source_node
6373 else:
6374 self.feedback_fn("* instance confirmed to be running on its"
6375 " primary node (%s)" % source_node)
6376 demoted_node = target_node
6378 self._EnsureSecondary(demoted_node)
6379 try:
6380 self._WaitUntilSync()
6381 except errors.OpExecError:
6382 # we ignore here errors, since if the device is standalone, it
6383 # won't be able to sync
6384 pass
6385 self._GoStandalone()
6386 self._GoReconnect(False)
6387 self._WaitUntilSync()
6389 self.feedback_fn("* done")
6391 def _RevertDiskStatus(self):
6392 """Try to revert the disk status after a failed migration.
6394 """
6395 target_node = self.target_node
6396 try:
6397 self._EnsureSecondary(target_node)
6398 self._GoStandalone()
6399 self._GoReconnect(False)
6400 self._WaitUntilSync()
6401 except errors.OpExecError, err:
6402 self.lu.LogWarning("Migration failed and I can't reconnect the"
6403 " drives: error '%s'\n"
6404 "Please look and recover the instance status" %
6407 def _AbortMigration(self):
6408 """Call the hypervisor code to abort a started migration.
6410 """
6411 instance = self.instance
6412 target_node = self.target_node
6413 migration_info = self.migration_info
6415 abort_result = self.rpc.call_finalize_migration(target_node,
6416 instance,
6417 migration_info,
6418 False)
6419 abort_msg = abort_result.fail_msg
6420 if abort_msg:
6421 logging.error("Aborting migration failed on target node %s: %s",
6422 target_node, abort_msg)
6423 # Don't raise an exception here, as we still have to try to revert the
6424 # disk status, even if this step failed.
6426 def _ExecMigration(self):
6427 """Migrate an instance.
6429 The migrate is done by:
6430 - change the disks into dual-master mode
6431 - wait until disks are fully synchronized again
6432 - migrate the instance
6433 - change disks on the new secondary node (the old primary) to secondary
6434 - wait until disks are fully synchronized
6435 - change disks into single-master mode
6437 """
6438 instance = self.instance
6439 target_node = self.target_node
6440 source_node = self.source_node
6442 self.feedback_fn("* checking disk consistency between source and target")
6443 for dev in instance.disks:
6444 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6445 raise errors.OpExecError("Disk %s is degraded or not fully"
6446 " synchronized on target node,"
6447 " aborting migrate." % dev.iv_name)
6449 # First get the migration information from the remote node
6450 result = self.rpc.call_migration_info(source_node, instance)
6451 msg = result.fail_msg
6452 if msg:
6453 log_err = ("Failed fetching source migration information from %s: %s" %
6454 (source_node, msg))
6455 logging.error(log_err)
6456 raise errors.OpExecError(log_err)
6458 self.migration_info = migration_info = result.payload
6460 # Then switch the disks to master/master mode
6461 self._EnsureSecondary(target_node)
6462 self._GoStandalone()
6463 self._GoReconnect(True)
6464 self._WaitUntilSync()
6466 self.feedback_fn("* preparing %s to accept the instance" % target_node)
6467 result = self.rpc.call_accept_instance(target_node,
6468 instance,
6469 migration_info,
6470 self.nodes_ip[target_node])
6472 msg = result.fail_msg
6473 if msg:
6474 logging.error("Instance pre-migration failed, trying to revert"
6475 " disk status: %s", msg)
6476 self.feedback_fn("Pre-migration failed, aborting")
6477 self._AbortMigration()
6478 self._RevertDiskStatus()
6479 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6480 (instance.name, msg))
6482 self.feedback_fn("* migrating instance to %s" % target_node)
6484 result = self.rpc.call_instance_migrate(source_node, instance,
6485 self.nodes_ip[target_node],
6486 self.live)
6487 msg = result.fail_msg
6488 if msg:
6489 logging.error("Instance migration failed, trying to revert"
6490 " disk status: %s", msg)
6491 self.feedback_fn("Migration failed, aborting")
6492 self._AbortMigration()
6493 self._RevertDiskStatus()
6494 raise errors.OpExecError("Could not migrate instance %s: %s" %
6495 (instance.name, msg))
6498 instance.primary_node = target_node
6499 # distribute new instance config to the other nodes
6500 self.cfg.Update(instance, self.feedback_fn)
6502 result = self.rpc.call_finalize_migration(target_node,
6503 instance,
6504 migration_info,
6505 True)
6506 msg = result.fail_msg
6507 if msg:
6508 logging.error("Instance migration succeeded, but finalization failed:"
6509 " %s", msg)
6510 raise errors.OpExecError("Could not finalize instance migration: %s" %
6511 msg)
6513 self._EnsureSecondary(source_node)
6514 self._WaitUntilSync()
6515 self._GoStandalone()
6516 self._GoReconnect(False)
6517 self._WaitUntilSync()
6519 self.feedback_fn("* done")
6521 def Exec(self, feedback_fn):
6522 """Perform the migration.
6524 """
6525 feedback_fn("Migrating instance %s" % self.instance.name)
6527 self.feedback_fn = feedback_fn
6529 self.source_node = self.instance.primary_node
6530 self.target_node = self.instance.secondary_nodes[0]
6531 self.all_nodes = [self.source_node, self.target_node]
6532 self.nodes_ip = {
6533 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
6534 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
6535 }
6537 if self.cleanup:
6538 return self._ExecCleanup()
6539 else:
6540 return self._ExecMigration()
6543 def _CreateBlockDev(lu, node, instance, device, force_create,
6544 info, force_open):
6545 """Create a tree of block devices on a given node.
6547 If this device type has to be created on secondaries, create it and
6548 all its children.
6550 If not, just recurse to children keeping the same 'force' value.
6552 @param lu: the lu on whose behalf we execute
6553 @param node: the node on which to create the device
6554 @type instance: L{objects.Instance}
6555 @param instance: the instance which owns the device
6556 @type device: L{objects.Disk}
6557 @param device: the device to create
6558 @type force_create: boolean
6559 @param force_create: whether to force creation of this device; this
6560 will be changed to True whenever we find a device which has
6561 CreateOnSecondary() attribute
6562 @param info: the extra 'metadata' we should attach to the device
6563 (this will be represented as a LVM tag)
6564 @type force_open: boolean
6565 @param force_open: this parameter will be passed to the
6566 L{backend.BlockdevCreate} function where it specifies
6567 whether we run on primary or not, and it affects both
6568 the child assembly and the device own Open() execution
6570 """
6571 if device.CreateOnSecondary():
6572 force_create = True
6574 if device.children:
6575 for child in device.children:
6576 _CreateBlockDev(lu, node, instance, child, force_create,
6577 info, force_open)
6579 if not force_create:
6580 return
6582 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
6585 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
6586 """Create a single block device on a given node.
6588 This will not recurse over children of the device, so they must be
6589 created in advance.
6591 @param lu: the lu on whose behalf we execute
6592 @param node: the node on which to create the device
6593 @type instance: L{objects.Instance}
6594 @param instance: the instance which owns the device
6595 @type device: L{objects.Disk}
6596 @param device: the device to create
6597 @param info: the extra 'metadata' we should attach to the device
6598 (this will be represented as a LVM tag)
6599 @type force_open: boolean
6600 @param force_open: this parameter will be passed to the
6601 L{backend.BlockdevCreate} function where it specifies
6602 whether we run on primary or not, and it affects both
6603 the child assembly and the device own Open() execution
6605 """
6606 lu.cfg.SetDiskID(device, node)
6607 result = lu.rpc.call_blockdev_create(node, device, device.size,
6608 instance.name, force_open, info)
6609 result.Raise("Can't create block device %s on"
6610 " node %s for instance %s" % (device, node, instance.name))
6611 if device.physical_id is None:
6612 device.physical_id = result.payload
6615 def _GenerateUniqueNames(lu, exts):
6616 """Generate a suitable LV name.
6618 This will generate a logical volume name for the given instance.
6620 """
6621 results = []
6622 for val in exts:
6623 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
6624 results.append("%s%s" % (new_id, val))
6625 return results
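# Example (editorial sketch): called as _GenerateUniqueNames(lu,
# [".disk0_data", ".disk0_meta"]) this returns names such as
# "<uuid>.disk0_data" and "<uuid>.disk0_meta", one freshly generated
# unique ID per extension, reserved via the LU's execution context id.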
6628 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
6629 iv_name, p_minor, s_minor):
6630 """Generate a drbd8 device complete with its children.
6632 """
6633 assert len(vgnames) == len(names) == 2
6634 port = lu.cfg.AllocatePort()
6635 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
6636 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
6637 logical_id=(vgnames[0], names[0]))
6638 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6639 logical_id=(vgnames[1], names[1]))
6640 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
6641 logical_id=(primary, secondary, port,
6642 p_minor, s_minor,
6643 shared_secret),
6644 children=[dev_data, dev_meta],
6645 iv_name=iv_name)
6646 return drbd_dev
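# Editorial note: a DRBD8 disk's logical_id is the 6-tuple
# (primary_node, secondary_node, port, p_minor, s_minor, shared_secret);
# this is why LUInstanceRecreateDisks above only rewrites the first two
# entries when the instance is moved to new nodes.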
6649 def _GenerateDiskTemplate(lu, template_name,
6650 instance_name, primary_node,
6651 secondary_nodes, disk_info,
6652 file_storage_dir, file_driver,
6653 base_index, feedback_fn):
6654 """Generate the entire disk layout for a given template type.
6657 #TODO: compute space requirements
6659 vgname = lu.cfg.GetVGName()
6660 disk_count = len(disk_info)
6662 if template_name == constants.DT_DISKLESS:
6664 elif template_name == constants.DT_PLAIN:
6665 if len(secondary_nodes) != 0:
6666 raise errors.ProgrammerError("Wrong template configuration")
6668 names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6669 for i in range(disk_count)])
6670 for idx, disk in enumerate(disk_info):
6671 disk_index = idx + base_index
6672 vg = disk.get("vg", vgname)
6673 feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
6674 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
6675 logical_id=(vg, names[idx]),
6676 iv_name="disk/%d" % disk_index,
6678 disks.append(disk_dev)
6679 elif template_name == constants.DT_DRBD8:
6680 if len(secondary_nodes) != 1:
6681 raise errors.ProgrammerError("Wrong template configuration")
6682 remote_node = secondary_nodes[0]
6683 minors = lu.cfg.AllocateDRBDMinor(
6684 [primary_node, remote_node] * len(disk_info), instance_name)
6686 names = []
6687 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6688 for i in range(disk_count)]):
6689 names.append(lv_prefix + "_data")
6690 names.append(lv_prefix + "_meta")
6691 for idx, disk in enumerate(disk_info):
6692 disk_index = idx + base_index
6693 data_vg = disk.get("vg", vgname)
6694 meta_vg = disk.get("metavg", data_vg)
6695 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
6696 disk["size"], [data_vg, meta_vg],
6697 names[idx*2:idx*2+2],
6698 "disk/%d" % disk_index,
6699 minors[idx*2], minors[idx*2+1])
6700 disk_dev.mode = disk["mode"]
6701 disks.append(disk_dev)
6702 elif template_name == constants.DT_FILE:
6703 if len(secondary_nodes) != 0:
6704 raise errors.ProgrammerError("Wrong template configuration")
6706 opcodes.RequireFileStorage()
6708 for idx, disk in enumerate(disk_info):
6709 disk_index = idx + base_index
6710 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6711 iv_name="disk/%d" % disk_index,
6712 logical_id=(file_driver,
6713 "%s/disk%d" % (file_storage_dir,
6716 disks.append(disk_dev)
6718 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
6722 def _GetInstanceInfoText(instance):
6723 """Compute the text that should be added to the disk's metadata.
6725 """
6726 return "originstname+%s" % instance.name
6729 def _CalcEta(time_taken, written, total_size):
6730 """Calculates the ETA based on size written and total size.
6732 @param time_taken: The time taken so far
6733 @param written: amount written so far
6734 @param total_size: The total size of data to be written
6735 @return: The remaining time in seconds
6737 """
6738 avg_time = time_taken / float(written)
6739 return (total_size - written) * avg_time
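# Worked example (editorial): _CalcEta(120.0, 512, 2048) computes
# avg_time = 120.0 / 512 = 0.234375 seconds per unit and returns
# (2048 - 512) * 0.234375 = 360.0 seconds remaining.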
6742 def _WipeDisks(lu, instance):
6743 """Wipes instance disks.
6745 @type lu: L{LogicalUnit}
6746 @param lu: the logical unit on whose behalf we execute
6747 @type instance: L{objects.Instance}
6748 @param instance: the instance whose disks we should wipe
6749 @return: the success of the wipe
6751 """
6752 node = instance.primary_node
6754 for device in instance.disks:
6755 lu.cfg.SetDiskID(device, node)
6757 logging.info("Pause sync of instance %s disks", instance.name)
6758 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
6760 for idx, success in enumerate(result.payload):
6761 if not success:
6762 logging.warn("pause-sync of instance %s for disks %d failed",
6763 instance.name, idx)
6765 try:
6766 for idx, device in enumerate(instance.disks):
6767 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
6768 # MAX_WIPE_CHUNK at max
6769 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
6770 constants.MIN_WIPE_CHUNK_PERCENT)
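# Editorial sketch of the sizing rule above, assuming the usual values of
# MIN_WIPE_CHUNK_PERCENT (10) and MAX_WIPE_CHUNK (1024 MiB): a 5000 MiB
# disk is wiped in 500 MiB chunks (10% of the disk), while a 20480 MiB
# disk would want 2048 MiB and is capped at 1024 MiB per chunk.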
6771 # we _must_ make this an int, otherwise rounding errors will
6772 # occur, and the upper limit for the chunk size will be exceeded
6773 wipe_chunk_size = int(wipe_chunk_size)
6775 lu.LogInfo("* Wiping disk %d", idx)
6776 logging.info("Wiping disk %d for instance %s, node %s using"
6777 " chunk size %s", idx, instance.name, node, wipe_chunk_size)
6779 offset = 0
6780 size = device.size
6781 last_output = 0
6782 start_time = time.time()
6784 while offset < size:
6785 wipe_size = min(wipe_chunk_size, size - offset)
6786 logging.debug("Wiping disk %d, offset %s, chunk %s",
6787 idx, offset, wipe_size)
6788 result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
6789 result.Raise("Could not wipe disk %d at offset %d for size %d" %
6790 (idx, offset, wipe_size))
6791 offset += wipe_size
6792 now = time.time()
6793 if now - last_output >= 60:
6794 eta = _CalcEta(now - start_time, offset, size)
6795 lu.LogInfo(" - done: %.1f%% ETA: %s" %
6796 (offset / float(size) * 100, utils.FormatSeconds(eta)))
6797 last_output = now
6798 finally:
6799 logging.info("Resume sync of instance %s disks", instance.name)
6801 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
6803 for idx, success in enumerate(result.payload):
6804 if not success:
6805 lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
6806 " look at the status and troubleshoot the issue.", idx)
6807 logging.warn("resume-sync of instance %s for disks %d failed",
6808 instance.name, idx)
6811 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
6812 """Create all disks for an instance.
6814 This abstracts away some work from AddInstance.
6816 @type lu: L{LogicalUnit}
6817 @param lu: the logical unit on whose behalf we execute
6818 @type instance: L{objects.Instance}
6819 @param instance: the instance whose disks we should create
6820 @type to_skip: list
6821 @param to_skip: list of indices to skip
6822 @type target_node: string
6823 @param target_node: if passed, overrides the target node for creation
6825 @return: the success of the creation
6827 """
6828 info = _GetInstanceInfoText(instance)
6829 if target_node is None:
6830 pnode = instance.primary_node
6831 all_nodes = instance.all_nodes
6832 else:
6833 pnode = target_node
6834 all_nodes = [pnode]
6836 if instance.disk_template == constants.DT_FILE:
6837 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6838 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
6840 result.Raise("Failed to create directory '%s' on"
6841 " node %s" % (file_storage_dir, pnode))
6843 # Note: this needs to be kept in sync with adding of disks in
6844 # LUInstanceSetParams
6845 for idx, device in enumerate(instance.disks):
6846 if to_skip and idx in to_skip:
6847 continue
6848 logging.info("Creating volume %s for instance %s",
6849 device.iv_name, instance.name)
6851 for node in all_nodes:
6852 f_create = node == pnode
6853 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
6856 def _RemoveDisks(lu, instance, target_node=None):
6857 """Remove all disks for an instance.
6859 This abstracts away some work from `AddInstance()` and
6860 `RemoveInstance()`. Note that in case some of the devices couldn't
6861 be removed, the removal will continue with the other ones (compare
6862 with `_CreateDisks()`).
6864 @type lu: L{LogicalUnit}
6865 @param lu: the logical unit on whose behalf we execute
6866 @type instance: L{objects.Instance}
6867 @param instance: the instance whose disks we should remove
6868 @type target_node: string
6869 @param target_node: used to override the node on which to remove the disks
6871 @return: the success of the removal
6873 """
6874 logging.info("Removing block devices for instance %s", instance.name)
6876 all_result = True
6877 for device in instance.disks:
6878 if target_node:
6879 edata = [(target_node, device)]
6880 else:
6881 edata = device.ComputeNodeTree(instance.primary_node)
6882 for node, disk in edata:
6883 lu.cfg.SetDiskID(disk, node)
6884 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
6885 if msg:
6886 lu.LogWarning("Could not remove block device %s on node %s,"
6887 " continuing anyway: %s", device.iv_name, node, msg)
6888 all_result = False
6890 if instance.disk_template == constants.DT_FILE:
6891 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6892 if target_node:
6893 tgt = target_node
6894 else:
6895 tgt = instance.primary_node
6896 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
6897 if result.fail_msg:
6898 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
6899 file_storage_dir, instance.primary_node, result.fail_msg)
6900 all_result = False
6902 return all_result
6905 def _ComputeDiskSizePerVG(disk_template, disks):
6906 """Compute disk size requirements in the volume group
6908 """
6909 def _compute(disks, payload):
6910 """Universal algorithm.
6912 """
6913 vgs = {}
6914 for disk in disks:
6915 vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload
6917 return vgs
6919 # Required free disk space as a function of disk and swap space
6920 req_size_dict = {
6921 constants.DT_DISKLESS: {},
6922 constants.DT_PLAIN: _compute(disks, 0),
6923 # 128 MB are added for drbd metadata for each disk
6924 constants.DT_DRBD8: _compute(disks, 128),
6925 constants.DT_FILE: {},
6926 }
6928 if disk_template not in req_size_dict:
6929 raise errors.ProgrammerError("Disk template '%s' size requirement"
6930 " is unknown" % disk_template)
6932 return req_size_dict[disk_template]
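# Worked example (editorial): for two DRBD8 disks
# [{"vg": "xenvg", "size": 1024}, {"vg": "fastvg", "size": 512}]
# this returns {"xenvg": 1152, "fastvg": 640}: each disk's size plus
# 128 MB of DRBD metadata, accumulated per volume group.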
6935 def _ComputeDiskSize(disk_template, disks):
6936 """Compute disk size requirements in the volume group
6938 """
6939 # Required free disk space as a function of disk and swap space
6940 req_size_dict = {
6941 constants.DT_DISKLESS: None,
6942 constants.DT_PLAIN: sum(d["size"] for d in disks),
6943 # 128 MB are added for drbd metadata for each disk
6944 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
6945 constants.DT_FILE: None,
6946 }
6948 if disk_template not in req_size_dict:
6949 raise errors.ProgrammerError("Disk template '%s' size requirement"
6950 " is unknown" % disk_template)
6952 return req_size_dict[disk_template]
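# Worked example (editorial): for disks [{"size": 1024}, {"size": 512}]
# this returns 1536 for DT_PLAIN and 1792 for DT_DRBD8 (128 MB of DRBD
# metadata per disk); DT_DISKLESS and DT_FILE need no volume group space,
# hence None.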
6955 def _FilterVmNodes(lu, nodenames):
6956 """Filters out non-vm_capable nodes from a list.
6958 @type lu: L{LogicalUnit}
6959 @param lu: the logical unit for which we check
6960 @type nodenames: list
6961 @param nodenames: the list of nodes on which we should check
6963 @return: the list of vm-capable nodes
6965 """
6966 non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
6967 return [name for name in nodenames if name not in non_vm_nodes]
6970 def _CheckHVParams(lu, nodenames, hvname, hvparams):
6971 """Hypervisor parameter validation.
6973 This function abstracts the hypervisor parameter validation to be
6974 used in both instance create and instance modify.
6976 @type lu: L{LogicalUnit}
6977 @param lu: the logical unit for which we check
6978 @type nodenames: list
6979 @param nodenames: the list of nodes on which we should check
6980 @type hvname: string
6981 @param hvname: the name of the hypervisor we should use
6982 @type hvparams: dict
6983 @param hvparams: the parameters which we need to check
6984 @raise errors.OpPrereqError: if the parameters are not valid
6986 """
6987 nodenames = _FilterVmNodes(lu, nodenames)
6988 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6989 hvname,
6990 [hvparams])
6991 for node in nodenames:
6992 info = hvinfo[node]
6993 if info.offline:
6994 continue
6995 info.Raise("Hypervisor parameter validation failed on node %s" % node)
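# Editorial usage sketch: callers typically validate the fully filled
# hypervisor dict on all nodes the instance touches, e.g.
#   _CheckHVParams(self, nodenames, self.op.hypervisor, filled_hvp)
# and rely on the exception raised above to abort the operation.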
6998 def _CheckOSParams(lu, required, nodenames, osname, osparams):
6999 """OS parameters validation.
7001 @type lu: L{LogicalUnit}
7002 @param lu: the logical unit for which we check
7003 @type required: boolean
7004 @param required: whether the validation should fail if the OS is not
7005 found
7006 @type nodenames: list
7007 @param nodenames: the list of nodes on which we should check
7008 @type osname: string
7009 @param osname: the name of the OS we should use
7010 @type osparams: dict
7011 @param osparams: the parameters which we need to check
7012 @raise errors.OpPrereqError: if the parameters are not valid
7014 """
7015 nodenames = _FilterVmNodes(lu, nodenames)
7016 result = lu.rpc.call_os_validate(required, nodenames, osname,
7017 [constants.OS_VALIDATE_PARAMETERS],
7018 osparams)
7019 for node, nres in result.items():
7020 # we don't check for offline cases since this should be run only
7021 # against the master node and/or an instance's nodes
7022 nres.Raise("OS Parameters validation failed on node %s" % node)
7023 if not nres.payload:
7024 lu.LogInfo("OS %s not found on node %s, validation skipped",
7028 class LUInstanceCreate(LogicalUnit):
7029 """Create an instance.
7031 """
7032 HPATH = "instance-add"
7033 HTYPE = constants.HTYPE_INSTANCE
7034 REQ_BGL = False
7036 def CheckArguments(self):
7037 """Check arguments.
7039 """
7040 # do not require name_check to ease forward/backward compatibility
7041 # for tools
7042 if self.op.no_install and self.op.start:
7043 self.LogInfo("No-installation mode selected, disabling startup")
7044 self.op.start = False
7045 # validate/normalize the instance name
7046 self.op.instance_name = \
7047 netutils.Hostname.GetNormalizedName(self.op.instance_name)
7049 if self.op.ip_check and not self.op.name_check:
7050 # TODO: make the ip check more flexible and not depend on the name check
7051 raise errors.OpPrereqError("Cannot do ip check without a name check",
7054 # check nics' parameter names
7055 for nic in self.op.nics:
7056 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
7058 # check disks. parameter names and consistent adopt/no-adopt strategy
7059 has_adopt = has_no_adopt = False
7060 for disk in self.op.disks:
7061 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
7062 if "adopt" in disk:
7063 has_adopt = True
7064 else:
7065 has_no_adopt = True
7066 if has_adopt and has_no_adopt:
7067 raise errors.OpPrereqError("Either all disks are adopted or none is",
7068 errors.ECODE_INVAL)
7069 if has_adopt:
7070 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
7071 raise errors.OpPrereqError("Disk adoption is not supported for the"
7072 " '%s' disk template" %
7073 self.op.disk_template,
7074 errors.ECODE_INVAL)
7075 if self.op.iallocator is not None:
7076 raise errors.OpPrereqError("Disk adoption not allowed with an"
7077 " iallocator script", errors.ECODE_INVAL)
7078 if self.op.mode == constants.INSTANCE_IMPORT:
7079 raise errors.OpPrereqError("Disk adoption not allowed for"
7080 " instance import", errors.ECODE_INVAL)
7082 self.adopt_disks = has_adopt
7084 # instance name verification
7085 if self.op.name_check:
7086 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
7087 self.op.instance_name = self.hostname1.name
7088 # used in CheckPrereq for ip ping check
7089 self.check_ip = self.hostname1.ip
7091 self.check_ip = None
7093 # file storage checks
7094 if (self.op.file_driver and
7095 not self.op.file_driver in constants.FILE_DRIVER):
7096 raise errors.OpPrereqError("Invalid file driver name '%s'" %
7097 self.op.file_driver, errors.ECODE_INVAL)
7099 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
7100 raise errors.OpPrereqError("File storage directory path not absolute",
7103 ### Node/iallocator related checks
7104 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
7106 if self.op.pnode is not None:
7107 if self.op.disk_template in constants.DTS_NET_MIRROR:
7108 if self.op.snode is None:
7109 raise errors.OpPrereqError("The networked disk templates need"
7110 " a mirror node", errors.ECODE_INVAL)
7112 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
7114 self.op.snode = None
7116 self._cds = _GetClusterDomainSecret()
7118 if self.op.mode == constants.INSTANCE_IMPORT:
7119 # On import force_variant must be True, because if we forced it at
7120 # initial install, our only chance when importing it back is that it
7121 # works again!
7122 self.op.force_variant = True
7124 if self.op.no_install:
7125 self.LogInfo("No-installation mode has no effect during import")
7127 elif self.op.mode == constants.INSTANCE_CREATE:
7128 if self.op.os_type is None:
7129 raise errors.OpPrereqError("No guest OS specified",
7131 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
7132 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
7133 " installation" % self.op.os_type,
7135 if self.op.disk_template is None:
7136 raise errors.OpPrereqError("No disk template specified",
7139 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7140 # Check handshake to ensure both clusters have the same domain secret
7141 src_handshake = self.op.source_handshake
7142 if not src_handshake:
7143 raise errors.OpPrereqError("Missing source handshake",
7146 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
7147 src_handshake)
7148 if errmsg:
7149 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
7150 errors.ECODE_INVAL)
7152 # Load and check source CA
7153 self.source_x509_ca_pem = self.op.source_x509_ca
7154 if not self.source_x509_ca_pem:
7155 raise errors.OpPrereqError("Missing source X509 CA",
7156 errors.ECODE_INVAL)
7158 try:
7159 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7160 self._cds)
7161 except OpenSSL.crypto.Error, err:
7162 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7163 (err, ), errors.ECODE_INVAL)
7165 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7166 if errcode is not None:
7167 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7170 self.source_x509_ca = cert
7172 src_instance_name = self.op.source_instance_name
7173 if not src_instance_name:
7174 raise errors.OpPrereqError("Missing source instance name",
7177 self.source_instance_name = \
7178 netutils.GetHostname(name=src_instance_name).name
7180 else:
7181 raise errors.OpPrereqError("Invalid instance creation mode %r" %
7182 self.op.mode, errors.ECODE_INVAL)
7184 def ExpandNames(self):
7185 """ExpandNames for CreateInstance.
7187 Figure out the right locks for instance creation.
7189 """
7190 self.needed_locks = {}
7192 instance_name = self.op.instance_name
7193 # this is just a preventive check, but someone might still add this
7194 # instance in the meantime, and creation will fail at lock-add time
7195 if instance_name in self.cfg.GetInstanceList():
7196 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7197 instance_name, errors.ECODE_EXISTS)
7199 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7201 if self.op.iallocator:
7202 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7203 else:
7204 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7205 nodelist = [self.op.pnode]
7206 if self.op.snode is not None:
7207 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7208 nodelist.append(self.op.snode)
7209 self.needed_locks[locking.LEVEL_NODE] = nodelist
7211 # in case of import lock the source node too
7212 if self.op.mode == constants.INSTANCE_IMPORT:
7213 src_node = self.op.src_node
7214 src_path = self.op.src_path
7216 if src_path is None:
7217 self.op.src_path = src_path = self.op.instance_name
7219 if src_node is None:
7220 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7221 self.op.src_node = None
7222 if os.path.isabs(src_path):
7223 raise errors.OpPrereqError("Importing an instance from an absolute"
7224 " path requires a source node option.",
7227 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7228 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7229 self.needed_locks[locking.LEVEL_NODE].append(src_node)
7230 if not os.path.isabs(src_path):
7231 self.op.src_path = src_path = \
7232 utils.PathJoin(constants.EXPORT_DIR, src_path)
7234 def _RunAllocator(self):
7235 """Run the allocator based on input opcode.
7237 """
7238 nics = [n.ToDict() for n in self.nics]
7239 ial = IAllocator(self.cfg, self.rpc,
7240 mode=constants.IALLOCATOR_MODE_ALLOC,
7241 name=self.op.instance_name,
7242 disk_template=self.op.disk_template,
7243 tags=[],
7244 os=self.op.os_type,
7245 vcpus=self.be_full[constants.BE_VCPUS],
7246 mem_size=self.be_full[constants.BE_MEMORY],
7247 disks=self.disks,
7248 nics=nics,
7249 hypervisor=self.op.hypervisor,
7250 )
7252 ial.Run(self.op.iallocator)
7255 raise errors.OpPrereqError("Can't compute nodes using"
7256 " iallocator '%s': %s" %
7257 (self.op.iallocator, ial.info),
7258 errors.ECODE_NORES)
7259 if len(ial.result) != ial.required_nodes:
7260 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7261 " of nodes (%s), required %s" %
7262 (self.op.iallocator, len(ial.result),
7263 ial.required_nodes), errors.ECODE_FAULT)
7264 self.op.pnode = ial.result[0]
7265 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7266 self.op.instance_name, self.op.iallocator,
7267 utils.CommaJoin(ial.result))
7268 if ial.required_nodes == 2:
7269 self.op.snode = ial.result[1]
7271 def BuildHooksEnv(self):
7272 """Build hooks env.
7274 This runs on master, primary and secondary nodes of the instance.
7276 """
7277 env = {
7278 "ADD_MODE": self.op.mode,
7279 }
7280 if self.op.mode == constants.INSTANCE_IMPORT:
7281 env["SRC_NODE"] = self.op.src_node
7282 env["SRC_PATH"] = self.op.src_path
7283 env["SRC_IMAGES"] = self.src_images
7285 env.update(_BuildInstanceHookEnv(
7286 name=self.op.instance_name,
7287 primary_node=self.op.pnode,
7288 secondary_nodes=self.secondaries,
7289 status=self.op.start,
7290 os_type=self.op.os_type,
7291 memory=self.be_full[constants.BE_MEMORY],
7292 vcpus=self.be_full[constants.BE_VCPUS],
7293 nics=_NICListToTuple(self, self.nics),
7294 disk_template=self.op.disk_template,
7295 disks=[(d["size"], d["mode"]) for d in self.disks],
7298 hypervisor_name=self.op.hypervisor,
7301 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
7305 def _ReadExportInfo(self):
7306 """Reads the export information from disk.
7308 It will override the opcode source node and path with the actual
7309 information, if these two were not specified before.
7311 @return: the export information
7313 """
7314 assert self.op.mode == constants.INSTANCE_IMPORT
7316 src_node = self.op.src_node
7317 src_path = self.op.src_path
7319 if src_node is None:
7320 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7321 exp_list = self.rpc.call_export_list(locked_nodes)
7322 found = False
7323 for node in exp_list:
7324 if exp_list[node].fail_msg:
7325 continue
7326 if src_path in exp_list[node].payload:
7327 found = True
7328 self.op.src_node = src_node = node
7329 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7330 src_path)
7331 break
7332 if not found:
7333 raise errors.OpPrereqError("No export found for relative path %s" %
7334 src_path, errors.ECODE_INVAL)
7336 _CheckNodeOnline(self, src_node)
7337 result = self.rpc.call_export_info(src_node, src_path)
7338 result.Raise("No export or invalid export found in dir %s" % src_path)
7340 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7341 if not export_info.has_section(constants.INISECT_EXP):
7342 raise errors.ProgrammerError("Corrupted export config",
7343 errors.ECODE_ENVIRON)
7345 ei_version = export_info.get(constants.INISECT_EXP, "version")
7346 if (int(ei_version) != constants.EXPORT_VERSION):
7347 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7348 (ei_version, constants.EXPORT_VERSION),
7349 errors.ECODE_ENVIRON)
7351 return export_info
7352 def _ReadExportParams(self, einfo):
7353 """Use export parameters as defaults.
7355 In case the opcode doesn't specify (as in override) some instance
7356 parameters, then try to use them from the export information, if
7357 that declares them.
7359 """
7360 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7362 if self.op.disk_template is None:
7363 if einfo.has_option(constants.INISECT_INS, "disk_template"):
7364 self.op.disk_template = einfo.get(constants.INISECT_INS,
7365 "disk_template")
7366 else:
7367 raise errors.OpPrereqError("No disk template specified and the export"
7368 " is missing the disk_template information",
7369 errors.ECODE_INVAL)
7371 if not self.op.disks:
7372 if einfo.has_option(constants.INISECT_INS, "disk_count"):
7373 disks = []
7374 # TODO: import the disk iv_name too
7375 for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7376 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7377 disks.append({"size": disk_sz})
7378 self.op.disks = disks
7379 else:
7380 raise errors.OpPrereqError("No disk info specified and the export"
7381 " is missing the disk information",
7382 errors.ECODE_INVAL)
7384 if (not self.op.nics and
7385 einfo.has_option(constants.INISECT_INS, "nic_count")):
7386 nics = []
7387 for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7388 ndict = {}
7389 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7390 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7391 ndict[name] = v
7392 nics.append(ndict)
7393 self.op.nics = nics
7395 if (self.op.hypervisor is None and
7396 einfo.has_option(constants.INISECT_INS, "hypervisor")):
7397 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7398 if einfo.has_section(constants.INISECT_HYP):
7399 # use the export parameters but do not override the ones
7400 # specified by the user
7401 for name, value in einfo.items(constants.INISECT_HYP):
7402 if name not in self.op.hvparams:
7403 self.op.hvparams[name] = value
7405 if einfo.has_section(constants.INISECT_BEP):
7406 # use the parameters, without overriding
7407 for name, value in einfo.items(constants.INISECT_BEP):
7408 if name not in self.op.beparams:
7409 self.op.beparams[name] = value
7411 # try to read the parameters old style, from the main section
7412 for name in constants.BES_PARAMETERS:
7413 if (name not in self.op.beparams and
7414 einfo.has_option(constants.INISECT_INS, name)):
7415 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7417 if einfo.has_section(constants.INISECT_OSP):
7418 # use the parameters, without overriding
7419 for name, value in einfo.items(constants.INISECT_OSP):
7420 if name not in self.op.osparams:
7421 self.op.osparams[name] = value
7423 def _RevertToDefaults(self, cluster):
7424 """Revert the instance parameters to the default values.
7426 """
7427 # hvparams
7428 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7429 for name in self.op.hvparams.keys():
7430 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7431 del self.op.hvparams[name]
7433 be_defs = cluster.SimpleFillBE({})
7434 for name in self.op.beparams.keys():
7435 if name in be_defs and be_defs[name] == self.op.beparams[name]:
7436 del self.op.beparams[name]
7438 nic_defs = cluster.SimpleFillNIC({})
7439 for nic in self.op.nics:
7440 for name in constants.NICS_PARAMETERS:
7441 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
7442 del nic[name]
7444 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7445 for name in self.op.osparams.keys():
7446 if name in os_defs and os_defs[name] == self.op.osparams[name]:
7447 del self.op.osparams[name]
7449 def CheckPrereq(self):
7450 """Check prerequisites.
7452 """
7453 if self.op.mode == constants.INSTANCE_IMPORT:
7454 export_info = self._ReadExportInfo()
7455 self._ReadExportParams(export_info)
7457 if (not self.cfg.GetVGName() and
7458 self.op.disk_template not in constants.DTS_NOT_LVM):
7459 raise errors.OpPrereqError("Cluster does not support lvm-based"
7460 " instances", errors.ECODE_STATE)
7462 if self.op.hypervisor is None:
7463 self.op.hypervisor = self.cfg.GetHypervisorType()
7465 cluster = self.cfg.GetClusterInfo()
7466 enabled_hvs = cluster.enabled_hypervisors
7467 if self.op.hypervisor not in enabled_hvs:
7468 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7469 " cluster (%s)" % (self.op.hypervisor,
7470 ",".join(enabled_hvs)),
7473 # check hypervisor parameter syntax (locally)
7474 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7475 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
7477 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7478 hv_type.CheckParameterSyntax(filled_hvp)
7479 self.hv_full = filled_hvp
7480 # check that we don't specify global parameters on an instance
7481 _CheckGlobalHvParams(self.op.hvparams)
7483 # fill and remember the beparams dict
7484 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7485 self.be_full = cluster.SimpleFillBE(self.op.beparams)
7487 # build os parameters
7488 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7490 # now that hvp/bep are in final format, let's reset to defaults,
7492 if self.op.identify_defaults:
7493 self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = []
    for idx, nic in enumerate(self.op.nics):
      nic_mode_req = nic.get("mode", None)
      nic_mode = nic_mode_req
      if nic_mode is None:
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

      # in routed mode, for the first nic, the default ip is 'auto'
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
        default_ip_mode = constants.VALUE_AUTO
      else:
        default_ip_mode = constants.VALUE_NONE

      # ip validity checks
      ip = nic.get("ip", default_ip_mode)
      if ip is None or ip.lower() == constants.VALUE_NONE:
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        if not self.op.name_check:
          raise errors.OpPrereqError("IP address set to auto but name checks"
                                     " have been skipped",
                                     errors.ECODE_INVAL)
        nic_ip = self.hostname1.ip
      else:
        if not netutils.IPAddress.IsValid(ip):
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                     errors.ECODE_INVAL)
        nic_ip = ip

      # TODO: check the ip address for uniqueness
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                   errors.ECODE_INVAL)

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        mac = utils.NormalizeAndValidateMac(mac)

        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address %s already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)

      # bridge verification
      bridge = nic.get("bridge", None)
      link = nic.get("link", None)
      if bridge and link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
                                   errors.ECODE_INVAL)
      elif bridge:
        link = bridge

      nicparams = {}
      if nic_mode_req:
        nicparams[constants.NIC_MODE] = nic_mode_req
      if link:
        nicparams[constants.NIC_LINK] = link

      check_params = cluster.SimpleFillNIC(nicparams)
      objects.NIC.CheckParameterSyntax(check_params)
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode, errors.ECODE_INVAL)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
      try:
        size = int(size)
      except (TypeError, ValueError):
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                   errors.ECODE_INVAL)
      data_vg = disk.get("vg", self.cfg.GetVGName())
      meta_vg = disk.get("metavg", data_vg)
      new_disk = {"size": size, "mode": mode, "vg": data_vg, "metavg": meta_vg}
      if "adopt" in disk:
        new_disk["adopt"] = disk["adopt"]
      self.disks.append(new_disk)

    if self.op.mode == constants.INSTANCE_IMPORT:

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks),
                                   errors.ECODE_INVAL)

      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      try:
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
                                   " an integer: %s" % str(err),
                                   errors.ECODE_INVAL)
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node.", errors.ECODE_INVAL)
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      _CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      # Check lv size requirements, if not adopting
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    else: # instead, we must check the adoption data
      all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
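      # Illustrative note (values made up, middle fields unspecified): the
      # lv_list RPC payload maps "vg/lv" names to per-LV data where index 0
      # is the size in MiB and index 2 an "online" (in use) flag, e.g.
      # {"xenvg/inst1.disk0": [10240.0, ..., False]}; that is why the
      # adoption code above rejects entries whose [2] is true and then
      # copies [0] into the disk's size.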

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if constants.ENABLE_FILE_STORAGE:
      # this is needed because os.path.join does not accept None arguments
      if self.op.file_storage_dir is None:
        string_file_storage_dir = ""
      else:
        string_file_storage_dir = self.op.file_storage_dir

      # build the full file storage dir path
      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance)
    else:
      file_storage_dir = ""

    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0,
                                  feedback_fn)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.adopt_disks:
      # rename LVs to the newly-generated names; we need to construct
      # 'fake' LV disks with the old data, plus the new unique_id
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
      rename_to = []
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
        rename_to.append(t_dsk.logical_id)
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
        self.cfg.SetDiskID(t_dsk, pnode_name)
      result = self.rpc.call_blockdev_rename(pnode_name,
                                             zip(tmp_disks, rename_to))
      result.Raise("Failed to rename adopted LVs")

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise
7811 self.cfg.AddInstance(iobj, self.proc.GetECId())
7813 # Declare that we don't want to remove the instance lock anymore, as we've
7814 # added the instance to the config
7815 del self.remove_locks[locking.LEVEL_INSTANCE]
7816 # Unlock all the nodes
7817 if self.op.mode == constants.INSTANCE_IMPORT:
7818 nodes_keep = [self.op.src_node]
7819 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7820 if node != self.op.src_node]
7821 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7822 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7824 self.context.glm.release(locking.LEVEL_NODE)
7825 del self.acquired_locks[locking.LEVEL_NODE]

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        _WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
                                                 self.op.debug_level)
          result.Raise("Could not add os for instance %s"
                       " on node %s" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")

        transfers = []

        for idx, image in enumerate(self.src_images):
          if not image:
            continue

          # FIXME: pass debug option from opcode to backend
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                             constants.IEIO_FILE, (image, ),
                                             constants.IEIO_SCRIPT,
                                             (iobj.disks[idx], idx),
                                             None)
          transfers.append(dt)

        import_result = \
          masterd.instance.TransferInstanceData(self, feedback_fn,
                                                self.op.src_node, pnode_name,
                                                self.pnode.secondary_ip,
                                                iobj, transfers)
        if not compat.all(import_result):
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
        feedback_fn("* preparing remote import...")
        # The source cluster will stop the instance before attempting to make a
        # connection. In some cases stopping an instance can take a long time,
        # hence the shutdown timeout is added to the connection timeout.
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                           self.op.source_shutdown_timeout)
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

        assert iobj.primary_node == self.pnode.name
        disk_results = \
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                        self.source_x509_ca,
                                        self._cds, timeouts)
        if not compat.all(disk_results):
          # TODO: Should the instance still be started, even if some disks
          # failed to import (valid for local imports, too)?
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   self.source_instance_name,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)
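
# Illustrative usage sketch (not from the original source): clients normally
# go through the opcode layer rather than instantiating this LU directly,
# roughly along these lines (parameter names are examples and depend on the
# opcodes module version):
#
#   op = opcodes.OpInstanceCreate(instance_name="web1.example.com",
#                                 mode=constants.INSTANCE_CREATE,
#                                 disk_template=constants.DT_DRBD8,
#                                 disks=[{"size": 10240}], nics=[{}],
#                                 os_type="debootstrap",
#                                 pnode="node1", snode="node2")
#
# The masterd processor then drives ExpandNames/CheckPrereq/Exec as usual.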

class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise("Can't get node information from %s" % node)

    if instance.name not in node_insts.payload:
      if instance.admin_up:
        state = "ERROR_down"
      else:
        state = "ADMIN_down"
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (instance.name, state))

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)

def _GetInstanceConsole(cluster, instance):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @rtype: dict

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  # beparams and hvparams are passed separately, to avoid editing the
  # instance and then saving the defaults in the instance itself.
  hvparams = cluster.FillHV(instance)
  beparams = cluster.FillBE(instance)
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)

  assert console.instance == instance.name
  assert console.Validate()

  return console.ToDict()
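
# Illustrative note (shape depends on the hypervisor and on the objects
# module version): the dict serialized above carries the console kind plus
# whatever is needed to reach it, e.g. something roughly like
# {"instance": "web1", "kind": "ssh", "host": "node1",
#  "command": ["xm", "console", "web1"]} for a Xen instance.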

class LUInstanceReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
                                  self.op.iallocator)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    elif self.op.remote_node is not None:
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      self.op.remote_node = remote_node

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node,
                                   self.op.disks, False, self.op.early_release)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    nl = [
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl

class TLReplaceDisks(Tasklet):
  """Replaces disks for an instance.

  Note: Locking is not within the scope of this class.

  """
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
               disks, delay_iallocator, early_release):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
    self.remote_node = remote_node
    self.disks = disks
    self.delay_iallocator = delay_iallocator
    self.early_release = early_release

    # Runtime data
    self.instance = None
    self.new_node = None
    self.target_node = None
    self.other_node = None
    self.remote_node_info = None
    self.node_secondary_ip = None

  @staticmethod
  def CheckArguments(mode, remote_node, iallocator):
    """Helper function for users of this class.

    """
    # check for valid parameter combination
    if mode == constants.REPLACE_DISK_CHG:
      if remote_node is None and iallocator is None:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given", errors.ECODE_INVAL)

      if remote_node is not None and iallocator is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both", errors.ECODE_INVAL)

    elif remote_node is not None or iallocator is not None:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node", errors.ECODE_INVAL)

  @staticmethod
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(lu.cfg, lu.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=instance_name,
                     relocate_from=relocate_from)

    ial.Run(iallocator_name)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (iallocator_name, ial.info),
                                 errors.ECODE_NORES)

    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (iallocator_name,
                                  len(ial.result), ial.required_nodes),
                                 errors.ECODE_FAULT)

    remote_node_name = ial.result[0]

    lu.LogInfo("Selected new secondary for instance '%s': %s",
               instance_name, remote_node_name)

    return remote_node_name

  def _FindFaultyDisks(self, node_name):
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                    node_name, True)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances", errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes),
                                 errors.ECODE_FAULT)

    if not self.delay_iallocator:
      self._CheckPrereq2()

  def _CheckPrereq2(self):
    """Check prerequisites, second part.

    This function should always be part of CheckPrereq. It was separated and is
    now called from Exec because during node evacuation iallocator was only
    called with an unmodified cluster model, not taking planned changes into
    account.

    """
    instance = self.instance
    secondary_node = instance.secondary_nodes[0]

    if self.iallocator_name is None:
      remote_node = self.remote_node
    else:
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
                                       instance.name, instance.secondary_nodes)

    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None

    if remote_node == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.", errors.ECODE_INVAL)

    if remote_node == secondary_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.",
                                 errors.ECODE_INVAL)

    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
                                    constants.REPLACE_DISK_CHG):
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
                                 errors.ECODE_INVAL)

    if self.mode == constants.REPLACE_DISK_AUTO:
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
      faulty_secondary = self._FindFaultyDisks(secondary_node)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and can not be repaired"
                                   " automatically" % self.instance_name,
                                   errors.ECODE_STATE)

      if faulty_primary:
        self.disks = faulty_primary
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]
      else:
        self.disks = []
        check_nodes = []

    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_SEC:
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_CHG:
        self.new_node = remote_node
        self.other_node = instance.primary_node
        self.target_node = secondary_node
        check_nodes = [self.new_node, self.other_node]

        _CheckNodeNotDrained(self.lu, remote_node)
        _CheckNodeVmCapable(self.lu, remote_node)

        old_node_info = self.cfg.GetNodeInfo(secondary_node)
        assert old_node_info is not None
        if old_node_info.offline and not self.early_release:
          # doesn't make sense to delay the release
          self.early_release = True
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
                          " early-release mode", secondary_node)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

    # If not specified all disks should be replaced
    if not self.disks:
      self.disks = range(len(self.instance.disks))

    for node in check_nodes:
      _CheckNodeOnline(self.lu, node)

    touched_nodes = frozenset([self.new_node, self.other_node,
                               self.target_node])

    if self.lu.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # Release unneeded node locks
      for name in self.lu.acquired_locks[locking.LEVEL_NODE]:
        if name not in touched_nodes:
          self._ReleaseNodeLock(name)

    # Check whether disks are valid
    for disk_idx in self.disks:
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    self.node_secondary_ip = \
      dict((node_name, self.cfg.GetNodeInfo(node_name).secondary_ip)
           for node_name in touched_nodes
           if node_name is not None)
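
  # Illustrative note (addresses made up): after the block above,
  # node_secondary_ip maps every touched node to its secondary
  # (replication) address, e.g. {"node1": "192.168.1.1",
  # "node2": "192.168.1.2"}; the None entries that touched_nodes may
  # contain (e.g. new_node when the secondary is not being changed) are
  # filtered out by the trailing condition.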

  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    if self.delay_iallocator:
      self._CheckPrereq2()

    if (self.lu.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET and
        __debug__):
      # Verify owned locks before starting operation
      owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
      assert set(owned_locks) == set(self.node_secondary_ip), \
        "Not owning the correct locks: %s" % (owned_locks, )

    if not self.disks:
      feedback_fn("No disks need replacement")
      return

    feedback_fn("Replacing disk(s) %s for %s" %
                (utils.CommaJoin(self.disks), self.instance.name))

    activate_disks = (not self.instance.admin_up)

    # Activate the instance disks if we're replacing them on a down instance
    if activate_disks:
      _StartInstanceDisks(self.lu, self.instance, True)

    try:
      # Should we replace the secondary node?
      if self.new_node is not None:
        fn = self._ExecDrbd8Secondary
      else:
        fn = self._ExecDrbd8DiskOnly

      result = fn(feedback_fn)
    finally:
      # Deactivate the instance disks if we're replacing them on a
      # down instance
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)

    if __debug__:
      # Verify owned locks
      owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
      assert ((self.early_release and not owned_locks) or
              (not self.early_release and
               set(owned_locks) == set(self.node_secondary_ip))), \
        ("Not owning the correct locks, early_release=%s, owned=%r" %
         (self.early_release, owned_locks))

    return result

  def _CheckVolumeGroup(self, nodes):
    self.lu.LogInfo("Checking volume groups")

    vgname = self.cfg.GetVGName()

    # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))

  def _CheckDisksExistence(self, nodes):
    # Check disk existence
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                   ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))

  def _CreateNewStorage(self, node_name):
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      vg_data = dev.children[0].logical_id[0]
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vg_data, names[0]))
      vg_meta = dev.children[1].logical_id[0]
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vg_meta, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names
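
  # Illustrative note (names made up): iv_names maps each replaced disk's
  # iv_name to its device and LV sets, e.g.
  #   {"disk/0": (<drbd Disk>, [old data LV, old meta LV],
  #               [new data LV, new meta LV])},
  # and the later steps use this mapping for the detach/rename/attach dance
  # and for removing the old storage once the mirror is healthy again.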

  def _CheckDevices(self, node_name, iv_names):
    for name, (dev, _, _) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = self.rpc.call_blockdev_find(node_name, dev)

      msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

  def _RemoveOldStorage(self, node_name, iv_names):
    for name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
                             hint="remove unused LVs manually")

  def _ReleaseNodeLock(self, node_name):
    """Releases the lock for a given node."""
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)

  def _ExecDrbd8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

      dev.children = new_lvs

      self.cfg.Update(self.instance, feedback_fn)

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release both node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      self._ReleaseNodeLock([self.target_node, self.other_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
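
  # Illustrative walk-through of the rename dance above (LV names made up):
  # starting from the old data LV "xenvg/uuid1.disk0_data" and a freshly
  # created "xenvg/uuid2.disk0_data", the code first renames
  # uuid1.disk0_data -> uuid1.disk0_data_replaced-<time_t> and then
  # uuid2.disk0_data -> uuid1.disk0_data, so the DRBD device re-attaches
  # children under the exact names it already knows; the _replaced-*
  # leftovers are deleted later in the "Removing old storage" step.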

  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: dbrd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           "node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
                                               self.instance.disks)\
                                              [self.instance.primary_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      self._ReleaseNodeLock([self.instance.primary_node,
                             self.target_node,
                             self.new_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)
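
  # Illustrative note (node names and numbers made up): a DRBD8 disk's
  # logical_id is the 6-tuple (nodeA, nodeB, port, minorA, minorB, secret).
  # Replacing the secondary swaps nodeB: for example
  # ("node1", "node2", 11000, 0, 0, "secret") first becomes the port-less
  # ("node1", "node3", None, 0, 1, "secret") for the standalone bring-up on
  # the new node, then the networked variant with the original port is
  # written to the configuration for the final attach.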

class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))

class LUNodeEvacStrategy(NoHooksLU):
  """Computes the node evacuation strategy.

  """
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
    self.needed_locks = locks = {}
    if self.op.remote_node is None:
      locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]

  def Exec(self, feedback_fn):
    instances = []
    for node in self.op.nodes:
      instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
    if not instances:
      return []

    if self.op.remote_node is not None:
      result = []
      for i in instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)
        result.append([i.name, self.op.remote_node])
    else:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=constants.IALLOCATOR_MODE_MEVAC,
                       evac_nodes=self.op.nodes)
      ial.Run(self.op.iallocator, validate=True)
      if not ial.success:
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
                                 errors.ECODE_NORES)
      result = ial.result
    return result
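
# Illustrative note (names made up): the returned strategy is a list of
# [instance_name, new_secondary] pairs, e.g. [["web1", "node3"],
# ["db1", "node4"]], built either directly from the requested remote node
# or from the iallocator's multi-evacuation (MEVAC) result.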

class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template != constants.DT_FILE:
      # TODO: check the free disk space for file, when that feature
      # will be supported
      _CheckNodesFreeDiskPerVG(self, nodenames,
                               self.disk.ComputeGrowth(self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)

    # TODO: Rewrite code to work properly
    # DRBD goes into sync mode for a short amount of time after executing the
    # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
    # calling "resize" in sync mode fails. Sleeping for a short amount of
    # time is a work-around.
    time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested.")

class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODE] = []
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking and level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
                             for name in self.wanted_names]

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result

class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      _CheckGlobalHvParams(self.op.hvparams)

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                     errors.ECODE_INVAL)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing",
                                     errors.ECODE_INVAL)
        try:
          size = int(size)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err), errors.ECODE_INVAL)
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk", errors.ECODE_INVAL)

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_NET_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not netutils.IPAddress.IsValid(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
                                       errors.ECODE_INVAL)

      nic_bridge = nic_dict.get('bridge', None)
      nic_link = nic_dict.get('link', None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict['bridge'] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict['link'] = None

      if nic_op == constants.DDM_ADD:
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)

        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic",
                                     errors.ECODE_INVAL)

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        mac = nic_override[constants.DDM_ADD]['mac']
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl
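
  # Illustrative sketch (not part of the original module): each entry
  # appended to args['nics'] above is an (ip, mac, mode, link) tuple, e.g.
  # ("192.0.2.10", "aa:00:00:35:af:01", "bridged", "xen-br0"); these are
  # hypothetical values, shown only to document the tuple layout handed to
  # _BuildInstanceHookEnvByObject via the override argument.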
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # OS change
    if self.op.os_name and not self.op.force:
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                      self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    if self.op.disk_template:
      if instance.disk_template == self.op.disk_template:
        raise errors.OpPrereqError("Instance already has disk template %s" %
                                   instance.disk_template, errors.ECODE_INVAL)

      if (instance.disk_template,
          self.op.disk_template) not in self._DISK_CONVERSIONS:
        raise errors.OpPrereqError("Unsupported disk template conversion from"
                                   " %s to %s" % (instance.disk_template,
                                                  self.op.disk_template),
                                   errors.ECODE_INVAL)
      _CheckInstanceDown(self, instance, "cannot change disk template")
      if self.op.disk_template in constants.DTS_NET_MIRROR:
        if self.op.remote_node == pnode:
          raise errors.OpPrereqError("Given new secondary node %s is the same"
                                     " as the primary node of the instance" %
                                     self.op.remote_node, errors.ECODE_STATE)
        _CheckNodeOnline(self, self.op.remote_node)
        _CheckNodeNotDrained(self, self.op.remote_node)
        # FIXME: here we assume that the old instance type is DT_PLAIN
        assert instance.disk_template == constants.DT_PLAIN
        disks = [{"size": d.size, "vg": d.logical_id[0]}
                 for d in instance.disks]
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
                                   use_none=True)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
    be_old = cluster.FillBE(instance)

    # osparams processing
    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    self.warn = []
    if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
        be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very
          # probable, and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    pninfo.payload['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem,
                                     errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          if not isinstance(nres.payload.get('memory_free', None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)
    # NIC processing
    self.nic_pnew = {}
    self.nic_pinst = {}
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
                                     errors.ECODE_INVAL)
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if not instance.nics:
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
                                     " no NICs" % nic_op,
                                     errors.ECODE_INVAL)
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics) - 1),
                                     errors.ECODE_INVAL)
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      if 'bridge' in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']

      new_nic_params = _GetUpdatedParams(old_nic_params,
                                         update_params_dict)
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.op.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        if 'ip' in nic_dict:
          nic_ip = nic_dict['ip']
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError('Cannot set the nic ip to None'
                                     ' on a routed nic', errors.ECODE_INVAL)
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None',
                                     errors.ECODE_INVAL)
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac address
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
        else:
          # or validate/reserve the current one
          try:
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
          except errors.ReservationError:
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac,
                                       errors.ECODE_NOTUNIQUE)
    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances",
                                 errors.ECODE_INVAL)
    for disk_op, _ in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance", errors.ECODE_INVAL)
        _CheckInstanceDown(self, instance, "cannot remove disks")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS,
                                   errors.ECODE_STATE)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)),
                                     errors.ECODE_INVAL)
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{"size": d.size, "mode": d.mode,
                  "vg": d.logical_id[0]} for d in instance.disks]
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                      instance.name, pnode, [snode],
                                      disk_info, None, None, 0, feedback_fn)
    info = _GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in new_disks:
      # unfortunately this is... not too nice
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                            info, True)
      for child in disk.children:
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    for disk in new_disks:
      for node in [pnode, snode]:
        f_create = node == pnode
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # disks are created, waiting for sync
    disk_abort = not _WaitForSync(self, instance,
                                  oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance
    assert len(instance.secondary_nodes) == 1
    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = instance.disks
    new_disks = [d.children[0] for d in old_disks]

    # copy over size and mode
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    self.cfg.Update(instance, feedback_fn)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base, feedback_fn)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))

    if self.op.disk_template:
      r_shut = _ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        ip = nic_dict.get('ip', None)
        nicparams = self.nic_pinst[constants.DDM_ADD]
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
                       (new_nic.mac, new_nic.ip,
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
                       )))
      else:
        for key in 'mac', 'ip':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
        if nic_op in self.nic_pinst:
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
        for key, val in nic_dict.iteritems():
          result.append(("nic.%s/%d" % (key, nic_op), val))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    self.cfg.Update(instance, feedback_fn)

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }
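
  # Illustrative note (not part of the original module): _DISK_CONVERSIONS
  # is a dispatch table keyed by (old_template, new_template) pairs; Exec()
  # looks the handler up as
  #   self._DISK_CONVERSIONS[(instance.disk_template,
  #                           self.op.disk_template)](self, feedback_fn)
  # so supporting a further conversion only requires a new method plus one
  # entry here (and the matching prereq check in CheckPrereq).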
class LUBackupQuery(NoHooksLU):
  """Query the exports list

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].fail_msg:
        result[node] = False
      else:
        result[node] = rpcresult[node].payload

    return result
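
  # Illustrative sketch (not part of the original module): with two nodes,
  # one reachable and one failing, the dict returned above could look like
  # (hypothetical names):
  #   {"node1.example.com": ["inst1.example.com", "inst2.example.com"],
  #    "node2.example.com": False}
  # i.e. a node whose RPC failed is marked with False instead of a list.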
class LUBackupPrepare(NoHooksLU):
  """Prepares an instance for an export and returns useful information.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self._cds = _GetClusterDomainSecret()

  def Exec(self, feedback_fn):
    """Prepares an instance for an export.

    """
    instance = self.instance

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      salt = utils.GenerateSecret(8)

      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
      result = self.rpc.call_x509_cert_create(instance.primary_node,
                                              constants.RIE_CERT_VALIDITY)
      result.Raise("Can't create X509 key and certificate on %s" % result.node)

      (name, cert_pem) = result.payload

      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert_pem)

      return {
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
                          salt),
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
        }

    return None
class LUBackupExport(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.x509_key_name = self.op.x509_key_name
    self.dest_x509_ca_pem = self.op.destination_x509_ca

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      if not self.x509_key_name:
        raise errors.OpPrereqError("Missing X509 key name for encryption",
                                   errors.ECODE_INVAL)

      if not self.dest_x509_ca_pem:
        raise errors.OpPrereqError("Missing destination X509 CA",
                                   errors.ECODE_INVAL)
  def ExpandNames(self):
    self._ExpandAndLockInstance()

    # Lock all nodes for local exports
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      # FIXME: lock only instance primary and destination node
      #
      # Sad but true, for now we have to lock all nodes, as we don't know
      # where the previous export might be, and in this LU we search for it
      # and remove it from its current node. In the future we could fix this
      # by:
      #  - making a tasklet to search (share-lock all), then create the
      #    new one, then one to remove, after
      #  - removing the removal operation altogether
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.
  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_MODE": self.op.mode,
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      # TODO: Generic function for boolean env variables
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      nl.append(self.op.target_node)

    return env, nl, nl
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    if (self.op.remove_instance and self.instance.admin_up and
        not self.op.shutdown):
      raise errors.OpPrereqError("Can not remove instance without shutting it"
                                 " down before")

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
      assert self.dst_node is not None

      _CheckNodeOnline(self, self.dst_node.name)
      _CheckNodeNotDrained(self, self.dst_node.name)

      self._cds = None
      self.dest_disk_info = None
      self.dest_x509_ca = None

    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
      self.dst_node = None

      if len(self.op.target_node) != len(self.instance.disks):
        raise errors.OpPrereqError(("Received destination information for %s"
                                    " disks, but instance %s has %s disks") %
                                   (len(self.op.target_node), instance_name,
                                    len(self.instance.disks)),
                                   errors.ECODE_INVAL)

      cds = _GetClusterDomainSecret()

      # Check X509 key name
      try:
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)

      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                   errors.ECODE_INVAL)

      # Load and verify CA
      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
                                   (msg, ), errors.ECODE_INVAL)

      self.dest_x509_ca = cert

      # Verify target information
      disk_info = []
      for idx, disk_data in enumerate(self.op.target_node):
        try:
          (host, port, magic) = \
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
        except errors.GenericError, err:
          raise errors.OpPrereqError("Target info for disk %s: %s" %
                                     (idx, err), errors.ECODE_INVAL)

        disk_info.append((host, port, magic))

      assert len(disk_info) == len(self.op.target_node)
      self.dest_disk_info = disk_info

    else:
      raise errors.ProgrammerError("Unhandled export mode %r" %
                                   self.op.mode)

    # instance disk type verification
    # TODO: Implement export support for file-based disks
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks", errors.ECODE_INVAL)
  def _CleanupExports(self, feedback_fn):
    """Removes exports of current instance from all other nodes.

    If an instance in a cluster with nodes A..D was exported to node C, its
    exports will be removed from the nodes A, B and D.

    """
    assert self.op.mode != constants.EXPORT_MODE_REMOTE

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(self.dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpBackupQuery
    # substitutes an empty list with the full cluster node list.
    iname = self.instance.name
    if nodelist:
      feedback_fn("Removing old exports for instance %s" % iname)
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)
  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    assert self.op.mode in constants.EXPORT_MODES

    instance = self.instance
    src_node = instance.primary_node

    if self.op.shutdown:
      # shutdown the instance, but not the disks
      feedback_fn("Shutting down instance %s" % instance.name)
      result = self.rpc.call_instance_shutdown(src_node, instance,
                                               self.op.shutdown_timeout)
      # TODO: Maybe ignore failures if ignore_remove_failures is set
      result.Raise("Could not shutdown instance %s on"
                   " node %s" % (instance.name, src_node))

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    activate_disks = (not instance.admin_up)

    if activate_disks:
      # Activate the instance disks if we're exporting a stopped instance
      feedback_fn("Activating disks for %s" % instance.name)
      _StartInstanceDisks(self, instance, None)

    try:
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
                                                     instance)

      helper.CreateSnapshots()
      try:
        if (self.op.shutdown and instance.admin_up and
            not self.op.remove_instance):
          assert not activate_disks
          feedback_fn("Starting instance %s" % instance.name)
          result = self.rpc.call_instance_start(src_node, instance, None, None)
          msg = result.fail_msg
          if msg:
            feedback_fn("Failed to start instance: %s" % msg)
            _ShutdownInstanceDisks(self, instance)
            raise errors.OpExecError("Could not start instance: %s" % msg)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          (key_name, _, _) = self.x509_key_name

          dest_ca_pem = \
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                            self.dest_x509_ca)

          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
                                                     key_name, dest_ca_pem,
                                                     timeouts)
      finally:
        helper.Cleanup()

      # Check for backwards compatibility
      assert len(dresults) == len(instance.disks)
      assert compat.all(isinstance(i, bool) for i in dresults), \
             "Not all results are boolean: %r" % dresults

    finally:
      if activate_disks:
        feedback_fn("Deactivating disks for %s" % instance.name)
        _ShutdownInstanceDisks(self, instance)

    if not (compat.all(dresults) and fin_resu):
      failures = []
      if not fin_resu:
        failures.append("export finalization")
      if not compat.all(dresults):
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
                               if not dsk)
        failures.append("disk export: disk(s) %s" % fdsk)

      raise errors.OpExecError("Export failed, errors in %s" %
                               utils.CommaJoin(failures))

    # At this point, the export was successful, we can cleanup/finish

    # Remove instance if requested
    if self.op.remove_instance:
      feedback_fn("Removing instance %s" % instance.name)
      _RemoveInstance(self, feedback_fn, instance,
                      self.op.ignore_remove_failures)

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self._CleanupExports(feedback_fn)

    return fin_resu, dresults
class LUBackupRemove(NoHooksLU):
  """Remove exports related to the named instance.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but
    # we don't need to lock the instance itself, as nothing will happen to it
    # (and we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed
    # in. This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        msg = result.fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not an existing node group
    already.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "GROUP_NAME": self.op.group_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]
class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information later on.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.nodes,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1

      # Try to get all affected nodes' groups without having the group or node
      # lock yet. Needs verification later in the code flow.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)

      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.acquired_locks[locking.LEVEL_NODE]) ==
            frozenset(self.op.nodes))

    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
    actual_locks = self.acquired_locks[locking.LEVEL_NODEGROUP]
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were"
                               " acquired, current groups are '%s',"
                               " used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))

    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
                                             for node in self.op.nodes],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances:"
                        " %s", fmt_new_splits)

        if previous_splits:
          self.LogWarning("In addition, these already-split instances continue"
                          " to be split across groups: %s",
                          utils.CommaJoin(utils.NiceSort(previous_splits)))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    for node in self.op.nodes:
      self.node_data[node].group = self.group_uuid

    # FIXME: Depends on side-effects of modifying the result of
    # C{cfg.GetAllNodesInfo}

    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic
    operation, and returns information about split instances after applying
    the set of changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_NET_MIRROR
    are considered.

    @type changes: list of (node_name, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and result split
      as a consequence of this change, and a list of instances that were
      previously split and this change does not fix.

    """
    changed_nodes = dict((node, group) for node, group in changes
                         if node_data[node].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    def InstanceNodes(instance):
      return [instance.primary_node] + list(instance.secondary_nodes)

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_NET_MIRROR:
        continue

      instance_nodes = InstanceNodes(inst)

      if len(set(node_data[node].group for node in instance_nodes)) > 1:
        previously_split_instances.add(inst.name)

      if len(set(changed_nodes.get(node, node_data[node].group)
                 for node in instance_nodes)) > 1:
        all_split_instances.add(inst.name)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))
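
  # Worked example (illustrative, hypothetical names): given a DRBD instance
  # with its primary on "node1" and its secondary on "node2", both currently
  # in group "g1", the call
  #   CheckAssignmentForSplitInstances([("node1", "g2")], node_data,
  #                                    instance_data)
  # would report the instance in the first returned list (newly split),
  # since its nodes would end up spanning groups "g2" and "g1".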
class _GroupQuery(_QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for
    # the latter GetAllInstancesInfo() is not enough, for we have to go
    # through instance->node. Hence, we will need to process nodes even if we
    # only need instance information.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.name)
          node_to_group[node.name] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.name)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData([self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances)
class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)
class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.alloc_policy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.group = self.cfg.GetNodeGroup(self.group_uuid)

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    self.cfg.Update(self.group, feedback_fn)
    return result
class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.name
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group,"
                                 " cannot be removed" %
                                 self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "GROUP_NAME": self.op.group_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given old_name exists as a node group, and that
    new_name doesn't.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OLD_NAME": self.op.old_name,
      "NEW_NAME": self.op.new_name,
      }

    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    for node in all_nodes.values():
      if node.group == self.group_uuid:
        run_nodes.append(node.name)

    return env, run_nodes, run_nodes

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.old_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
    elif self.op.kind == constants.TAG_INSTANCE:
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name

    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
    # not possible to acquire the BGL based on opcode parameters)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind), errors.ECODE_INVAL)
class LUTagsGet(TagsLU):
  """Returns the tags of a given object.

  """
  REQ_BGL = False

  def ExpandNames(self):
    TagsLU.ExpandNames(self)

    # Share locks as this is only a read operation
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())
class LUTagsSearch(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results
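
  # Illustrative sketch (not part of the original module): searching for the
  # pattern "^web" could return, with hypothetical names:
  #   [("/cluster", "webcluster"),
  #    ("/instances/web1.example.com", "webfarm")]
  # i.e. (path, tag) pairs for every tag matched via re.search above, across
  # the cluster, instance and node tag namespaces.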
class LUTagsSet(TagsLU):
  """Sets a tag on a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)
class LUTagsDel(TagsLU):
  """Delete a list of tags from a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()

    diff_tags = del_tags - cur_tags
    if diff_tags:
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (utils.CommaJoin(diff_names), ),
                                 errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def _TestDelay(self):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)

  def Exec(self, feedback_fn):
    """Execute the test delay opcode, with the wanted repetitions.

    """
    if self.op.repeat == 0:
      self._TestDelay()
    else:
      top_value = self.op.repeat - 1
      for i in range(self.op.repeat):
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
        self._TestDelay()
class LUTestJqueue(NoHooksLU):
  """Utility LU to test some aspects of the job queue.

  """
  REQ_BGL = False

  # Must be lower than default timeout for WaitForJobChange to see whether it
  # notices changed jobs
  _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
    """Opens a Unix socket and waits for another program to connect.

    @type cb: callable
    @param cb: Callback to send socket name to client
    @type errcls: class
    @param errcls: Exception class to use for errors

    """
    # Using a temporary directory as there's no easy way to create temporary
    # sockets without writing a custom loop around tempfile.mktemp and
    # socket.bind
    tmpdir = tempfile.mkdtemp()
    try:
      tmpsock = utils.PathJoin(tmpdir, "sock")

      logging.debug("Creating temporary socket at %s", tmpsock)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      try:
        sock.bind(tmpsock)
        sock.listen(1)

        # Send details to client
        cb(tmpsock)

        # Wait for client to connect before continuing
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
        try:
          (conn, _) = sock.accept()
        except socket.error, err:
          raise errcls("Client didn't connect in time (%s)" % err)
      finally:
        sock.close()
    finally:
      # Remove as soon as client is connected
      shutil.rmtree(tmpdir)

    # Wait for client to close
    try:
      try:
        # pylint: disable-msg=E1101
        # Instance of '_socketobject' has no ... member
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
        conn.recv(1)
      except socket.error, err:
        raise errcls("Client failed to confirm notification (%s)" % err)
    finally:
      conn.close()
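
  # Illustrative client-side sketch (not part of the original module): a
  # test client that receives the socket path via the job log would connect
  # and then close to acknowledge, roughly:
  #
  #   client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
  #   client.connect(sockname)   # unblocks sock.accept() above
  #   client.close()             # unblocks conn.recv(1) above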
  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular
    # reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
      # Report how many test messages have been sent
      self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data
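
  # For reference, the dict stored in self.in_data has roughly this shape
  # (sketch only; keys abbreviated):
  #   {"version": constants.IALLOCATOR_VERSION,
  #    "cluster_name": ..., "cluster_tags": [...],
  #    "enabled_hypervisors": [...],
  #    "nodegroups": {uuid: {...}},
  #    "nodes": {name: {...}},
  #    "instances": {name: {...}}}
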
  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    ng = {}
    for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
      ng[guuid] = {
        "name": gdata.name,
        "alloc_policy": gdata.alloc_policy,
        }
    return ng

  @staticmethod
  def _ComputeBasicNodeData(node_cfg):
    """Compute global node data.

    @rtype: dict
    @returns: a dict of name: (node dict, node config)

    """
    node_results = {}
    for ninfo in node_cfg.values():
      # fill in static (config-based) values
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        "group": ninfo.group,
        "master_capable": ninfo.master_capable,
        "vm_capable": ninfo.vm_capable,
        }

      node_results[ninfo.name] = pnr

    return node_results

  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data
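
  # A single entry of the returned mapping therefore looks roughly like
  # this (all values are illustrative):
  #   "inst1.example.com": {
  #     "tags": [], "admin_up": True, "vcpus": 1, "memory": 1024,
  #     "os": "debian-image", "nodes": ["node1", "node2"], "nics": [...],
  #     "disks": [{"size": 1024, "mode": "w"}], "disk_template": "drbd",
  #     "hypervisor": "xen-pvm", "disk_space_total": ...}
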
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)
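
  # After this, self.in_text holds the complete serialized request document
  # that is handed to the external script, e.g. (abridged sketch):
  #   {"version": ..., "cluster_name": ...,
  #    "request": {"type": ..., "name": ..., ...},
  #    "nodes": {...}, "instances": {...}}
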
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()
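
  # Caller sketch (hypothetical): after Run() the validated attributes are
  # used instead of the raw output text, e.g.:
  #
  #   ial.Run("hail")
  #   if not ial.success:
  #     raise errors.OpPrereqError("Can't compute nodes using iallocator:"
  #                                " %s" % ial.info, errors.ECODE_NORES)
  #   nodes = ial.result
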
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict
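
  # A well-formed reply from the script thus deserializes into something
  # like the following (illustrative):
  #   {"success": true, "info": "allocation successful",
  #    "result": ["node1.example.com", "node2.example.com"]}
  # where "result" must always be a list.

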
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode '%s' in"
                                   " LUTestAllocator.Exec" % self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
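
  # Note: with direction IALLOCATOR_DIR_IN only the generated input text is
  # returned for inspection; with IALLOCATOR_DIR_OUT the named allocator
  # script is actually executed and its raw, unvalidated output is returned.

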
#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  }


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_OP_QUERY}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
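

# Usage sketch (hypothetical caller): the generic query LUs would resolve
# and instantiate the right implementation along these lines:
#
#   try:
#     impl_cls = _GetQueryImplementation(constants.QR_NODE)
#   except errors.OpPrereqError:
#     ...  # unknown resource name, reported back to the client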