4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
44 from ganeti import ssh
45 from ganeti import utils
46 from ganeti import errors
47 from ganeti import hypervisor
48 from ganeti import locking
49 from ganeti import constants
50 from ganeti import objects
51 from ganeti import serializer
52 from ganeti import ssconf
53 from ganeti import uidpool
54 from ganeti import compat
55 from ganeti import masterd
56 from ganeti import netutils
57 from ganeti import query
58 from ganeti import qlang
59 from ganeti import opcodes
61 import ganeti.masterd.instance # pylint: disable-msg=W0611
64 def _SupportsOob(cfg, node):
65 """Tells if node supports OOB.
67 @type cfg: L{config.ConfigWriter}
68 @param cfg: The cluster configuration
69 @type node: L{objects.Node}
71 @return: The OOB script if supported or an empty string otherwise
74 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
78 class LogicalUnit(object):
79 """Logical Unit base class.
81 Subclasses must follow these rules:
82 - implement ExpandNames
83 - implement CheckPrereq (except when tasklets are used)
84 - implement Exec (except when tasklets are used)
85 - implement BuildHooksEnv
86 - redefine HPATH and HTYPE
87 - optionally redefine their run requirements:
88 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
90 Note that all commands require root permissions.
92 @ivar dry_run_result: the value (if any) that will be returned to the caller
93 in dry-run mode (signalled by opcode dry_run parameter)
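# For illustration only (not part of this module): a minimal LU following the
# rules above could look like the sketch below; LUExampleNoop and its opcode
# are hypothetical names.
#
#   class LUExampleNoop(LogicalUnit):
#     HPATH = "example-noop"
#     HTYPE = constants.HTYPE_CLUSTER
#
#     def ExpandNames(self):
#       self.needed_locks = {}
#
#     def CheckPrereq(self):
#       pass
#
#     def BuildHooksEnv(self):
#       return {"OP_TARGET": self.cfg.GetClusterName()}, [], []
#
#     def Exec(self, feedback_fn):
#       return None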
100 def __init__(self, processor, op, context, rpc):
101 """Constructor for LogicalUnit.
103 This needs to be overridden in derived classes in order to check op validity.
107 self.proc = processor
109 self.cfg = context.cfg
110 self.context = context
112 # Dicts used to declare locking needs to mcpu
113 self.needed_locks = None
114 self.acquired_locks = {}
115 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
117 self.remove_locks = {}
118 # Used to force good behavior when calling helper functions
119 self.recalculate_locks = {}
122 self.Log = processor.Log # pylint: disable-msg=C0103
123 self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
124 self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
125 self.LogStep = processor.LogStep # pylint: disable-msg=C0103
126 # support for dry-run
127 self.dry_run_result = None
128 # support for generic debug attribute
129 if (not hasattr(self.op, "debug_level") or
130 not isinstance(self.op.debug_level, int)):
131 self.op.debug_level = 0
136 # Validate opcode parameters and set defaults
137 self.op.Validate(True)
139 self.CheckArguments()
142 """Returns the SshRunner object
146 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
149 ssh = property(fget=__GetSSH)
151 def CheckArguments(self):
152 """Check syntactic validity for the opcode arguments.
154 This method is for doing a simple syntactic check and ensuring
155 validity of opcode parameters, without any cluster-related
156 checks. While the same can be accomplished in ExpandNames and/or
157 CheckPrereq, doing these separately is better because:
159 - ExpandNames is left as a purely lock-related function
160 - CheckPrereq is run after we have acquired locks (and possibly waited for them)
163 The function is allowed to change the self.op attribute so that
164 later methods need no longer worry about missing parameters.
169 def ExpandNames(self):
170 """Expand names for this LU.
172 This method is called before starting to execute the opcode, and it should
173 update all the parameters of the opcode to their canonical form (e.g. a
174 short node name must be fully expanded after this method has successfully
175 completed). This way locking, hooks, logging, etc. can work correctly.
177 LUs which implement this method must also populate the self.needed_locks
178 member, as a dict with lock levels as keys, and a list of needed lock names as values, following these rules:
181 - use an empty dict if you don't need any lock
182 - if you don't need any lock at a particular level omit that level
183 - don't put anything for the BGL level
184 - if you want all locks at a level use locking.ALL_SET as a value
186 If you need to share locks (rather than acquire them exclusively) at one
187 level you can modify self.share_locks, setting a true value (usually 1) for
188 that level. By default locks are not shared.
190 This function can also define a list of tasklets, which then will be
191 executed in order instead of the usual LU-level CheckPrereq and Exec
192 functions, if those are not defined by the LU.
196 # Acquire all nodes and one instance
197 self.needed_locks = {
198 locking.LEVEL_NODE: locking.ALL_SET,
199 locking.LEVEL_INSTANCE: ['instance1.example.com'],
201 # Acquire just two nodes
202 self.needed_locks = {
203 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
206 self.needed_locks = {} # No, you can't leave it to the default value None
209 # The implementation of this method is mandatory only if the new LU is
210 # concurrent, so that old LUs don't need to be changed all at the same time.
213 self.needed_locks = {} # Exclusive LUs don't need locks.
215 raise NotImplementedError
217 def DeclareLocks(self, level):
218 """Declare LU locking needs for a level
220 While most LUs can just declare their locking needs at ExpandNames time,
221 sometimes there's the need to calculate some locks after having acquired
222 the ones before. This function is called just before acquiring locks at a
223 particular level, but after acquiring the ones at lower levels, and permits
224 such calculations. It can be used to modify self.needed_locks, and by
225 default it does nothing.
227 This function is only called if you have something already set in
228 self.needed_locks for the level.
230 @param level: Locking level which is going to be locked
231 @type level: member of ganeti.locking.LEVELS
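# Illustrative sketch (not from this module): a typical DeclareLocks override
# recalculates node locks once the instance locks are held, delegating to the
# _LockInstancesNodes helper documented below:
#
#   def DeclareLocks(self, level):
#     if level == locking.LEVEL_NODE:
#       self._LockInstancesNodes()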
235 def CheckPrereq(self):
236 """Check prerequisites for this LU.
238 This method should check that the prerequisites for the execution
239 of this LU are fulfilled. It can do internode communication, but
240 it should be idempotent - no cluster or system changes are allowed.
243 The method should raise errors.OpPrereqError in case something is
244 not fulfilled. Its return value is ignored.
246 This method should also update all the parameters of the opcode to
247 their canonical form if it hasn't been done by ExpandNames before.
250 if self.tasklets is not None:
251 for (idx, tl) in enumerate(self.tasklets):
252 logging.debug("Checking prerequisites for tasklet %s/%s",
253 idx + 1, len(self.tasklets))
258 def Exec(self, feedback_fn):
261 This method should implement the actual work. It should raise
262 errors.OpExecError for failures that are somewhat dealt with in code, or expected.
266 if self.tasklets is not None:
267 for (idx, tl) in enumerate(self.tasklets):
268 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
271 raise NotImplementedError
273 def BuildHooksEnv(self):
274 """Build hooks environment for this LU.
276 This method should return a three-element tuple consisting of: a dict
277 containing the environment that will be used for running the
278 specific hook for this LU, a list of node names on which the hook
279 should run before the execution, and a list of node names on which
280 the hook should run after the execution.
282 The keys of the dict must not be prefixed with 'GANETI_', as this will
283 be handled by the hooks runner. Also note that additional keys will be
284 added by the hooks runner. If the LU doesn't define any
285 environment, an empty dict (and not None) should be returned.
287 If there are no nodes for a phase, an empty list (and not None) should be returned.
289 Note that if the HPATH for a LU class is None, this function will not be called.
293 raise NotImplementedError
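# As an illustration, LUClusterPostInit further down returns exactly such a
# three-element tuple (environment dict, pre-hook node list, post-hook node
# list):
#
#   env = {"OP_TARGET": self.cfg.GetClusterName()}
#   mn = self.cfg.GetMasterNode()
#   return env, [], [mn]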
295 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
296 """Notify the LU about the results of its hooks.
298 This method is called every time a hooks phase is executed, and notifies
299 the Logical Unit about the hooks' result. The LU can then use it to alter
300 its result based on the hooks. By default the method does nothing and the
301 previous result is passed back unchanged, but any LU can override it if it
302 wants to use the local cluster hook-scripts somehow.
304 @param phase: one of L{constants.HOOKS_PHASE_POST} or
305 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
306 @param hook_results: the results of the multi-node hooks rpc call
307 @param feedback_fn: function used to send feedback back to the caller
308 @param lu_result: the previous Exec result this LU had, or None
310 @return: the new Exec result, based on the previous result
314 # API must be kept, thus we ignore the unused-argument and
315 # could-be-a-function warnings
316 # pylint: disable-msg=W0613,R0201
319 def _ExpandAndLockInstance(self):
320 """Helper function to expand and lock an instance.
322 Many LUs that work on an instance take its name in self.op.instance_name
323 and need to expand it and then declare the expanded name for locking. This
324 function does it, and then updates self.op.instance_name to the expanded
325 name. It also initializes needed_locks as a dict, if this hasn't been done before.
329 if self.needed_locks is None:
330 self.needed_locks = {}
332 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
333 "_ExpandAndLockInstance called with instance-level locks set"
334 self.op.instance_name = _ExpandInstanceName(self.cfg,
335 self.op.instance_name)
336 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
338 def _LockInstancesNodes(self, primary_only=False):
339 """Helper function to declare instances' nodes for locking.
341 This function should be called after locking one or more instances to lock
342 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
343 with all primary or secondary nodes for instances already locked and
344 present in self.needed_locks[locking.LEVEL_INSTANCE].
346 It should be called from DeclareLocks, and for safety only works if
347 self.recalculate_locks[locking.LEVEL_NODE] is set.
349 In the future it may grow parameters to just lock some instance's nodes, or
350 to just lock primaries or secondary nodes, if needed.
352 It should be called in DeclareLocks in a way similar to::
354 if level == locking.LEVEL_NODE:
355 self._LockInstancesNodes()
357 @type primary_only: boolean
358 @param primary_only: only lock primary nodes of locked instances
361 assert locking.LEVEL_NODE in self.recalculate_locks, \
362 "_LockInstancesNodes helper function called with no nodes to recalculate"
364 # TODO: check if we've really been called with the instance locks held
366 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
367 # future we might want to have different behaviors depending on the value
368 # of self.recalculate_locks[locking.LEVEL_NODE]
370 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
371 instance = self.context.cfg.GetInstanceInfo(instance_name)
372 wanted_nodes.append(instance.primary_node)
374 wanted_nodes.extend(instance.secondary_nodes)
376 if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
377 self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
378 elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
379 self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
381 del self.recalculate_locks[locking.LEVEL_NODE]
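# Illustrative usage (a sketch, not a prescription): an LU requests the
# recalculation from ExpandNames and then calls the helper from DeclareLocks:
#
#   self.needed_locks[locking.LEVEL_NODE] = []
#   self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE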
384 class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
385 """Simple LU which runs no hooks.
387 This LU is intended as a parent for other LogicalUnits which will
388 run no hooks, in order to reduce duplicate code.
394 def BuildHooksEnv(self):
395 """Empty BuildHooksEnv for NoHooksLu.
397 This just raises an error.
400 assert False, "BuildHooksEnv called for NoHooksLUs"
404 """Tasklet base class.
406 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
407 they can mix legacy code with tasklets. Locking needs to be done in the LU,
408 tasklets know nothing about locks.
410 Subclasses must follow these rules:
411 - Implement CheckPrereq
415 def __init__(self, lu):
422 def CheckPrereq(self):
423 """Check prerequisites for this tasklet.
425 This method should check whether the prerequisites for the execution of
426 this tasklet are fulfilled. It can do internode communication, but it
427 should be idempotent - no cluster or system changes are allowed.
429 The method should raise errors.OpPrereqError in case something is not
430 fulfilled. Its return value is ignored.
432 This method should also update all parameters to their canonical form if it
433 hasn't been done before.
438 def Exec(self, feedback_fn):
439 """Execute the tasklet.
441 This method should implement the actual work. It should raise
442 errors.OpExecError for failures that are somewhat dealt with in code, or expected.
446 raise NotImplementedError
450 """Base for query utility classes.
453 #: Attribute holding field definitions
456 def __init__(self, filter_, fields, use_locking):
457 """Initializes this class.
460 self.use_locking = use_locking
462 self.query = query.Query(self.FIELDS, fields, filter_=filter_,
464 self.requested_data = self.query.RequestedData()
465 self.names = self.query.RequestedNames()
467 # Sort only if no names were requested
468 self.sort_by_name = not self.names
470 self.do_locking = None
473 def _GetNames(self, lu, all_names, lock_level):
474 """Helper function to determine names asked for in the query.
478 names = lu.acquired_locks[lock_level]
482 if self.wanted == locking.ALL_SET:
483 assert not self.names
484 # caller didn't specify names, so ordering is not important
485 return utils.NiceSort(names)
487 # caller specified names and we must keep the same order
489 assert not self.do_locking or lu.acquired_locks[lock_level]
491 missing = set(self.wanted).difference(names)
493 raise errors.OpExecError("Some items were removed before retrieving"
494 " their data: %s" % missing)
496 # Return expanded names
500 def FieldsQuery(cls, fields):
501 """Returns list of available fields.
503 @return: List of L{objects.QueryFieldDefinition}
506 return query.QueryFields(cls.FIELDS, fields)
508 def ExpandNames(self, lu):
509 """Expand names for this query.
511 See L{LogicalUnit.ExpandNames}.
514 raise NotImplementedError()
516 def DeclareLocks(self, lu, level):
517 """Declare locks for this query.
519 See L{LogicalUnit.DeclareLocks}.
522 raise NotImplementedError()
524 def _GetQueryData(self, lu):
525 """Collects all data for this query.
527 @return: Query data object
530 raise NotImplementedError()
532 def NewStyleQuery(self, lu):
533 """Collect data and execute query.
536 return query.GetQueryResponse(self.query, self._GetQueryData(lu),
537 sort_by_name=self.sort_by_name)
539 def OldStyleQuery(self, lu):
540 """Collect data and execute query.
543 return self.query.OldStyleQuery(self._GetQueryData(lu),
544 sort_by_name=self.sort_by_name)
547 def _GetWantedNodes(lu, nodes):
548 """Returns list of checked and expanded node names.
550 @type lu: L{LogicalUnit}
551 @param lu: the logical unit on whose behalf we execute
553 @param nodes: list of node names or None for all nodes
555 @return: the list of nodes, sorted
556 @raise errors.ProgrammerError: if the nodes parameter is wrong type
560 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
562 return utils.NiceSort(lu.cfg.GetNodeList())
565 def _GetWantedInstances(lu, instances):
566 """Returns list of checked and expanded instance names.
568 @type lu: L{LogicalUnit}
569 @param lu: the logical unit on whose behalf we execute
570 @type instances: list
571 @param instances: list of instance names or None for all instances
573 @return: the list of instances, sorted
574 @raise errors.OpPrereqError: if the instances parameter is wrong type
575 @raise errors.OpPrereqError: if any of the passed instances is not found
579 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
581 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
585 def _GetUpdatedParams(old_params, update_dict,
586 use_default=True, use_none=False):
587 """Return the new version of a parameter dictionary.
589 @type old_params: dict
590 @param old_params: old parameters
591 @type update_dict: dict
592 @param update_dict: dict containing new parameter values, or
593 constants.VALUE_DEFAULT to reset the parameter to its default
595 @type use_default: boolean
596 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
597 values as 'to be deleted' values
598 @type use_none: boolean
599 @param use_none: whether to recognise C{None} values as 'to be deleted' values
602 @return: the new parameter dictionary
605 params_copy = copy.deepcopy(old_params)
606 for key, val in update_dict.iteritems():
607 if ((use_default and val == constants.VALUE_DEFAULT) or
608 (use_none and val is None)):
614 params_copy[key] = val
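# Example behaviour (illustrative): with use_default=True,
#   _GetUpdatedParams({"a": 1, "b": 2}, {"b": constants.VALUE_DEFAULT, "c": 3})
# drops "b" (so the cluster default applies again) and returns
# {"a": 1, "c": 3}.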
618 def _CheckOutputFields(static, dynamic, selected):
619 """Checks whether all selected fields are valid.
621 @type static: L{utils.FieldSet}
622 @param static: static fields set
623 @type dynamic: L{utils.FieldSet}
624 @param dynamic: dynamic fields set
631 delta = f.NonMatching(selected)
633 raise errors.OpPrereqError("Unknown output fields selected: %s"
634 % ",".join(delta), errors.ECODE_INVAL)
637 def _CheckGlobalHvParams(params):
638 """Validates that given hypervisor params are not global ones.
640 This will ensure that instances don't get customised versions of global parameters.
644 used_globals = constants.HVC_GLOBALS.intersection(params)
646 msg = ("The following hypervisor parameters are global and cannot"
647 " be customized at instance level, please modify them at"
648 " cluster level: %s" % utils.CommaJoin(used_globals))
649 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
652 def _CheckNodeOnline(lu, node, msg=None):
653 """Ensure that a given node is online.
655 @param lu: the LU on behalf of which we make the check
656 @param node: the node to check
657 @param msg: if passed, should be a message to replace the default one
658 @raise errors.OpPrereqError: if the node is offline
662 msg = "Can't use offline node"
663 if lu.cfg.GetNodeInfo(node).offline:
664 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
667 def _CheckNodeNotDrained(lu, node):
668 """Ensure that a given node is not drained.
670 @param lu: the LU on behalf of which we make the check
671 @param node: the node to check
672 @raise errors.OpPrereqError: if the node is drained
675 if lu.cfg.GetNodeInfo(node).drained:
676 raise errors.OpPrereqError("Can't use drained node %s" % node,
680 def _CheckNodeVmCapable(lu, node):
681 """Ensure that a given node is vm capable.
683 @param lu: the LU on behalf of which we make the check
684 @param node: the node to check
685 @raise errors.OpPrereqError: if the node is not vm capable
688 if not lu.cfg.GetNodeInfo(node).vm_capable:
689 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
693 def _CheckNodeHasOS(lu, node, os_name, force_variant):
694 """Ensure that a node supports a given OS.
696 @param lu: the LU on behalf of which we make the check
697 @param node: the node to check
698 @param os_name: the OS to query about
699 @param force_variant: whether to ignore variant errors
700 @raise errors.OpPrereqError: if the node is not supporting the OS
703 result = lu.rpc.call_os_get(node, os_name)
704 result.Raise("OS '%s' not in supported OS list for node %s" %
706 prereq=True, ecode=errors.ECODE_INVAL)
707 if not force_variant:
708 _CheckOSVariant(result.payload, os_name)
711 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
712 """Ensure that a node has the given secondary ip.
714 @type lu: L{LogicalUnit}
715 @param lu: the LU on behalf of which we make the check
717 @param node: the node to check
718 @type secondary_ip: string
719 @param secondary_ip: the ip to check
720 @type prereq: boolean
721 @param prereq: whether to throw a prerequisite or an execute error
722 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
723 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
726 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
727 result.Raise("Failure checking secondary ip on node %s" % node,
728 prereq=prereq, ecode=errors.ECODE_ENVIRON)
729 if not result.payload:
730 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
731 " please fix and re-run this command" % secondary_ip)
733 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
735 raise errors.OpExecError(msg)
738 def _GetClusterDomainSecret():
739 """Reads the cluster domain secret.
742 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
746 def _CheckInstanceDown(lu, instance, reason):
747 """Ensure that an instance is not running."""
748 if instance.admin_up:
749 raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
750 (instance.name, reason), errors.ECODE_STATE)
752 pnode = instance.primary_node
753 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
754 ins_l.Raise("Can't contact node %s for instance information" % pnode,
755 prereq=True, ecode=errors.ECODE_ENVIRON)
757 if instance.name in ins_l.payload:
758 raise errors.OpPrereqError("Instance %s is running, %s" %
759 (instance.name, reason), errors.ECODE_STATE)
762 def _ExpandItemName(fn, name, kind):
763 """Expand an item name.
765 @param fn: the function to use for expansion
766 @param name: requested item name
767 @param kind: text description ('Node' or 'Instance')
768 @return: the resolved (full) name
769 @raise errors.OpPrereqError: if the item is not found
773 if full_name is None:
774 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
779 def _ExpandNodeName(cfg, name):
780 """Wrapper over L{_ExpandItemName} for nodes."""
781 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
784 def _ExpandInstanceName(cfg, name):
785 """Wrapper over L{_ExpandItemName} for instances."""
786 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
789 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
790 memory, vcpus, nics, disk_template, disks,
791 bep, hvp, hypervisor_name):
792 """Builds instance-related env variables for hooks.
794 This builds the hook environment from individual variables.
797 @param name: the name of the instance
798 @type primary_node: string
799 @param primary_node: the name of the instance's primary node
800 @type secondary_nodes: list
801 @param secondary_nodes: list of secondary nodes as strings
802 @type os_type: string
803 @param os_type: the name of the instance's OS
804 @type status: boolean
805 @param status: the should_run status of the instance
807 @param memory: the memory size of the instance
809 @param vcpus: the count of VCPUs the instance has
811 @param nics: list of tuples (ip, mac, mode, link) representing
812 the NICs the instance has
813 @type disk_template: string
814 @param disk_template: the disk template of the instance
816 @param disks: the list of (size, mode) pairs
818 @param bep: the backend parameters for the instance
820 @param hvp: the hypervisor parameters for the instance
821 @type hypervisor_name: string
822 @param hypervisor_name: the hypervisor for the instance
824 @return: the hook environment for this instance
833 "INSTANCE_NAME": name,
834 "INSTANCE_PRIMARY": primary_node,
835 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
836 "INSTANCE_OS_TYPE": os_type,
837 "INSTANCE_STATUS": str_status,
838 "INSTANCE_MEMORY": memory,
839 "INSTANCE_VCPUS": vcpus,
840 "INSTANCE_DISK_TEMPLATE": disk_template,
841 "INSTANCE_HYPERVISOR": hypervisor_name,
845 nic_count = len(nics)
846 for idx, (ip, mac, mode, link) in enumerate(nics):
849 env["INSTANCE_NIC%d_IP" % idx] = ip
850 env["INSTANCE_NIC%d_MAC" % idx] = mac
851 env["INSTANCE_NIC%d_MODE" % idx] = mode
852 env["INSTANCE_NIC%d_LINK" % idx] = link
853 if mode == constants.NIC_MODE_BRIDGED:
854 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
858 env["INSTANCE_NIC_COUNT"] = nic_count
861 disk_count = len(disks)
862 for idx, (size, mode) in enumerate(disks):
863 env["INSTANCE_DISK%d_SIZE" % idx] = size
864 env["INSTANCE_DISK%d_MODE" % idx] = mode
868 env["INSTANCE_DISK_COUNT"] = disk_count
870 for source, kind in [(bep, "BE"), (hvp, "HV")]:
871 for key, value in source.items():
872 env["INSTANCE_%s_%s" % (kind, key)] = value
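# For example (illustrative), a single-NIC, single-disk instance ends up with
# keys such as INSTANCE_NAME, INSTANCE_PRIMARY, INSTANCE_NIC_COUNT,
# INSTANCE_NIC0_MAC, INSTANCE_DISK_COUNT, INSTANCE_DISK0_SIZE and the
# per-parameter INSTANCE_BE_* / INSTANCE_HV_* entries; the hooks runner then
# adds the GANETI_ prefix to each key.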
877 def _NICListToTuple(lu, nics):
878 """Build a list of nic information tuples.
880 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
881 value in LUInstanceQueryData.
883 @type lu: L{LogicalUnit}
884 @param lu: the logical unit on whose behalf we execute
885 @type nics: list of L{objects.NIC}
886 @param nics: list of nics to convert to hooks tuples
890 cluster = lu.cfg.GetClusterInfo()
894 filled_params = cluster.SimpleFillNIC(nic.nicparams)
895 mode = filled_params[constants.NIC_MODE]
896 link = filled_params[constants.NIC_LINK]
897 hooks_nics.append((ip, mac, mode, link))
901 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
902 """Builds instance-related env variables for hooks from an object.
904 @type lu: L{LogicalUnit}
905 @param lu: the logical unit on whose behalf we execute
906 @type instance: L{objects.Instance}
907 @param instance: the instance for which we should build the
910 @param override: dictionary with key/values that will override
913 @return: the hook environment dictionary
916 cluster = lu.cfg.GetClusterInfo()
917 bep = cluster.FillBE(instance)
918 hvp = cluster.FillHV(instance)
920 'name': instance.name,
921 'primary_node': instance.primary_node,
922 'secondary_nodes': instance.secondary_nodes,
923 'os_type': instance.os,
924 'status': instance.admin_up,
925 'memory': bep[constants.BE_MEMORY],
926 'vcpus': bep[constants.BE_VCPUS],
927 'nics': _NICListToTuple(lu, instance.nics),
928 'disk_template': instance.disk_template,
929 'disks': [(disk.size, disk.mode) for disk in instance.disks],
932 'hypervisor_name': instance.hypervisor,
935 args.update(override)
936 return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
939 def _AdjustCandidatePool(lu, exceptions):
940 """Adjust the candidate pool after node operations.
943 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
945 lu.LogInfo("Promoted nodes to master candidate role: %s",
946 utils.CommaJoin(node.name for node in mod_list))
947 for name in mod_list:
948 lu.context.ReaddNode(name)
949 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
951 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
955 def _DecideSelfPromotion(lu, exceptions=None):
956 """Decide whether I should promote myself as a master candidate.
959 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
960 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
961 # the new node will increase mc_max by one, so:
962 mc_should = min(mc_should + 1, cp_size)
963 return mc_now < mc_should
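# Worked example (illustrative): with candidate_pool_size=10, mc_now=3 and
# mc_should=3, the new node gives mc_should = min(3 + 1, 10) = 4 > 3, so the
# node decides to promote itself.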
966 def _CheckNicsBridgesExist(lu, target_nics, target_node):
967 """Check that the bridges needed by a list of nics exist.
970 cluster = lu.cfg.GetClusterInfo()
971 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
972 brlist = [params[constants.NIC_LINK] for params in paramslist
973 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
975 result = lu.rpc.call_bridges_exist(target_node, brlist)
976 result.Raise("Error checking bridges on destination node '%s'" %
977 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
980 def _CheckInstanceBridgesExist(lu, instance, node=None):
981 """Check that the bridges needed by an instance exist.
985 node = instance.primary_node
986 _CheckNicsBridgesExist(lu, instance.nics, node)
989 def _CheckOSVariant(os_obj, name):
990 """Check whether an OS name conforms to the OS variants specification.
992 @type os_obj: L{objects.OS}
993 @param os_obj: OS object to check
995 @param name: OS name passed by the user, to check for validity
998 if not os_obj.supported_variants:
1000 variant = objects.OS.GetVariant(name)
1002 raise errors.OpPrereqError("OS name must include a variant",
1005 if variant not in os_obj.supported_variants:
1006 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
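# Illustrative example, assuming the usual "name+variant" form handled by
# objects.OS.GetVariant: for an OS that declares variants, a name such as
# "debootstrap+default" passes, while a bare "debootstrap" raises
# "OS name must include a variant".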
1009 def _GetNodeInstancesInner(cfg, fn):
1010 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1013 def _GetNodeInstances(cfg, node_name):
1014 """Returns a list of all primary and secondary instances on a node.
1018 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1021 def _GetNodePrimaryInstances(cfg, node_name):
1022 """Returns primary instances on a node.
1025 return _GetNodeInstancesInner(cfg,
1026 lambda inst: node_name == inst.primary_node)
1029 def _GetNodeSecondaryInstances(cfg, node_name):
1030 """Returns secondary instances on a node.
1033 return _GetNodeInstancesInner(cfg,
1034 lambda inst: node_name in inst.secondary_nodes)
1037 def _GetStorageTypeArgs(cfg, storage_type):
1038 """Returns the arguments for a storage type.
1041 # Special case for file storage
1042 if storage_type == constants.ST_FILE:
1043 # storage.FileStorage wants a list of storage directories
1044 return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1049 def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1052 for dev in instance.disks:
1053 cfg.SetDiskID(dev, node_name)
1055 result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1056 result.Raise("Failed to get disk status from node %s" % node_name,
1057 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1059 for idx, bdev_status in enumerate(result.payload):
1060 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1066 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1067 """Check the sanity of iallocator and node arguments and use the
1068 cluster-wide iallocator if appropriate.
1070 Check that at most one of (iallocator, node) is specified. If none is
1071 specified, then the LU's opcode's iallocator slot is filled with the
1072 cluster-wide default iallocator.
1074 @type iallocator_slot: string
1075 @param iallocator_slot: the name of the opcode iallocator slot
1076 @type node_slot: string
1077 @param node_slot: the name of the opcode target node slot
1080 node = getattr(lu.op, node_slot, None)
1081 iallocator = getattr(lu.op, iallocator_slot, None)
1083 if node is not None and iallocator is not None:
1084 raise errors.OpPrereqError("Do not specify both iallocator and node.",
1086 elif node is None and iallocator is None:
1087 default_iallocator = lu.cfg.GetDefaultIAllocator()
1088 if default_iallocator:
1089 setattr(lu.op, iallocator_slot, default_iallocator)
1091 raise errors.OpPrereqError("No iallocator or node given and no"
1092 " cluster-wide default iallocator found."
1093 " Please specify either an iallocator or a"
1094 " node, or set a cluster-wide default"
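# Summary of the three cases handled above (illustrative): both slots set
# raises OpPrereqError; neither set fills the opcode's iallocator slot from
# cfg.GetDefaultIAllocator() if one is configured and raises otherwise;
# exactly one set is left untouched.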
1098 class LUClusterPostInit(LogicalUnit):
1099 """Logical unit for running hooks after cluster initialization.
1102 HPATH = "cluster-init"
1103 HTYPE = constants.HTYPE_CLUSTER
1105 def BuildHooksEnv(self):
1109 env = {"OP_TARGET": self.cfg.GetClusterName()}
1110 mn = self.cfg.GetMasterNode()
1111 return env, [], [mn]
1113 def Exec(self, feedback_fn):
1120 class LUClusterDestroy(LogicalUnit):
1121 """Logical unit for destroying the cluster.
1124 HPATH = "cluster-destroy"
1125 HTYPE = constants.HTYPE_CLUSTER
1127 def BuildHooksEnv(self):
1131 env = {"OP_TARGET": self.cfg.GetClusterName()}
1134 def CheckPrereq(self):
1135 """Check prerequisites.
1137 This checks whether the cluster is empty.
1139 Any errors are signaled by raising errors.OpPrereqError.
1142 master = self.cfg.GetMasterNode()
1144 nodelist = self.cfg.GetNodeList()
1145 if len(nodelist) != 1 or nodelist[0] != master:
1146 raise errors.OpPrereqError("There are still %d node(s) in"
1147 " this cluster." % (len(nodelist) - 1),
1149 instancelist = self.cfg.GetInstanceList()
1151 raise errors.OpPrereqError("There are still %d instance(s) in"
1152 " this cluster." % len(instancelist),
1155 def Exec(self, feedback_fn):
1156 """Destroys the cluster.
1159 master = self.cfg.GetMasterNode()
1161 # Run post hooks on master node before it's removed
1162 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1164 hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1166 # pylint: disable-msg=W0702
1167 self.LogWarning("Errors occurred running hooks on %s" % master)
1169 result = self.rpc.call_node_stop_master(master, False)
1170 result.Raise("Could not disable the master role")
1175 def _VerifyCertificate(filename):
1176 """Verifies a certificate for LUClusterVerify.
1178 @type filename: string
1179 @param filename: Path to PEM file
1183 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1184 utils.ReadFile(filename))
1185 except Exception, err: # pylint: disable-msg=W0703
1186 return (LUClusterVerify.ETYPE_ERROR,
1187 "Failed to load X509 certificate %s: %s" % (filename, err))
1190 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1191 constants.SSL_CERT_EXPIRATION_ERROR)
1194 fnamemsg = "While verifying %s: %s" % (filename, msg)
1199 return (None, fnamemsg)
1200 elif errcode == utils.CERT_WARNING:
1201 return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
1202 elif errcode == utils.CERT_ERROR:
1203 return (LUClusterVerify.ETYPE_ERROR, fnamemsg)
1205 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1208 class LUClusterVerify(LogicalUnit):
1209 """Verifies the cluster status.
1212 HPATH = "cluster-verify"
1213 HTYPE = constants.HTYPE_CLUSTER
1216 TCLUSTER = "cluster"
1218 TINSTANCE = "instance"
1220 ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1221 ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1222 EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1223 EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1224 EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1225 EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1226 EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1227 EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1228 EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1229 ENODEDRBD = (TNODE, "ENODEDRBD")
1230 ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1231 ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1232 ENODEHOOKS = (TNODE, "ENODEHOOKS")
1233 ENODEHV = (TNODE, "ENODEHV")
1234 ENODELVM = (TNODE, "ENODELVM")
1235 ENODEN1 = (TNODE, "ENODEN1")
1236 ENODENET = (TNODE, "ENODENET")
1237 ENODEOS = (TNODE, "ENODEOS")
1238 ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1239 ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1240 ENODERPC = (TNODE, "ENODERPC")
1241 ENODESSH = (TNODE, "ENODESSH")
1242 ENODEVERSION = (TNODE, "ENODEVERSION")
1243 ENODESETUP = (TNODE, "ENODESETUP")
1244 ENODETIME = (TNODE, "ENODETIME")
1245 ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1247 ETYPE_FIELD = "code"
1248 ETYPE_ERROR = "ERROR"
1249 ETYPE_WARNING = "WARNING"
1251 _HOOKS_INDENT_RE = re.compile("^", re.M)
1253 class NodeImage(object):
1254 """A class representing the logical and physical status of a node.
1257 @ivar name: the node name to which this object refers
1258 @ivar volumes: a structure as returned from
1259 L{ganeti.backend.GetVolumeList} (runtime)
1260 @ivar instances: a list of running instances (runtime)
1261 @ivar pinst: list of configured primary instances (config)
1262 @ivar sinst: list of configured secondary instances (config)
1263 @ivar sbp: dictionary of {primary-node: list of instances} for all
1264 instances for which this node is secondary (config)
1265 @ivar mfree: free memory, as reported by hypervisor (runtime)
1266 @ivar dfree: free disk, as reported by the node (runtime)
1267 @ivar offline: the offline status (config)
1268 @type rpc_fail: boolean
1269 @ivar rpc_fail: whether the RPC verify call was successful (overall,
1270 not whether the individual keys were correct) (runtime)
1271 @type lvm_fail: boolean
1272 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1273 @type hyp_fail: boolean
1274 @ivar hyp_fail: whether the RPC call didn't return the instance list
1275 @type ghost: boolean
1276 @ivar ghost: whether this is a known node or not (config)
1277 @type os_fail: boolean
1278 @ivar os_fail: whether the RPC call didn't return valid OS data
1280 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1281 @type vm_capable: boolean
1282 @ivar vm_capable: whether the node can host instances
1285 def __init__(self, offline=False, name=None, vm_capable=True):
1294 self.offline = offline
1295 self.vm_capable = vm_capable
1296 self.rpc_fail = False
1297 self.lvm_fail = False
1298 self.hyp_fail = False
1300 self.os_fail = False
1303 def ExpandNames(self):
1304 self.needed_locks = {
1305 locking.LEVEL_NODE: locking.ALL_SET,
1306 locking.LEVEL_INSTANCE: locking.ALL_SET,
1308 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1310 def _Error(self, ecode, item, msg, *args, **kwargs):
1311 """Format an error message.
1313 Based on the opcode's error_codes parameter, either format a
1314 parseable error code, or a simpler error string.
1316 This must be called only from Exec and functions called from Exec.
1319 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1321 # first complete the msg
1324 # then format the whole message
1325 if self.op.error_codes:
1326 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1332 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1333 # and finally report it via the feedback_fn
1334 self._feedback_fn(" - %s" % msg)
1336 def _ErrorIf(self, cond, *args, **kwargs):
1337 """Log an error message if the passed condition is True.
1340 cond = bool(cond) or self.op.debug_simulate_errors
1342 self._Error(*args, **kwargs)
1343 # do not mark the operation as failed for WARN cases only
1344 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1345 self.bad = self.bad or cond
1347 def _VerifyNode(self, ninfo, nresult):
1348 """Perform some basic validation on data returned from a node.
1350 - check the result data structure is well formed and has all the mandatory fields
1352 - check ganeti version
1354 @type ninfo: L{objects.Node}
1355 @param ninfo: the node to check
1356 @param nresult: the results from the node
1358 @return: whether overall this call was successful (and we can expect
1359 reasonable values in the response)
1363 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1365 # main result, nresult should be a non-empty dict
1366 test = not nresult or not isinstance(nresult, dict)
1367 _ErrorIf(test, self.ENODERPC, node,
1368 "unable to verify node: no data returned")
1372 # compares ganeti version
1373 local_version = constants.PROTOCOL_VERSION
1374 remote_version = nresult.get("version", None)
1375 test = not (remote_version and
1376 isinstance(remote_version, (list, tuple)) and
1377 len(remote_version) == 2)
1378 _ErrorIf(test, self.ENODERPC, node,
1379 "connection to node returned invalid data")
1383 test = local_version != remote_version[0]
1384 _ErrorIf(test, self.ENODEVERSION, node,
1385 "incompatible protocol versions: master %s,"
1386 " node %s", local_version, remote_version[0])
1390 # node seems compatible, we can actually try to look into its results
1392 # full package version
1393 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1394 self.ENODEVERSION, node,
1395 "software version mismatch: master %s, node %s",
1396 constants.RELEASE_VERSION, remote_version[1],
1397 code=self.ETYPE_WARNING)
1399 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1400 if ninfo.vm_capable and isinstance(hyp_result, dict):
1401 for hv_name, hv_result in hyp_result.iteritems():
1402 test = hv_result is not None
1403 _ErrorIf(test, self.ENODEHV, node,
1404 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1406 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1407 if ninfo.vm_capable and isinstance(hvp_result, list):
1408 for item, hv_name, hv_result in hvp_result:
1409 _ErrorIf(True, self.ENODEHV, node,
1410 "hypervisor %s parameter verify failure (source %s): %s",
1411 hv_name, item, hv_result)
1413 test = nresult.get(constants.NV_NODESETUP,
1414 ["Missing NODESETUP results"])
1415 _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1420 def _VerifyNodeTime(self, ninfo, nresult,
1421 nvinfo_starttime, nvinfo_endtime):
1422 """Check the node time.
1424 @type ninfo: L{objects.Node}
1425 @param ninfo: the node to check
1426 @param nresult: the remote results for the node
1427 @param nvinfo_starttime: the start time of the RPC call
1428 @param nvinfo_endtime: the end time of the RPC call
1432 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1434 ntime = nresult.get(constants.NV_TIME, None)
1436 ntime_merged = utils.MergeTime(ntime)
1437 except (ValueError, TypeError):
1438 _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1441 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1442 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1443 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1444 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1448 _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1449 "Node time diverges by at least %s from master node time",
1452 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1453 """Check the node LVM data.
1455 @type ninfo: L{objects.Node}
1456 @param ninfo: the node to check
1457 @param nresult: the remote results for the node
1458 @param vg_name: the configured VG name
1465 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1467 # checks vg existence and size > 20G
1468 vglist = nresult.get(constants.NV_VGLIST, None)
1470 _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1472 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1473 constants.MIN_VG_SIZE)
1474 _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1477 pvlist = nresult.get(constants.NV_PVLIST, None)
1478 test = pvlist is None
1479 _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1481 # check that ':' is not present in PV names, since it's a
1482 # special character for lvcreate (denotes the range of PEs to use on a PV)
1484 for _, pvname, owner_vg in pvlist:
1485 test = ":" in pvname
1486 _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1487 " '%s' of VG '%s'", pvname, owner_vg)
1489 def _VerifyNodeNetwork(self, ninfo, nresult):
1490 """Check the node network connectivity.
1492 @type ninfo: L{objects.Node}
1493 @param ninfo: the node to check
1494 @param nresult: the remote results for the node
1498 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1500 test = constants.NV_NODELIST not in nresult
1501 _ErrorIf(test, self.ENODESSH, node,
1502 "node hasn't returned node ssh connectivity data")
1504 if nresult[constants.NV_NODELIST]:
1505 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1506 _ErrorIf(True, self.ENODESSH, node,
1507 "ssh communication with node '%s': %s", a_node, a_msg)
1509 test = constants.NV_NODENETTEST not in nresult
1510 _ErrorIf(test, self.ENODENET, node,
1511 "node hasn't returned node tcp connectivity data")
1513 if nresult[constants.NV_NODENETTEST]:
1514 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1516 _ErrorIf(True, self.ENODENET, node,
1517 "tcp communication with node '%s': %s",
1518 anode, nresult[constants.NV_NODENETTEST][anode])
1520 test = constants.NV_MASTERIP not in nresult
1521 _ErrorIf(test, self.ENODENET, node,
1522 "node hasn't returned node master IP reachability data")
1524 if not nresult[constants.NV_MASTERIP]:
1525 if node == self.master_node:
1526 msg = "the master node cannot reach the master IP (not configured?)"
1528 msg = "cannot reach the master IP"
1529 _ErrorIf(True, self.ENODENET, node, msg)
1531 def _VerifyInstance(self, instance, instanceconfig, node_image,
1533 """Verify an instance.
1535 This function checks to see if the required block devices are
1536 available on the instance's node.
1539 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1540 node_current = instanceconfig.primary_node
1542 node_vol_should = {}
1543 instanceconfig.MapLVsByNode(node_vol_should)
1545 for node in node_vol_should:
1546 n_img = node_image[node]
1547 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1548 # ignore missing volumes on offline or broken nodes
1550 for volume in node_vol_should[node]:
1551 test = volume not in n_img.volumes
1552 _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1553 "volume %s missing on node %s", volume, node)
1555 if instanceconfig.admin_up:
1556 pri_img = node_image[node_current]
1557 test = instance not in pri_img.instances and not pri_img.offline
1558 _ErrorIf(test, self.EINSTANCEDOWN, instance,
1559 "instance not running on its primary node %s",
1562 for node, n_img in node_image.items():
1563 if node != node_current:
1564 test = instance in n_img.instances
1565 _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1566 "instance should not run on node %s", node)
1568 diskdata = [(nname, success, status, idx)
1569 for (nname, disks) in diskstatus.items()
1570 for idx, (success, status) in enumerate(disks)]
1572 for nname, success, bdev_status, idx in diskdata:
1573 # the 'ghost node' construction in Exec() ensures that we have a node here
1575 snode = node_image[nname]
1576 bad_snode = snode.ghost or snode.offline
1577 _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
1578 self.EINSTANCEFAULTYDISK, instance,
1579 "couldn't retrieve status for disk/%s on %s: %s",
1580 idx, nname, bdev_status)
1581 _ErrorIf((instanceconfig.admin_up and success and
1582 bdev_status.ldisk_status == constants.LDS_FAULTY),
1583 self.EINSTANCEFAULTYDISK, instance,
1584 "disk/%s on %s is faulty", idx, nname)
1586 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1587 """Verify if there are any unknown volumes in the cluster.
1589 The .os, .swap and backup volumes are ignored. All other volumes are
1590 reported as unknown.
1592 @type reserved: L{ganeti.utils.FieldSet}
1593 @param reserved: a FieldSet of reserved volume names
1596 for node, n_img in node_image.items():
1597 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1598 # skip non-healthy nodes
1600 for volume in n_img.volumes:
1601 test = ((node not in node_vol_should or
1602 volume not in node_vol_should[node]) and
1603 not reserved.Matches(volume))
1604 self._ErrorIf(test, self.ENODEORPHANLV, node,
1605 "volume %s is unknown", volume)
1607 def _VerifyOrphanInstances(self, instancelist, node_image):
1608 """Verify the list of running instances.
1610 This checks what instances are running but unknown to the cluster.
1613 for node, n_img in node_image.items():
1614 for o_inst in n_img.instances:
1615 test = o_inst not in instancelist
1616 self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1617 "instance %s on node %s should not exist", o_inst, node)
1619 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1620 """Verify N+1 Memory Resilience.
1622 Check that if one single node dies we can still start all the
1623 instances it was primary for.
1626 cluster_info = self.cfg.GetClusterInfo()
1627 for node, n_img in node_image.items():
1628 # This code checks that every node which is now listed as
1629 # secondary has enough memory to host all instances it is
1630 # supposed to should a single other node in the cluster fail.
1631 # FIXME: not ready for failover to an arbitrary node
1632 # FIXME: does not support file-backed instances
1633 # WARNING: we currently take into account down instances as well
1634 # as up ones, considering that even if they're down someone
1635 # might want to start them even in the event of a node failure.
1637 # we're skipping offline nodes from the N+1 warning, since
1638 # most likely we don't have good memory information from them;
1639 # we already list instances living on such nodes, and that's
1642 for prinode, instances in n_img.sbp.items():
1644 for instance in instances:
1645 bep = cluster_info.FillBE(instance_cfg[instance])
1646 if bep[constants.BE_AUTO_BALANCE]:
1647 needed_mem += bep[constants.BE_MEMORY]
1648 test = n_img.mfree < needed_mem
1649 self._ErrorIf(test, self.ENODEN1, node,
1650 "not enough memory to accommodate instance failovers"
1651 " should node %s fail", prinode)
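# Worked example (illustrative): if this node holds the secondaries of two
# auto-balanced instances of one primary needing 2048 MB and 4096 MB,
# needed_mem is 6144 MB; with only 4096 MB reported free, the ENODEN1 error
# above fires.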
1653 def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1655 """Verifies and computes the node required file checksums.
1657 @type ninfo: L{objects.Node}
1658 @param ninfo: the node to check
1659 @param nresult: the remote results for the node
1660 @param file_list: required list of files
1661 @param local_cksum: dictionary of local files and their checksums
1662 @param master_files: list of files that only masters should have
1666 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1668 remote_cksum = nresult.get(constants.NV_FILELIST, None)
1669 test = not isinstance(remote_cksum, dict)
1670 _ErrorIf(test, self.ENODEFILECHECK, node,
1671 "node hasn't returned file checksum data")
1675 for file_name in file_list:
1676 node_is_mc = ninfo.master_candidate
1677 must_have = (file_name not in master_files) or node_is_mc
1679 test1 = file_name not in remote_cksum
1681 test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1683 test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1684 _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1685 "file '%s' missing", file_name)
1686 _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1687 "file '%s' has wrong checksum", file_name)
1688 # not candidate and this is not a must-have file
1689 _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1690 "file '%s' should not exist on non master"
1691 " candidates (and the file is outdated)", file_name)
1692 # all good, except non-master/non-must have combination
1693 _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1694 "file '%s' should not exist"
1695 " on non master candidates", file_name)
1697 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
1699 """Verifies the node DRBD status.
1701 @type ninfo: L{objects.Node}
1702 @param ninfo: the node to check
1703 @param nresult: the remote results for the node
1704 @param instanceinfo: the dict of instances
1705 @param drbd_helper: the configured DRBD usermode helper
1706 @param drbd_map: the DRBD map as returned by
1707 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1711 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1714 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1715 test = (helper_result is None)
1716 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1717 "no drbd usermode helper returned")
1719 status, payload = helper_result
1721 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1722 "drbd usermode helper check unsuccessful: %s", payload)
1723 test = status and (payload != drbd_helper)
1724 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1725 "wrong drbd usermode helper: %s", payload)
1727 # compute the DRBD minors
1729 for minor, instance in drbd_map[node].items():
1730 test = instance not in instanceinfo
1731 _ErrorIf(test, self.ECLUSTERCFG, None,
1732 "ghost instance '%s' in temporary DRBD map", instance)
1733 # ghost instance should not be running, but otherwise we
1734 # don't give double warnings (both ghost instance and
1735 # unallocated minor in use)
1737 node_drbd[minor] = (instance, False)
1739 instance = instanceinfo[instance]
1740 node_drbd[minor] = (instance.name, instance.admin_up)
1742 # and now check them
1743 used_minors = nresult.get(constants.NV_DRBDLIST, [])
1744 test = not isinstance(used_minors, (tuple, list))
1745 _ErrorIf(test, self.ENODEDRBD, node,
1746 "cannot parse drbd status file: %s", str(used_minors))
1748 # we cannot check drbd status
1751 for minor, (iname, must_exist) in node_drbd.items():
1752 test = minor not in used_minors and must_exist
1753 _ErrorIf(test, self.ENODEDRBD, node,
1754 "drbd minor %d of instance %s is not active", minor, iname)
1755 for minor in used_minors:
1756 test = minor not in node_drbd
1757 _ErrorIf(test, self.ENODEDRBD, node,
1758 "unallocated drbd minor %d is in use", minor)
1760 def _UpdateNodeOS(self, ninfo, nresult, nimg):
1761 """Builds the node OS structures.
1763 @type ninfo: L{objects.Node}
1764 @param ninfo: the node to check
1765 @param nresult: the remote results for the node
1766 @param nimg: the node image object
1770 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1772 remote_os = nresult.get(constants.NV_OSLIST, None)
1773 test = (not isinstance(remote_os, list) or
1774 not compat.all(isinstance(v, list) and len(v) == 7
1775 for v in remote_os))
1777 _ErrorIf(test, self.ENODEOS, node,
1778 "node hasn't returned valid OS data")
1787 for (name, os_path, status, diagnose,
1788 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1790 if name not in os_dict:
1793 # parameters is a list of lists instead of list of tuples due to
1794 # JSON lacking a real tuple type, fix it:
1795 parameters = [tuple(v) for v in parameters]
1796 os_dict[name].append((os_path, status, diagnose,
1797 set(variants), set(parameters), set(api_ver)))
1799 nimg.oslist = os_dict
1801 def _VerifyNodeOS(self, ninfo, nimg, base):
1802 """Verifies the node OS list.
1804 @type ninfo: L{objects.Node}
1805 @param ninfo: the node to check
1806 @param nimg: the node image object
1807 @param base: the 'template' node we match against (e.g. from the master)
1811 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1813 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1815 for os_name, os_data in nimg.oslist.items():
1816 assert os_data, "Empty OS status for OS %s?!" % os_name
1817 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1818 _ErrorIf(not f_status, self.ENODEOS, node,
1819 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1820 _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1821 "OS '%s' has multiple entries (first one shadows the rest): %s",
1822 os_name, utils.CommaJoin([v[0] for v in os_data]))
1823 # this will be caught in the backend too
1824 _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1825 and not f_var, self.ENODEOS, node,
1826 "OS %s with API at least %d does not declare any variant",
1827 os_name, constants.OS_API_V15)
1828 # comparisons with the 'base' image
1829 test = os_name not in base.oslist
1830 _ErrorIf(test, self.ENODEOS, node,
1831 "Extra OS %s not present on reference node (%s)",
1835 assert base.oslist[os_name], "Base node has empty OS status?"
1836 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1838 # base OS is invalid, skipping
1840 for kind, a, b in [("API version", f_api, b_api),
1841 ("variants list", f_var, b_var),
1842 ("parameters", f_param, b_param)]:
1843 _ErrorIf(a != b, self.ENODEOS, node,
1844 "OS %s %s differs from reference node %s: %s vs. %s",
1845 kind, os_name, base.name,
1846 utils.CommaJoin(a), utils.CommaJoin(b))
1848 # check any missing OSes
1849 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
1850 _ErrorIf(missing, self.ENODEOS, node,
1851 "OSes present on reference node %s but missing on this node: %s",
1852 base.name, utils.CommaJoin(missing))
1854 def _VerifyOob(self, ninfo, nresult):
1855 """Verifies out of band functionality of a node.
1857 @type ninfo: L{objects.Node}
1858 @param ninfo: the node to check
1859 @param nresult: the remote results for the node
1863 # We just have to verify the paths on master and/or master candidates
1864 # as the oob helper is invoked on the master
1865 if ((ninfo.master_candidate or ninfo.master_capable) and
1866 constants.NV_OOB_PATHS in nresult):
1867 for path_result in nresult[constants.NV_OOB_PATHS]:
1868 self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
1870 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1871 """Verifies and updates the node volume data.
1873 This function will update a L{NodeImage}'s internal structures
1874 with data from the remote call.
1876 @type ninfo: L{objects.Node}
1877 @param ninfo: the node to check
1878 @param nresult: the remote results for the node
1879 @param nimg: the node image object
1880 @param vg_name: the configured VG name
1884 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1886 nimg.lvm_fail = True
1887 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1890 elif isinstance(lvdata, basestring):
1891 _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1892 utils.SafeEncode(lvdata))
1893 elif not isinstance(lvdata, dict):
1894 _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1896 nimg.volumes = lvdata
1897 nimg.lvm_fail = False
1899 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1900 """Verifies and updates the node instance list.
1902 If the listing was successful, then updates this node's instance
1903 list. Otherwise, it marks the RPC call as failed for the instance list.
1906 @type ninfo: L{objects.Node}
1907 @param ninfo: the node to check
1908 @param nresult: the remote results for the node
1909 @param nimg: the node image object
1912 idata = nresult.get(constants.NV_INSTANCELIST, None)
1913 test = not isinstance(idata, list)
1914 self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1915 " (instancelist): %s", utils.SafeEncode(str(idata)))
1917 nimg.hyp_fail = True
1919 nimg.instances = idata
1921 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1922 """Verifies and computes a node information map
1924 @type ninfo: L{objects.Node}
1925 @param ninfo: the node to check
1926 @param nresult: the remote results for the node
1927 @param nimg: the node image object
1928 @param vg_name: the configured VG name
1932 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1934 # try to read free memory (from the hypervisor)
1935 hv_info = nresult.get(constants.NV_HVINFO, None)
1936 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1937 _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1940 nimg.mfree = int(hv_info["memory_free"])
1941 except (ValueError, TypeError):
1942 _ErrorIf(True, self.ENODERPC, node,
1943 "node returned invalid nodeinfo, check hypervisor")
1945 # FIXME: devise a free space model for file based instances as well
1946 if vg_name is not None:
1947 test = (constants.NV_VGLIST not in nresult or
1948 vg_name not in nresult[constants.NV_VGLIST])
1949 _ErrorIf(test, self.ENODELVM, node,
1950 "node didn't return data for the volume group '%s'"
1951 " - it is either missing or broken", vg_name)
1954 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1955 except (ValueError, TypeError):
1956 _ErrorIf(True, self.ENODERPC, node,
1957 "node returned invalid LVM info, check LVM status")
1959 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
1960 """Gets per-disk status information for all instances.
1962 @type nodelist: list of strings
1963 @param nodelist: Node names
1964 @type node_image: dict of (name, L{objects.Node})
1965 @param node_image: Node objects
1966 @type instanceinfo: dict of (name, L{objects.Instance})
1967 @param instanceinfo: Instance objects
1968 @rtype: {instance: {node: [(success, payload)]}}
1969 @return: a dictionary of per-instance dictionaries with nodes as
1970 keys and disk information as values; the disk information is a
1971 list of tuples (success, payload)
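      An illustrative value (hypothetical instance, node and status names) for
      one DRBD-backed instance with a single disk, eg::

        {"inst1.example.com": {"node1.example.com": [(True, disk_status)],
                               "node2.example.com": [(True, disk_status)]}}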
1974 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1977 node_disks_devonly = {}
1978 diskless_instances = set()
1979 diskless = constants.DT_DISKLESS
1981 for nname in nodelist:
1982 node_instances = list(itertools.chain(node_image[nname].pinst,
1983 node_image[nname].sinst))
1984 diskless_instances.update(inst for inst in node_instances
1985 if instanceinfo[inst].disk_template == diskless)
1986 disks = [(inst, disk)
1987 for inst in node_instances
1988 for disk in instanceinfo[inst].disks]
1991 # No need to collect data
1994 node_disks[nname] = disks
1996 # Creating copies as SetDiskID below will modify the objects and that can
1997 # lead to incorrect data returned from nodes
1998 devonly = [dev.Copy() for (_, dev) in disks]
2001 self.cfg.SetDiskID(dev, nname)
2003 node_disks_devonly[nname] = devonly
2005 assert len(node_disks) == len(node_disks_devonly)
2007 # Collect data from all nodes with disks
2008 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2011 assert len(result) == len(node_disks)
2015 for (nname, nres) in result.items():
2016 disks = node_disks[nname]
2019 # No data from this node
2020 data = len(disks) * [(False, "node offline")]
2023 _ErrorIf(msg, self.ENODERPC, nname,
2024 "while getting disk information: %s", msg)
2026 # No data from this node
2027 data = len(disks) * [(False, msg)]
2030 for idx, i in enumerate(nres.payload):
2031 if isinstance(i, (tuple, list)) and len(i) == 2:
2034 logging.warning("Invalid result from node %s, entry %d: %s",
2036 data.append((False, "Invalid result from the remote node"))
2038 for ((inst, _), status) in zip(disks, data):
2039 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2041 # Add empty entries for diskless instances.
2042 for inst in diskless_instances:
2043 assert inst not in instdisk
2046 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2047 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2048 compat.all(isinstance(s, (tuple, list)) and
2049 len(s) == 2 for s in statuses)
2050 for inst, nnames in instdisk.items()
2051 for nname, statuses in nnames.items())
2052 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
2056 def _VerifyHVP(self, hvp_data):
2057 """Verifies locally the syntax of the hypervisor parameters.
2060 for item, hv_name, hv_params in hvp_data:
2061 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2064 hv_class = hypervisor.GetHypervisor(hv_name)
2065 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2066 hv_class.CheckParameterSyntax(hv_params)
2067 except errors.GenericError, err:
2068 self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
2071 def BuildHooksEnv(self):
2074 Cluster-Verify hooks are run only in the post phase; if they fail, their
2075 output is logged in the verify output and the verification fails.
2078 all_nodes = self.cfg.GetNodeList()
2080 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2082 for node in self.cfg.GetAllNodesInfo().values():
2083 env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2085 return env, [], all_nodes
2087 def Exec(self, feedback_fn):
2088 """Verify integrity of cluster, performing various test on nodes.
2091 # This method has too many local variables. pylint: disable-msg=R0914
2093 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2094 verbose = self.op.verbose
2095 self._feedback_fn = feedback_fn
2096 feedback_fn("* Verifying global settings")
2097 for msg in self.cfg.VerifyConfig():
2098 _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2100 # Check the cluster certificates
2101 for cert_filename in constants.ALL_CERT_FILES:
2102 (errcode, msg) = _VerifyCertificate(cert_filename)
2103 _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2105 vg_name = self.cfg.GetVGName()
2106 drbd_helper = self.cfg.GetDRBDHelper()
2107 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2108 cluster = self.cfg.GetClusterInfo()
2109 nodelist = utils.NiceSort(self.cfg.GetNodeList())
2110 nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
2111 nodeinfo_byname = dict(zip(nodelist, nodeinfo))
2112 instancelist = utils.NiceSort(self.cfg.GetInstanceList())
2113 instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
2114 for iname in instancelist)
2115 groupinfo = self.cfg.GetAllNodeGroupsInfo()
2116 i_non_redundant = [] # Non redundant instances
2117 i_non_a_balanced = [] # Non auto-balanced instances
2118 n_offline = 0 # Count of offline nodes
2119 n_drained = 0 # Count of nodes being drained
2120 node_vol_should = {}
2122 # FIXME: verify OS list
2123 # do local checksums
2124 master_files = [constants.CLUSTER_CONF_FILE]
2125 master_node = self.master_node = self.cfg.GetMasterNode()
2126 master_ip = self.cfg.GetMasterIP()
2128 file_names = ssconf.SimpleStore().GetFileList()
2129 file_names.extend(constants.ALL_CERT_FILES)
2130 file_names.extend(master_files)
2131 if cluster.modify_etc_hosts:
2132 file_names.append(constants.ETC_HOSTS)
2134 local_checksums = utils.FingerprintFiles(file_names)
2136 # Compute the set of hypervisor parameters
2138 for hv_name in hypervisors:
2139 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
2140 for os_name, os_hvp in cluster.os_hvp.items():
2141 for hv_name, hv_params in os_hvp.items():
2144 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
2145 hvp_data.append(("os %s" % os_name, hv_name, full_params))
2146 # TODO: collapse identical parameter values into a single one
2147 for instance in instanceinfo.values():
2148 if not instance.hvparams:
2150 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
2151 cluster.FillHV(instance)))
2152 # and verify them locally
2153 self._VerifyHVP(hvp_data)
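    # For illustration, each hvp_data entry is a (source, hypervisor, params)
    # tuple; a hypothetical OS-level override could look like
    #   ("os debian-image", "xen-pvm", {"kernel_path": "/boot/vmlinuz-2.6-xenU"})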
2155 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2156 node_verify_param = {
2157 constants.NV_FILELIST: file_names,
2158 constants.NV_NODELIST: [node.name for node in nodeinfo
2159 if not node.offline],
2160 constants.NV_HYPERVISOR: hypervisors,
2161 constants.NV_HVPARAMS: hvp_data,
2162 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2163 node.secondary_ip) for node in nodeinfo
2164 if not node.offline],
2165 constants.NV_INSTANCELIST: hypervisors,
2166 constants.NV_VERSION: None,
2167 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2168 constants.NV_NODESETUP: None,
2169 constants.NV_TIME: None,
2170 constants.NV_MASTERIP: (master_node, master_ip),
2171 constants.NV_OSLIST: None,
2172 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2175 if vg_name is not None:
2176 node_verify_param[constants.NV_VGLIST] = None
2177 node_verify_param[constants.NV_LVLIST] = vg_name
2178 node_verify_param[constants.NV_PVLIST] = [vg_name]
2179 node_verify_param[constants.NV_DRBDLIST] = None
2182 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2184 # Build our expected cluster state
2185 node_image = dict((node.name, self.NodeImage(offline=node.offline,
2187 vm_capable=node.vm_capable))
2188 for node in nodeinfo)
2192 for node in nodeinfo:
2193 path = _SupportsOob(self.cfg, node)
2194 if path and path not in oob_paths:
2195 oob_paths.append(path)
2198 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2200 for instance in instancelist:
2201 inst_config = instanceinfo[instance]
2203 for nname in inst_config.all_nodes:
2204 if nname not in node_image:
2206 gnode = self.NodeImage(name=nname)
2208 node_image[nname] = gnode
2210 inst_config.MapLVsByNode(node_vol_should)
2212 pnode = inst_config.primary_node
2213 node_image[pnode].pinst.append(instance)
2215 for snode in inst_config.secondary_nodes:
2216 nimg = node_image[snode]
2217 nimg.sinst.append(instance)
2218 if pnode not in nimg.sbp:
2219 nimg.sbp[pnode] = []
2220 nimg.sbp[pnode].append(instance)
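    # nimg.sbp ("secondaries by primary") now maps, on each secondary node,
    # the primary node name to the instances mirrored there, e.g. with
    # hypothetical names: node_image["nodeB"].sbp == {"nodeA": ["inst1"]}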
2222 # At this point, we have the in-memory data structures complete,
2223 # except for the runtime information, which we'll gather next
2225 # Due to the way our RPC system works, exact response times cannot be
2226 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2227 # time before and after executing the request, we can at least have a time window.
2229 nvinfo_starttime = time.time()
2230 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2231 self.cfg.GetClusterName())
2232 nvinfo_endtime = time.time()
2234 all_drbd_map = self.cfg.ComputeDRBDMap()
2236 feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
2237 instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
2239 feedback_fn("* Verifying node status")
2243 for node_i in nodeinfo:
2245 nimg = node_image[node]
2249 feedback_fn("* Skipping offline node %s" % (node,))
2253 if node == master_node:
2255 elif node_i.master_candidate:
2256 ntype = "master candidate"
2257 elif node_i.drained:
2263 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2265 msg = all_nvinfo[node].fail_msg
2266 _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2268 nimg.rpc_fail = True
2271 nresult = all_nvinfo[node].payload
2273 nimg.call_ok = self._VerifyNode(node_i, nresult)
2274 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2275 self._VerifyNodeNetwork(node_i, nresult)
2276 self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2279 self._VerifyOob(node_i, nresult)
2282 self._VerifyNodeLVM(node_i, nresult, vg_name)
2283 self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2286 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2287 self._UpdateNodeInstances(node_i, nresult, nimg)
2288 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2289 self._UpdateNodeOS(node_i, nresult, nimg)
2290 if not nimg.os_fail:
2291 if refos_img is None:
2293 self._VerifyNodeOS(node_i, nimg, refos_img)
2295 feedback_fn("* Verifying instance status")
2296 for instance in instancelist:
2298 feedback_fn("* Verifying instance %s" % instance)
2299 inst_config = instanceinfo[instance]
2300 self._VerifyInstance(instance, inst_config, node_image,
2302 inst_nodes_offline = []
2304 pnode = inst_config.primary_node
2305 pnode_img = node_image[pnode]
2306 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2307 self.ENODERPC, pnode, "instance %s, connection to"
2308 " primary node failed", instance)
2310 _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
2311 "instance lives on offline node %s", inst_config.primary_node)
2313 # If the instance is non-redundant we cannot survive losing its primary
2314 # node, so we are not N+1 compliant. On the other hand we have no disk
2315 # templates with more than one secondary, so that situation is not well handled.
2317 # FIXME: does not support file-backed instances
2318 if not inst_config.secondary_nodes:
2319 i_non_redundant.append(instance)
2321 _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2322 instance, "instance has multiple secondary nodes: %s",
2323 utils.CommaJoin(inst_config.secondary_nodes),
2324 code=self.ETYPE_WARNING)
2326 if inst_config.disk_template in constants.DTS_INT_MIRROR:
2327 pnode = inst_config.primary_node
2328 instance_nodes = utils.NiceSort(inst_config.all_nodes)
2329 instance_groups = {}
2331 for node in instance_nodes:
2332 instance_groups.setdefault(nodeinfo_byname[node].group,
2336 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2337 # Sort so that we always list the primary node first.
2338 for group, nodes in sorted(instance_groups.items(),
2339 key=lambda (_, nodes): pnode in nodes,
2342 self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2343 instance, "instance has primary and secondary nodes in"
2344 " different groups: %s", utils.CommaJoin(pretty_list),
2345 code=self.ETYPE_WARNING)
2347 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2348 i_non_a_balanced.append(instance)
2350 for snode in inst_config.secondary_nodes:
2351 s_img = node_image[snode]
2352 _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2353 "instance %s, connection to secondary node failed", instance)
2356 inst_nodes_offline.append(snode)
2358 # warn that the instance lives on offline nodes
2359 _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2360 "instance has offline secondary node(s) %s",
2361 utils.CommaJoin(inst_nodes_offline))
2362 # ... or ghost/non-vm_capable nodes
2363 for node in inst_config.all_nodes:
2364 _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2365 "instance lives on ghost node %s", node)
2366 _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2367 instance, "instance lives on non-vm_capable node %s", node)
2369 feedback_fn("* Verifying orphan volumes")
2370 reserved = utils.FieldSet(*cluster.reserved_lvs)
2371 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2373 feedback_fn("* Verifying orphan instances")
2374 self._VerifyOrphanInstances(instancelist, node_image)
2376 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2377 feedback_fn("* Verifying N+1 Memory redundancy")
2378 self._VerifyNPlusOneMemory(node_image, instanceinfo)
2380 feedback_fn("* Other Notes")
2382 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
2383 % len(i_non_redundant))
2385 if i_non_a_balanced:
2386 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
2387 % len(i_non_a_balanced))
2390 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
2393 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
2397 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2398 """Analyze the post-hooks' result
2400 This method analyses the hook result, handles it, and sends some
2401 nicely-formatted feedback back to the user.
2403 @param phase: one of L{constants.HOOKS_PHASE_POST} or
2404 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2405 @param hooks_results: the results of the multi-node hooks rpc call
2406 @param feedback_fn: function used to send feedback back to the caller
2407 @param lu_result: previous Exec result
2408 @return: the new Exec result, based on the previous result
2412 # We only really run POST phase hooks, and are only interested in their results
2414 if phase == constants.HOOKS_PHASE_POST:
2415 # Used to change hooks' output to proper indentation
2416 feedback_fn("* Hooks Results")
2417 assert hooks_results, "invalid result from hooks"
2419 for node_name in hooks_results:
2420 res = hooks_results[node_name]
2422 test = msg and not res.offline
2423 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2424 "Communication failure in hooks execution: %s", msg)
2425 if res.offline or msg:
2426 # No need to investigate payload if node is offline or gave an error.
2427 # manually override lu_result here, as _ErrorIf only
2428 # overrides self.bad
2431 for script, hkr, output in res.payload:
2432 test = hkr == constants.HKR_FAIL
2433 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2434 "Script %s failed, output:", script)
2436 output = self._HOOKS_INDENT_RE.sub(' ', output)
2437 feedback_fn("%s" % output)
2443 class LUClusterVerifyDisks(NoHooksLU):
2444 """Verifies the cluster disks status.
2449 def ExpandNames(self):
2450 self.needed_locks = {
2451 locking.LEVEL_NODE: locking.ALL_SET,
2452 locking.LEVEL_INSTANCE: locking.ALL_SET,
2454 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2456 def Exec(self, feedback_fn):
2457 """Verify integrity of cluster disks.
2459 @rtype: tuple of three items
2460 @return: a tuple of (dict of node-to-node_error, list of instances
2461 which need activate-disks, dict of instance: (node, volume) for missing volumes
2465 result = res_nodes, res_instances, res_missing = {}, [], {}
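    # Illustrative shapes of the three result members (hypothetical names):
    #   res_nodes     -> {"nodeA": "error text from the LV listing RPC"}
    #   res_instances -> ["inst2"]          # instances needing activate-disks
    #   res_missing   -> {"inst3": [("nodeB", "xenvg/disk0_data")]}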
2467 nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
2468 instances = self.cfg.GetAllInstancesInfo().values()
2471 for inst in instances:
2473 if not inst.admin_up:
2475 inst.MapLVsByNode(inst_lvs)
2476 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2477 for node, vol_list in inst_lvs.iteritems():
2478 for vol in vol_list:
2479 nv_dict[(node, vol)] = inst
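    # e.g. (hypothetical names) {"inst1": {"nodeA": ["xenvg/disk0_data"]}}
    # becomes {("nodeA", "xenvg/disk0_data"): <Instance inst1>}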
2484 node_lvs = self.rpc.call_lv_list(nodes, [])
2485 for node, node_res in node_lvs.items():
2486 if node_res.offline:
2488 msg = node_res.fail_msg
2490 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2491 res_nodes[node] = msg
2494 lvs = node_res.payload
2495 for lv_name, (_, _, lv_online) in lvs.items():
2496 inst = nv_dict.pop((node, lv_name), None)
2497 if (not lv_online and inst is not None
2498 and inst.name not in res_instances):
2499 res_instances.append(inst.name)
2501 # any leftover items in nv_dict are missing LVs, let's arrange the data better
2503 for key, inst in nv_dict.iteritems():
2504 if inst.name not in res_missing:
2505 res_missing[inst.name] = []
2506 res_missing[inst.name].append(key)
2511 class LUClusterRepairDiskSizes(NoHooksLU):
2512 """Verifies the cluster disks sizes.
2517 def ExpandNames(self):
2518 if self.op.instances:
2519 self.wanted_names = []
2520 for name in self.op.instances:
2521 full_name = _ExpandInstanceName(self.cfg, name)
2522 self.wanted_names.append(full_name)
2523 self.needed_locks = {
2524 locking.LEVEL_NODE: [],
2525 locking.LEVEL_INSTANCE: self.wanted_names,
2527 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2529 self.wanted_names = None
2530 self.needed_locks = {
2531 locking.LEVEL_NODE: locking.ALL_SET,
2532 locking.LEVEL_INSTANCE: locking.ALL_SET,
2534 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2536 def DeclareLocks(self, level):
2537 if level == locking.LEVEL_NODE and self.wanted_names is not None:
2538 self._LockInstancesNodes(primary_only=True)
2540 def CheckPrereq(self):
2541 """Check prerequisites.
2543 This only checks the optional instance list against the existing names.
2546 if self.wanted_names is None:
2547 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2549 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2550 in self.wanted_names]
2552 def _EnsureChildSizes(self, disk):
2553 """Ensure children of the disk have the needed disk size.
2555 This is valid mainly for DRBD8 and fixes an issue where the
2556 children have a smaller disk size.
2558 @param disk: an L{ganeti.objects.Disk} object
2561 if disk.dev_type == constants.LD_DRBD8:
2562 assert disk.children, "Empty children for DRBD8?"
2563 fchild = disk.children[0]
2564 mismatch = fchild.size < disk.size
2566 self.LogInfo("Child disk has size %d, parent %d, fixing",
2567 fchild.size, disk.size)
2568 fchild.size = disk.size
2570 # and we recurse on this child only, not on the metadev
2571 return self._EnsureChildSizes(fchild) or mismatch
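      # e.g. a hypothetical DRBD8 disk of size 10240 MiB whose data child
      # reports 10112 MiB gets the child grown to 10240, and True is returned
      # so the caller knows the configuration needs updating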
2575 def Exec(self, feedback_fn):
2576 """Verify the size of cluster disks.
2579 # TODO: check child disks too
2580 # TODO: check differences in size between primary/secondary nodes
2582 for instance in self.wanted_instances:
2583 pnode = instance.primary_node
2584 if pnode not in per_node_disks:
2585 per_node_disks[pnode] = []
2586 for idx, disk in enumerate(instance.disks):
2587 per_node_disks[pnode].append((instance, idx, disk))
2590 for node, dskl in per_node_disks.items():
2591 newl = [v[2].Copy() for v in dskl]
2593 self.cfg.SetDiskID(dsk, node)
2594 result = self.rpc.call_blockdev_getsize(node, newl)
2596 self.LogWarning("Failure in blockdev_getsize call to node"
2597 " %s, ignoring", node)
2599 if len(result.payload) != len(dskl):
2600 logging.warning("Invalid result from node %s: len(dksl)=%d,"
2601 " result.payload=%s", node, len(dskl), result.payload)
2602 self.LogWarning("Invalid result from node %s, ignoring node results",
2605 for ((instance, idx, disk), size) in zip(dskl, result.payload):
2607 self.LogWarning("Disk %d of instance %s did not return size"
2608 " information, ignoring", idx, instance.name)
2610 if not isinstance(size, (int, long)):
2611 self.LogWarning("Disk %d of instance %s did not return valid"
2612 " size information, ignoring", idx, instance.name)
2615 if size != disk.size:
2616 self.LogInfo("Disk %d of instance %s has mismatched size,"
2617 " correcting: recorded %d, actual %d", idx,
2618 instance.name, disk.size, size)
2620 self.cfg.Update(instance, feedback_fn)
2621 changed.append((instance.name, idx, size))
2622 if self._EnsureChildSizes(disk):
2623 self.cfg.Update(instance, feedback_fn)
2624 changed.append((instance.name, idx, disk.size))
2628 class LUClusterRename(LogicalUnit):
2629 """Rename the cluster.
2632 HPATH = "cluster-rename"
2633 HTYPE = constants.HTYPE_CLUSTER
2635 def BuildHooksEnv(self):
2640 "OP_TARGET": self.cfg.GetClusterName(),
2641 "NEW_NAME": self.op.name,
2643 mn = self.cfg.GetMasterNode()
2644 all_nodes = self.cfg.GetNodeList()
2645 return env, [mn], all_nodes
2647 def CheckPrereq(self):
2648 """Verify that the passed name is a valid one.
2651 hostname = netutils.GetHostname(name=self.op.name,
2652 family=self.cfg.GetPrimaryIPFamily())
2654 new_name = hostname.name
2655 self.ip = new_ip = hostname.ip
2656 old_name = self.cfg.GetClusterName()
2657 old_ip = self.cfg.GetMasterIP()
2658 if new_name == old_name and new_ip == old_ip:
2659 raise errors.OpPrereqError("Neither the name nor the IP address of the"
2660 " cluster has changed",
2662 if new_ip != old_ip:
2663 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2664 raise errors.OpPrereqError("The given cluster IP address (%s) is"
2665 " reachable on the network" %
2666 new_ip, errors.ECODE_NOTUNIQUE)
2668 self.op.name = new_name
2670 def Exec(self, feedback_fn):
2671 """Rename the cluster.
2674 clustername = self.op.name
2677 # shutdown the master IP
2678 master = self.cfg.GetMasterNode()
2679 result = self.rpc.call_node_stop_master(master, False)
2680 result.Raise("Could not disable the master role")
2683 cluster = self.cfg.GetClusterInfo()
2684 cluster.cluster_name = clustername
2685 cluster.master_ip = ip
2686 self.cfg.Update(cluster, feedback_fn)
2688 # update the known hosts file
2689 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2690 node_list = self.cfg.GetOnlineNodeList()
2692 node_list.remove(master)
2695 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
2697 result = self.rpc.call_node_start_master(master, False, False)
2698 msg = result.fail_msg
2700 self.LogWarning("Could not re-enable the master role on"
2701 " the master, please restart manually: %s", msg)
2706 class LUClusterSetParams(LogicalUnit):
2707 """Change the parameters of the cluster.
2710 HPATH = "cluster-modify"
2711 HTYPE = constants.HTYPE_CLUSTER
2714 def CheckArguments(self):
2718 if self.op.uid_pool:
2719 uidpool.CheckUidPool(self.op.uid_pool)
2721 if self.op.add_uids:
2722 uidpool.CheckUidPool(self.op.add_uids)
2724 if self.op.remove_uids:
2725 uidpool.CheckUidPool(self.op.remove_uids)
2727 def ExpandNames(self):
2728 # FIXME: in the future maybe other cluster params won't require checking on
2729 # all nodes to be modified.
2730 self.needed_locks = {
2731 locking.LEVEL_NODE: locking.ALL_SET,
2733 self.share_locks[locking.LEVEL_NODE] = 1
2735 def BuildHooksEnv(self):
2740 "OP_TARGET": self.cfg.GetClusterName(),
2741 "NEW_VG_NAME": self.op.vg_name,
2743 mn = self.cfg.GetMasterNode()
2744 return env, [mn], [mn]
2746 def CheckPrereq(self):
2747 """Check prerequisites.
2749 This checks that the given parameters do not conflict and
2750 that the given volume group is valid.
2753 if self.op.vg_name is not None and not self.op.vg_name:
2754 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2755 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2756 " instances exist", errors.ECODE_INVAL)
2758 if self.op.drbd_helper is not None and not self.op.drbd_helper:
2759 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2760 raise errors.OpPrereqError("Cannot disable drbd helper while"
2761 " drbd-based instances exist",
2764 node_list = self.acquired_locks[locking.LEVEL_NODE]
2766 # if vg_name is not None, check the given volume group on all nodes
2768 vglist = self.rpc.call_vg_list(node_list)
2769 for node in node_list:
2770 msg = vglist[node].fail_msg
2772 # ignoring down node
2773 self.LogWarning("Error while gathering data on node %s"
2774 " (ignoring node): %s", node, msg)
2776 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2778 constants.MIN_VG_SIZE)
2780 raise errors.OpPrereqError("Error on node '%s': %s" %
2781 (node, vgstatus), errors.ECODE_ENVIRON)
2783 if self.op.drbd_helper:
2784 # checks given drbd helper on all nodes
2785 helpers = self.rpc.call_drbd_helper(node_list)
2786 for node in node_list:
2787 ninfo = self.cfg.GetNodeInfo(node)
2789 self.LogInfo("Not checking drbd helper on offline node %s", node)
2791 msg = helpers[node].fail_msg
2793 raise errors.OpPrereqError("Error checking drbd helper on node"
2794 " '%s': %s" % (node, msg),
2795 errors.ECODE_ENVIRON)
2796 node_helper = helpers[node].payload
2797 if node_helper != self.op.drbd_helper:
2798 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2799 (node, node_helper), errors.ECODE_ENVIRON)
2801 self.cluster = cluster = self.cfg.GetClusterInfo()
2802 # validate params changes
2803 if self.op.beparams:
2804 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2805 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2807 if self.op.ndparams:
2808 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2809 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2811 if self.op.nicparams:
2812 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2813 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2814 objects.NIC.CheckParameterSyntax(self.new_nicparams)
2817 # check all instances for consistency
2818 for instance in self.cfg.GetAllInstancesInfo().values():
2819 for nic_idx, nic in enumerate(instance.nics):
2820 params_copy = copy.deepcopy(nic.nicparams)
2821 params_filled = objects.FillDict(self.new_nicparams, params_copy)
2823 # check parameter syntax
2825 objects.NIC.CheckParameterSyntax(params_filled)
2826 except errors.ConfigurationError, err:
2827 nic_errors.append("Instance %s, nic/%d: %s" %
2828 (instance.name, nic_idx, err))
2830 # if we're moving instances to routed, check that they have an ip
2831 target_mode = params_filled[constants.NIC_MODE]
2832 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2833 nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2834 (instance.name, nic_idx))
2836 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2837 "\n".join(nic_errors))
2839 # hypervisor list/parameters
2840 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2841 if self.op.hvparams:
2842 for hv_name, hv_dict in self.op.hvparams.items():
2843 if hv_name not in self.new_hvparams:
2844 self.new_hvparams[hv_name] = hv_dict
2846 self.new_hvparams[hv_name].update(hv_dict)
2848 # os hypervisor parameters
2849 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2851 for os_name, hvs in self.op.os_hvp.items():
2852 if os_name not in self.new_os_hvp:
2853 self.new_os_hvp[os_name] = hvs
2855 for hv_name, hv_dict in hvs.items():
2856 if hv_name not in self.new_os_hvp[os_name]:
2857 self.new_os_hvp[os_name][hv_name] = hv_dict
2859 self.new_os_hvp[os_name][hv_name].update(hv_dict)
2862 self.new_osp = objects.FillDict(cluster.osparams, {})
2863 if self.op.osparams:
2864 for os_name, osp in self.op.osparams.items():
2865 if os_name not in self.new_osp:
2866 self.new_osp[os_name] = {}
2868 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2871 if not self.new_osp[os_name]:
2872 # we removed all parameters
2873 del self.new_osp[os_name]
2875 # check the parameter validity (remote check)
2876 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2877 os_name, self.new_osp[os_name])
2879 # changes to the hypervisor list
2880 if self.op.enabled_hypervisors is not None:
2881 self.hv_list = self.op.enabled_hypervisors
2882 for hv in self.hv_list:
2883 # if the hypervisor doesn't already exist in the cluster
2884 # hvparams, we initialize it to empty, and then (in both
2885 # cases) we make sure to fill the defaults, as we might not
2886 # have a complete defaults list if the hypervisor wasn't enabled before
2888 if hv not in new_hvp:
2890 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2891 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2893 self.hv_list = cluster.enabled_hypervisors
2895 if self.op.hvparams or self.op.enabled_hypervisors is not None:
2896 # either the enabled list has changed, or the parameters have, validate
2897 for hv_name, hv_params in self.new_hvparams.items():
2898 if ((self.op.hvparams and hv_name in self.op.hvparams) or
2899 (self.op.enabled_hypervisors and
2900 hv_name in self.op.enabled_hypervisors)):
2901 # either this is a new hypervisor, or its parameters have changed
2902 hv_class = hypervisor.GetHypervisor(hv_name)
2903 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2904 hv_class.CheckParameterSyntax(hv_params)
2905 _CheckHVParams(self, node_list, hv_name, hv_params)
2908 # no need to check any newly-enabled hypervisors, since the
2909 # defaults have already been checked in the above code-block
2910 for os_name, os_hvp in self.new_os_hvp.items():
2911 for hv_name, hv_params in os_hvp.items():
2912 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2913 # we need to fill in the new os_hvp on top of the actual hv_p
2914 cluster_defaults = self.new_hvparams.get(hv_name, {})
2915 new_osp = objects.FillDict(cluster_defaults, hv_params)
2916 hv_class = hypervisor.GetHypervisor(hv_name)
2917 hv_class.CheckParameterSyntax(new_osp)
2918 _CheckHVParams(self, node_list, hv_name, new_osp)
2920 if self.op.default_iallocator:
2921 alloc_script = utils.FindFile(self.op.default_iallocator,
2922 constants.IALLOCATOR_SEARCH_PATH,
2924 if alloc_script is None:
2925 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2926 " specified" % self.op.default_iallocator,
2929 def Exec(self, feedback_fn):
2930 """Change the parameters of the cluster.
2933 if self.op.vg_name is not None:
2934 new_volume = self.op.vg_name
2937 if new_volume != self.cfg.GetVGName():
2938 self.cfg.SetVGName(new_volume)
2940 feedback_fn("Cluster LVM configuration already in desired"
2941 " state, not changing")
2942 if self.op.drbd_helper is not None:
2943 new_helper = self.op.drbd_helper
2946 if new_helper != self.cfg.GetDRBDHelper():
2947 self.cfg.SetDRBDHelper(new_helper)
2949 feedback_fn("Cluster DRBD helper already in desired state,"
2951 if self.op.hvparams:
2952 self.cluster.hvparams = self.new_hvparams
2954 self.cluster.os_hvp = self.new_os_hvp
2955 if self.op.enabled_hypervisors is not None:
2956 self.cluster.hvparams = self.new_hvparams
2957 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2958 if self.op.beparams:
2959 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2960 if self.op.nicparams:
2961 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2962 if self.op.osparams:
2963 self.cluster.osparams = self.new_osp
2964 if self.op.ndparams:
2965 self.cluster.ndparams = self.new_ndparams
2967 if self.op.candidate_pool_size is not None:
2968 self.cluster.candidate_pool_size = self.op.candidate_pool_size
2969 # we need to update the pool size here, otherwise the save will fail
2970 _AdjustCandidatePool(self, [])
2972 if self.op.maintain_node_health is not None:
2973 self.cluster.maintain_node_health = self.op.maintain_node_health
2975 if self.op.prealloc_wipe_disks is not None:
2976 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
2978 if self.op.add_uids is not None:
2979 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2981 if self.op.remove_uids is not None:
2982 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2984 if self.op.uid_pool is not None:
2985 self.cluster.uid_pool = self.op.uid_pool
2987 if self.op.default_iallocator is not None:
2988 self.cluster.default_iallocator = self.op.default_iallocator
2990 if self.op.reserved_lvs is not None:
2991 self.cluster.reserved_lvs = self.op.reserved_lvs
2993 def helper_os(aname, mods, desc):
2995 lst = getattr(self.cluster, aname)
2996 for key, val in mods:
2997 if key == constants.DDM_ADD:
2999 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
3002 elif key == constants.DDM_REMOVE:
3006 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
3008 raise errors.ProgrammerError("Invalid modification '%s'" % key)
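    # helper_os is fed (action, value) pairs taken from the opcode, e.g. a
    # hypothetical [(constants.DDM_ADD, "debian-image"),
    #               (constants.DDM_REMOVE, "lenny-image")]
    # to add one OS name to, and remove another from, the given cluster list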
3010 if self.op.hidden_os:
3011 helper_os("hidden_os", self.op.hidden_os, "hidden")
3013 if self.op.blacklisted_os:
3014 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
3016 if self.op.master_netdev:
3017 master = self.cfg.GetMasterNode()
3018 feedback_fn("Shutting down master ip on the current netdev (%s)" %
3019 self.cluster.master_netdev)
3020 result = self.rpc.call_node_stop_master(master, False)
3021 result.Raise("Could not disable the master ip")
3022 feedback_fn("Changing master_netdev from %s to %s" %
3023 (self.cluster.master_netdev, self.op.master_netdev))
3024 self.cluster.master_netdev = self.op.master_netdev
3026 self.cfg.Update(self.cluster, feedback_fn)
3028 if self.op.master_netdev:
3029 feedback_fn("Starting the master ip on the new master netdev (%s)" %
3030 self.op.master_netdev)
3031 result = self.rpc.call_node_start_master(master, False, False)
3033 self.LogWarning("Could not re-enable the master ip on"
3034 " the master, please restart manually: %s",
3038 def _UploadHelper(lu, nodes, fname):
3039 """Helper for uploading a file and showing warnings.
3042 if os.path.exists(fname):
3043 result = lu.rpc.call_upload_file(nodes, fname)
3044 for to_node, to_result in result.items():
3045 msg = to_result.fail_msg
3047 msg = ("Copy of file %s to node %s failed: %s" %
3048 (fname, to_node, msg))
3049 lu.proc.LogWarning(msg)
3052 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
3053 """Distribute additional files which are part of the cluster configuration.
3055 ConfigWriter takes care of distributing the config and ssconf files, but
3056 there are more files which should be distributed to all nodes. This function
3057 makes sure those are copied.
3059 @param lu: calling logical unit
3060 @param additional_nodes: list of nodes not in the config to distribute to
3061 @type additional_vm: boolean
3062 @param additional_vm: whether the additional nodes are vm-capable or not
3065 # 1. Gather target nodes
3066 myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
3067 dist_nodes = lu.cfg.GetOnlineNodeList()
3068 nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
3069 vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
3070 if additional_nodes is not None:
3071 dist_nodes.extend(additional_nodes)
3073 vm_nodes.extend(additional_nodes)
3074 if myself.name in dist_nodes:
3075 dist_nodes.remove(myself.name)
3076 if myself.name in vm_nodes:
3077 vm_nodes.remove(myself.name)
3079 # 2. Gather files to distribute
3080 dist_files = set([constants.ETC_HOSTS,
3081 constants.SSH_KNOWN_HOSTS_FILE,
3082 constants.RAPI_CERT_FILE,
3083 constants.RAPI_USERS_FILE,
3084 constants.CONFD_HMAC_KEY,
3085 constants.CLUSTER_DOMAIN_SECRET_FILE,
3089 enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
3090 for hv_name in enabled_hypervisors:
3091 hv_class = hypervisor.GetHypervisor(hv_name)
3092 vm_files.update(hv_class.GetAncillaryFiles())
3094 # 3. Perform the files upload
3095 for fname in dist_files:
3096 _UploadHelper(lu, dist_nodes, fname)
3097 for fname in vm_files:
3098 _UploadHelper(lu, vm_nodes, fname)
3101 class LUClusterRedistConf(NoHooksLU):
3102 """Force the redistribution of cluster configuration.
3104 This is a very simple LU.
3109 def ExpandNames(self):
3110 self.needed_locks = {
3111 locking.LEVEL_NODE: locking.ALL_SET,
3113 self.share_locks[locking.LEVEL_NODE] = 1
3115 def Exec(self, feedback_fn):
3116 """Redistribute the configuration.
3119 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
3120 _RedistributeAncillaryFiles(self)
3123 def _WaitForSync(lu, instance, disks=None, oneshot=False):
3124 """Sleep and poll for an instance's disk to sync.
3127 if not instance.disks or disks is not None and not disks:
3130 disks = _ExpandCheckDisks(instance, disks)
3133 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3135 node = instance.primary_node
3138 lu.cfg.SetDiskID(dev, node)
3140 # TODO: Convert to utils.Retry
3143 degr_retries = 10 # in seconds, as we sleep 1 second each time
3147 cumul_degraded = False
3148 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3149 msg = rstats.fail_msg
3151 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3154 raise errors.RemoteError("Can't contact node %s for mirror data,"
3155 " aborting." % node)
3158 rstats = rstats.payload
3160 for i, mstat in enumerate(rstats):
3162 lu.LogWarning("Can't compute data for node %s/%s",
3163 node, disks[i].iv_name)
3166 cumul_degraded = (cumul_degraded or
3167 (mstat.is_degraded and mstat.sync_percent is None))
3168 if mstat.sync_percent is not None:
3170 if mstat.estimated_time is not None:
3171 rem_time = ("%s remaining (estimated)" %
3172 utils.FormatSeconds(mstat.estimated_time))
3173 max_time = mstat.estimated_time
3175 rem_time = "no time estimate"
3176 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3177 (disks[i].iv_name, mstat.sync_percent, rem_time))
3179 # if we're done but degraded, let's do a few small retries, to
3180 # make sure we see a stable and not transient situation; therefore
3181 # we force restart of the loop
3182 if (done or oneshot) and cumul_degraded and degr_retries > 0:
3183 logging.info("Degraded disks found, %d retries left", degr_retries)
3191 time.sleep(min(60, max_time))
3194 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3195 return not cumul_degraded
3198 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3199 """Check that mirrors are not degraded.
3201 The ldisk parameter, if True, will change the test from the
3202 is_degraded attribute (which represents overall non-ok status for
3203 the device(s)) to the ldisk (representing the local storage status).
3206 lu.cfg.SetDiskID(dev, node)
3210 if on_primary or dev.AssembleOnSecondary():
3211 rstats = lu.rpc.call_blockdev_find(node, dev)
3212 msg = rstats.fail_msg
3214 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3216 elif not rstats.payload:
3217 lu.LogWarning("Can't find disk on node %s", node)
3221 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3223 result = result and not rstats.payload.is_degraded
3226 for child in dev.children:
3227 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3232 class LUOobCommand(NoHooksLU):
3233 """Logical unit for OOB handling.
3237 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
3239 def CheckPrereq(self):
3240 """Check prerequisites.
3243 - the node exists in the configuration
3246 Any errors are signaled by raising errors.OpPrereqError.
3250 self.master_node = self.cfg.GetMasterNode()
3252 if self.op.node_names:
3253 if self.op.command in self._SKIP_MASTER:
3254 if self.master_node in self.op.node_names:
3255 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
3256 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
3258 if master_oob_handler:
3259 additional_text = ("Run '%s %s %s' if you want to operate on the"
3260 " master regardless") % (master_oob_handler,
3264 additional_text = "The master node does not support out-of-band"
3266 raise errors.OpPrereqError(("Operating on the master node %s is not"
3267 " allowed for %s\n%s") %
3268 (self.master_node, self.op.command,
3269 additional_text), errors.ECODE_INVAL)
3271 self.op.node_names = self.cfg.GetNodeList()
3272 if self.op.command in self._SKIP_MASTER:
3273 self.op.node_names.remove(self.master_node)
3275 if self.op.command in self._SKIP_MASTER:
3276 assert self.master_node not in self.op.node_names
3278 for node_name in self.op.node_names:
3279 node = self.cfg.GetNodeInfo(node_name)
3282 raise errors.OpPrereqError("Node %s not found" % node_name,
3285 self.nodes.append(node)
3287 if (not self.op.ignore_status and
3288 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
3289 raise errors.OpPrereqError(("Cannot power off node %s because it is"
3290 " not marked offline") % node_name,
3293 def ExpandNames(self):
3294 """Gather locks we need.
3297 if self.op.node_names:
3298 self.op.node_names = [_ExpandNodeName(self.cfg, name)
3299 for name in self.op.node_names]
3300 lock_names = self.op.node_names
3302 lock_names = locking.ALL_SET
3304 self.needed_locks = {
3305 locking.LEVEL_NODE: lock_names,
3308 def Exec(self, feedback_fn):
3309 """Execute OOB and return result if we expect any.
3312 master_node = self.master_node
3315 for node in self.nodes:
3316 node_entry = [(constants.RS_NORMAL, node.name)]
3317 ret.append(node_entry)
3319 oob_program = _SupportsOob(self.cfg, node)
3322 node_entry.append((constants.RS_UNAVAIL, None))
3325 logging.info("Executing out-of-band command '%s' using '%s' on %s",
3326 self.op.command, oob_program, node.name)
3327 result = self.rpc.call_run_oob(master_node, oob_program,
3328 self.op.command, node.name,
3332 self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
3333 node.name, result.fail_msg)
3334 node_entry.append((constants.RS_NODATA, None))
3337 self._CheckPayload(result)
3338 except errors.OpExecError, err:
3339 self.LogWarning("The payload returned by '%s' is not valid: %s",
3341 node_entry.append((constants.RS_NODATA, None))
3343 if self.op.command == constants.OOB_HEALTH:
3344 # For health we should log important events
3345 for item, status in result.payload:
3346 if status in [constants.OOB_STATUS_WARNING,
3347 constants.OOB_STATUS_CRITICAL]:
3348 self.LogWarning("On node '%s' item '%s' has status '%s'",
3349 node.name, item, status)
3351 if self.op.command == constants.OOB_POWER_ON:
3353 elif self.op.command == constants.OOB_POWER_OFF:
3354 node.powered = False
3355 elif self.op.command == constants.OOB_POWER_STATUS:
3356 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
3357 if powered != node.powered:
3358 logging.warning(("Recorded power state (%s) of node '%s' does not"
3359 " match actual power state (%s)"), node.powered,
3362 # For configuration changing commands we should update the node
3363 if self.op.command in (constants.OOB_POWER_ON,
3364 constants.OOB_POWER_OFF):
3365 self.cfg.Update(node, feedback_fn)
3367 node_entry.append((constants.RS_NORMAL, result.payload))
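      # A hypothetical per-node entry in the final result, for a successful
      # "power-status" query, would look like
      #   [(constants.RS_NORMAL, "nodeA"), (constants.RS_NORMAL, {"powered": True})]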
3371 def _CheckPayload(self, result):
3372 """Checks if the payload is valid.
3374 @param result: RPC result
3375 @raises errors.OpExecError: If payload is not valid
3379 if self.op.command == constants.OOB_HEALTH:
3380 if not isinstance(result.payload, list):
3381 errs.append("command 'health' is expected to return a list but got %s" %
3382 type(result.payload))
3384 for item, status in result.payload:
3385 if status not in constants.OOB_STATUSES:
3386 errs.append("health item '%s' has invalid status '%s'" %
3389 if self.op.command == constants.OOB_POWER_STATUS:
3390 if not isinstance(result.payload, dict):
3391 errs.append("power-status is expected to return a dict but got %s" %
3392 type(result.payload))
3394 if self.op.command in [
3395 constants.OOB_POWER_ON,
3396 constants.OOB_POWER_OFF,
3397 constants.OOB_POWER_CYCLE,
3399 if result.payload is not None:
3400 errs.append("%s is expected to not return payload but got '%s'" %
3401 (self.op.command, result.payload))
3404 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
3405 utils.CommaJoin(errs))
3409 class LUOsDiagnose(NoHooksLU):
3410 """Logical unit for OS diagnose/query.
3415 _BLK = "blacklisted"
3417 _FIELDS_STATIC = utils.FieldSet()
3418 _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
3419 "parameters", "api_versions", _HID, _BLK)
3421 def CheckArguments(self):
3423 raise errors.OpPrereqError("Selective OS query not supported",
3426 _CheckOutputFields(static=self._FIELDS_STATIC,
3427 dynamic=self._FIELDS_DYNAMIC,
3428 selected=self.op.output_fields)
3430 def ExpandNames(self):
3431 # Lock all nodes, in shared mode
3432 # Temporary removal of locks, should be reverted later
3433 # TODO: reintroduce locks when they are lighter-weight
3434 self.needed_locks = {}
3435 #self.share_locks[locking.LEVEL_NODE] = 1
3436 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3439 def _DiagnoseByOS(rlist):
3440 """Remaps a per-node return list into an a per-os per-node dictionary
3442 @param rlist: a map with node names as keys and OS objects as values
3445 @return: a dictionary with osnames as keys and as value another
3446 map, with nodes as keys and tuples of (path, status, diagnose,
3447 variants, parameters, api_versions) as values, eg::
3449 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3450 (/srv/..., False, "invalid api")],
3451 "node2": [(/srv/..., True, "", [], [])]}
3456 # we build here the list of nodes that didn't fail the RPC (at RPC
3457 # level), so that nodes with a non-responding node daemon don't
3458 # make all OSes invalid
3459 good_nodes = [node_name for node_name in rlist
3460 if not rlist[node_name].fail_msg]
3461 for node_name, nr in rlist.items():
3462 if nr.fail_msg or not nr.payload:
3464 for (name, path, status, diagnose, variants,
3465 params, api_versions) in nr.payload:
3466 if name not in all_os:
3467 # build a list of nodes for this os containing empty lists
3468 # for each node in node_list
3470 for nname in good_nodes:
3471 all_os[name][nname] = []
3472 # convert params from [name, help] to (name, help)
3473 params = [tuple(v) for v in params]
3474 all_os[name][node_name].append((path, status, diagnose,
3475 variants, params, api_versions))
3478 def Exec(self, feedback_fn):
3479 """Compute the list of OSes.
3482 valid_nodes = [node.name
3483 for node in self.cfg.GetAllNodesInfo().values()
3484 if not node.offline and node.vm_capable]
3485 node_data = self.rpc.call_os_diagnose(valid_nodes)
3486 pol = self._DiagnoseByOS(node_data)
3488 cluster = self.cfg.GetClusterInfo()
3490 for os_name in utils.NiceSort(pol.keys()):
3491 os_data = pol[os_name]
3494 (variants, params, api_versions) = null_state = (set(), set(), set())
3495 for idx, osl in enumerate(os_data.values()):
3496 valid = bool(valid and osl and osl[0][1])
3498 (variants, params, api_versions) = null_state
3500 node_variants, node_params, node_api = osl[0][3:6]
3501 if idx == 0: # first entry
3502 variants = set(node_variants)
3503 params = set(node_params)
3504 api_versions = set(node_api)
3505 else: # keep consistency
3506 variants.intersection_update(node_variants)
3507 params.intersection_update(node_params)
3508 api_versions.intersection_update(node_api)
3510 is_hid = os_name in cluster.hidden_os
3511 is_blk = os_name in cluster.blacklisted_os
3512 if ((self._HID not in self.op.output_fields and is_hid) or
3513 (self._BLK not in self.op.output_fields and is_blk) or
3514 (self._VLD not in self.op.output_fields and not valid)):
3517 for field in self.op.output_fields:
3520 elif field == self._VLD:
3522 elif field == "node_status":
3523 # this is just a copy of the dict
3525 for node_name, nos_list in os_data.items():
3526 val[node_name] = nos_list
3527 elif field == "variants":
3528 val = utils.NiceSort(list(variants))
3529 elif field == "parameters":
3531 elif field == "api_versions":
3532 val = list(api_versions)
3533 elif field == self._HID:
3535 elif field == self._BLK:
3538 raise errors.ParameterError(field)
3545 class LUNodeRemove(LogicalUnit):
3546 """Logical unit for removing a node.
3549 HPATH = "node-remove"
3550 HTYPE = constants.HTYPE_NODE
3552 def BuildHooksEnv(self):
3555 This doesn't run on the target node in the pre phase as a failed
3556 node would then be impossible to remove.
3560 "OP_TARGET": self.op.node_name,
3561 "NODE_NAME": self.op.node_name,
3563 all_nodes = self.cfg.GetNodeList()
3565 all_nodes.remove(self.op.node_name)
3567 logging.warning("Node %s which is about to be removed not found"
3568 " in the all nodes list", self.op.node_name)
3569 return env, all_nodes, all_nodes
3571 def CheckPrereq(self):
3572 """Check prerequisites.
3575 - the node exists in the configuration
3576 - it does not have primary or secondary instances
3577 - it's not the master
3579 Any errors are signaled by raising errors.OpPrereqError.
3582 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3583 node = self.cfg.GetNodeInfo(self.op.node_name)
3584 assert node is not None
3586 instance_list = self.cfg.GetInstanceList()
3588 masternode = self.cfg.GetMasterNode()
3589 if node.name == masternode:
3590 raise errors.OpPrereqError("Node is the master node,"
3591 " you need to failover first.",
3594 for instance_name in instance_list:
3595 instance = self.cfg.GetInstanceInfo(instance_name)
3596 if node.name in instance.all_nodes:
3597 raise errors.OpPrereqError("Instance %s is still running on the node,"
3598 " please remove first." % instance_name,
3600 self.op.node_name = node.name
3603 def Exec(self, feedback_fn):
3604 """Removes the node from the cluster.
3608 logging.info("Stopping the node daemon and removing configs from node %s",
3611 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3613 # Promote nodes to master candidate as needed
3614 _AdjustCandidatePool(self, exceptions=[node.name])
3615 self.context.RemoveNode(node.name)
3617 # Run post hooks on the node before it's removed
3618 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3620 hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3622 # pylint: disable-msg=W0702
3623 self.LogWarning("Errors occurred running hooks on %s" % node.name)
3625 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3626 msg = result.fail_msg
3628 self.LogWarning("Errors encountered on the remote node while leaving"
3629 " the cluster: %s", msg)
3631 # Remove node from our /etc/hosts
3632 if self.cfg.GetClusterInfo().modify_etc_hosts:
3633 master_node = self.cfg.GetMasterNode()
3634 result = self.rpc.call_etc_hosts_modify(master_node,
3635 constants.ETC_HOSTS_REMOVE,
3637 result.Raise("Can't update hosts file with new host data")
3638 _RedistributeAncillaryFiles(self)
3641 class _NodeQuery(_QueryBase):
3642 FIELDS = query.NODE_FIELDS
3644 def ExpandNames(self, lu):
3645 lu.needed_locks = {}
3646 lu.share_locks[locking.LEVEL_NODE] = 1
3649 self.wanted = _GetWantedNodes(lu, self.names)
3651 self.wanted = locking.ALL_SET
3653 self.do_locking = (self.use_locking and
3654 query.NQ_LIVE in self.requested_data)
3657 # if we don't request only static fields, we need to lock the nodes
3658 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
3660 def DeclareLocks(self, lu, level):
3663 def _GetQueryData(self, lu):
3664 """Computes the list of nodes and their attributes.
3667 all_info = lu.cfg.GetAllNodesInfo()
3669 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
3671 # Gather data as requested
3672 if query.NQ_LIVE in self.requested_data:
3673 # filter out non-vm_capable nodes
3674 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
3676 node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
3677 lu.cfg.GetHypervisorType())
3678 live_data = dict((name, nresult.payload)
3679 for (name, nresult) in node_data.items()
3680 if not nresult.fail_msg and nresult.payload)
3684 if query.NQ_INST in self.requested_data:
3685 node_to_primary = dict([(name, set()) for name in nodenames])
3686 node_to_secondary = dict([(name, set()) for name in nodenames])
3688 inst_data = lu.cfg.GetAllInstancesInfo()
3690 for inst in inst_data.values():
3691 if inst.primary_node in node_to_primary:
3692 node_to_primary[inst.primary_node].add(inst.name)
3693 for secnode in inst.secondary_nodes:
3694 if secnode in node_to_secondary:
3695 node_to_secondary[secnode].add(inst.name)
3697 node_to_primary = None
3698 node_to_secondary = None
3700 if query.NQ_OOB in self.requested_data:
3701 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
3702 for name, node in all_info.iteritems())
3706 if query.NQ_GROUP in self.requested_data:
3707 groups = lu.cfg.GetAllNodeGroupsInfo()
3711 return query.NodeQueryData([all_info[name] for name in nodenames],
3712 live_data, lu.cfg.GetMasterNode(),
3713 node_to_primary, node_to_secondary, groups,
3714 oob_support, lu.cfg.GetClusterInfo())
3717 class LUNodeQuery(NoHooksLU):
3718 """Logical unit for querying nodes.
3721 # pylint: disable-msg=W0142
3724 def CheckArguments(self):
3725 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
3726 self.op.output_fields, self.op.use_locking)
3728 def ExpandNames(self):
3729 self.nq.ExpandNames(self)
3731 def Exec(self, feedback_fn):
3732 return self.nq.OldStyleQuery(self)
3735 class LUNodeQueryvols(NoHooksLU):
3736 """Logical unit for getting volumes on node(s).
3740 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3741 _FIELDS_STATIC = utils.FieldSet("node")
3743 def CheckArguments(self):
3744 _CheckOutputFields(static=self._FIELDS_STATIC,
3745 dynamic=self._FIELDS_DYNAMIC,
3746 selected=self.op.output_fields)
3748 def ExpandNames(self):
3749 self.needed_locks = {}
3750 self.share_locks[locking.LEVEL_NODE] = 1
3751 if not self.op.nodes:
3752 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3754 self.needed_locks[locking.LEVEL_NODE] = \
3755 _GetWantedNodes(self, self.op.nodes)
3757 def Exec(self, feedback_fn):
3758 """Computes the list of nodes and their attributes.
3761 nodenames = self.acquired_locks[locking.LEVEL_NODE]
3762 volumes = self.rpc.call_node_volumes(nodenames)
3764 ilist = [self.cfg.GetInstanceInfo(iname) for iname
3765 in self.cfg.GetInstanceList()]
3767 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3770 for node in nodenames:
3771 nresult = volumes[node]
3774 msg = nresult.fail_msg
3776 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3779 node_vols = nresult.payload[:]
3780 node_vols.sort(key=lambda vol: vol['dev'])
3782 for vol in node_vols:
3784 for field in self.op.output_fields:
3787 elif field == "phys":
3791 elif field == "name":
3793 elif field == "size":
3794 val = int(float(vol['size']))
3795 elif field == "instance":
3797 if node not in lv_by_node[inst]:
3799 if vol['name'] in lv_by_node[inst][node]:
3805 raise errors.ParameterError(field)
3806 node_output.append(str(val))
3808 output.append(node_output)
3813 class LUNodeQueryStorage(NoHooksLU):
3814 """Logical unit for getting information on storage units on node(s).
3817 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3820 def CheckArguments(self):
3821 _CheckOutputFields(static=self._FIELDS_STATIC,
3822 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3823 selected=self.op.output_fields)
3825 def ExpandNames(self):
3826 self.needed_locks = {}
3827 self.share_locks[locking.LEVEL_NODE] = 1
3830 self.needed_locks[locking.LEVEL_NODE] = \
3831 _GetWantedNodes(self, self.op.nodes)
3833 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3835 def Exec(self, feedback_fn):
3836 """Computes the list of nodes and their attributes.
3839 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3841 # Always get name to sort by
3842 if constants.SF_NAME in self.op.output_fields:
3843 fields = self.op.output_fields[:]
3845 fields = [constants.SF_NAME] + self.op.output_fields
3847 # Never ask for node or type as it's only known to the LU
3848 for extra in [constants.SF_NODE, constants.SF_TYPE]:
3849 while extra in fields:
3850 fields.remove(extra)
3852 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3853 name_idx = field_idx[constants.SF_NAME]
3855 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3856 data = self.rpc.call_storage_list(self.nodes,
3857 self.op.storage_type, st_args,
3858 self.op.name, fields)
3862 for node in utils.NiceSort(self.nodes):
3863 nresult = data[node]
3867 msg = nresult.fail_msg
3869 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3872 rows = dict([(row[name_idx], row) for row in nresult.payload])
3874 for name in utils.NiceSort(rows.keys()):
3879 for field in self.op.output_fields:
3880 if field == constants.SF_NODE:
3882 elif field == constants.SF_TYPE:
3883 val = self.op.storage_type
3884 elif field in field_idx:
3885 val = row[field_idx[field]]
3887 raise errors.ParameterError(field)
3896 class _InstanceQuery(_QueryBase):
3897 FIELDS = query.INSTANCE_FIELDS
3899 def ExpandNames(self, lu):
3900 lu.needed_locks = {}
3901 lu.share_locks[locking.LEVEL_INSTANCE] = 1
3902 lu.share_locks[locking.LEVEL_NODE] = 1
3905 self.wanted = _GetWantedInstances(lu, self.names)
3907 self.wanted = locking.ALL_SET
3909 self.do_locking = (self.use_locking and
3910 query.IQ_LIVE in self.requested_data)
3912 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3913 lu.needed_locks[locking.LEVEL_NODE] = []
3914 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3916 def DeclareLocks(self, lu, level):
3917 if level == locking.LEVEL_NODE and self.do_locking:
3918 lu._LockInstancesNodes() # pylint: disable-msg=W0212
3920 def _GetQueryData(self, lu):
3921 """Computes the list of instances and their attributes.
3924 cluster = lu.cfg.GetClusterInfo()
3925 all_info = lu.cfg.GetAllInstancesInfo()
3927 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
3929 instance_list = [all_info[name] for name in instance_names]
3930 nodes = frozenset(itertools.chain(*(inst.all_nodes
3931 for inst in instance_list)))
3932 hv_list = list(set([inst.hypervisor for inst in instance_list]))
3935 wrongnode_inst = set()
3937 # Gather data as requested
3938 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
3940 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
3942 result = node_data[name]
3944 # offline nodes will be in both lists
3945 assert result.fail_msg
3946 offline_nodes.append(name)
3948 bad_nodes.append(name)
3949 elif result.payload:
3950 for inst in result.payload:
3951 if all_info[inst].primary_node == name:
3952 live_data.update(result.payload)
3954 wrongnode_inst.add(inst)
3955 # else no instance is alive
3959 if query.IQ_DISKUSAGE in self.requested_data:
3960 disk_usage = dict((inst.name,
3961 _ComputeDiskSize(inst.disk_template,
3962 [{"size": disk.size}
3963 for disk in inst.disks]))
3964 for inst in instance_list)
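# disk_usage maps each instance name to the disk space (in MiB) required by
# its disks under its disk template, as computed by _ComputeDiskSize from the
# per-disk sizes.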
3968 if query.IQ_CONSOLE in self.requested_data:
3970 for inst in instance_list:
3971 if inst.name in live_data:
3972 # Instance is running
3973 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
3975 consinfo[inst.name] = None
3976 assert set(consinfo.keys()) == set(instance_names)
3980 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
3981 disk_usage, offline_nodes, bad_nodes,
3982 live_data, wrongnode_inst, consinfo)
3985 class LUQuery(NoHooksLU):
3986 """Query for resources/items of a certain kind.
3989 # pylint: disable-msg=W0142
3992 def CheckArguments(self):
3993 qcls = _GetQueryImplementation(self.op.what)
3995 self.impl = qcls(self.op.filter, self.op.fields, False)
3997 def ExpandNames(self):
3998 self.impl.ExpandNames(self)
4000 def DeclareLocks(self, level):
4001 self.impl.DeclareLocks(self, level)
4003 def Exec(self, feedback_fn):
4004 return self.impl.NewStyleQuery(self)
4007 class LUQueryFields(NoHooksLU):
4008 """Query for resources/items of a certain kind.
4011 # pylint: disable-msg=W0142
4014 def CheckArguments(self):
4015 self.qcls = _GetQueryImplementation(self.op.what)
4017 def ExpandNames(self):
4018 self.needed_locks = {}
4020 def Exec(self, feedback_fn):
4021 return self.qcls.FieldsQuery(self.op.fields)
4024 class LUNodeModifyStorage(NoHooksLU):
4025 """Logical unit for modifying a storage volume on a node.
4030 def CheckArguments(self):
4031 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4033 storage_type = self.op.storage_type
4036 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
4038 raise errors.OpPrereqError("Storage units of type '%s' can not be"
4039 " modified" % storage_type,
4042 diff = set(self.op.changes.keys()) - modifiable
4044 raise errors.OpPrereqError("The following fields can not be modified for"
4045 " storage units of type '%s': %r" %
4046 (storage_type, list(diff)),
4049 def ExpandNames(self):
4050 self.needed_locks = {
4051 locking.LEVEL_NODE: self.op.node_name,
4054 def Exec(self, feedback_fn):
4055 """Computes the list of nodes and their attributes.
4058 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4059 result = self.rpc.call_storage_modify(self.op.node_name,
4060 self.op.storage_type, st_args,
4061 self.op.name, self.op.changes)
4062 result.Raise("Failed to modify storage unit '%s' on %s" %
4063 (self.op.name, self.op.node_name))
4066 class LUNodeAdd(LogicalUnit):
4067 """Logical unit for adding node to the cluster.
4071 HTYPE = constants.HTYPE_NODE
4072 _NFLAGS = ["master_capable", "vm_capable"]
4074 def CheckArguments(self):
4075 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
4076 # validate/normalize the node name
4077 self.hostname = netutils.GetHostname(name=self.op.node_name,
4078 family=self.primary_ip_family)
4079 self.op.node_name = self.hostname.name
4080 if self.op.readd and self.op.group:
4081 raise errors.OpPrereqError("Cannot pass a node group when a node is"
4082 " being readded", errors.ECODE_INVAL)
4084 def BuildHooksEnv(self):
4087 This will run on all nodes before, and on all nodes + the new node after.
4091 "OP_TARGET": self.op.node_name,
4092 "NODE_NAME": self.op.node_name,
4093 "NODE_PIP": self.op.primary_ip,
4094 "NODE_SIP": self.op.secondary_ip,
4095 "MASTER_CAPABLE": str(self.op.master_capable),
4096 "VM_CAPABLE": str(self.op.vm_capable),
4098 nodes_0 = self.cfg.GetNodeList()
4099 nodes_1 = nodes_0 + [self.op.node_name, ]
4100 return env, nodes_0, nodes_1
4102 def CheckPrereq(self):
4103 """Check prerequisites.
4106 - the new node is not already in the config
4108 - its parameters (single/dual homed) match the cluster
4110 Any errors are signaled by raising errors.OpPrereqError.
4114 hostname = self.hostname
4115 node = hostname.name
4116 primary_ip = self.op.primary_ip = hostname.ip
4117 if self.op.secondary_ip is None:
4118 if self.primary_ip_family == netutils.IP6Address.family:
4119 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
4120 " IPv4 address must be given as secondary",
4122 self.op.secondary_ip = primary_ip
4124 secondary_ip = self.op.secondary_ip
4125 if not netutils.IP4Address.IsValid(secondary_ip):
4126 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4127 " address" % secondary_ip, errors.ECODE_INVAL)
4129 node_list = cfg.GetNodeList()
4130 if not self.op.readd and node in node_list:
4131 raise errors.OpPrereqError("Node %s is already in the configuration" %
4132 node, errors.ECODE_EXISTS)
4133 elif self.op.readd and node not in node_list:
4134 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
4137 self.changed_primary_ip = False
4139 for existing_node_name in node_list:
4140 existing_node = cfg.GetNodeInfo(existing_node_name)
4142 if self.op.readd and node == existing_node_name:
4143 if existing_node.secondary_ip != secondary_ip:
4144 raise errors.OpPrereqError("Readded node doesn't have the same IP"
4145 " address configuration as before",
4147 if existing_node.primary_ip != primary_ip:
4148 self.changed_primary_ip = True
4152 if (existing_node.primary_ip == primary_ip or
4153 existing_node.secondary_ip == primary_ip or
4154 existing_node.primary_ip == secondary_ip or
4155 existing_node.secondary_ip == secondary_ip):
4156 raise errors.OpPrereqError("New node ip address(es) conflict with"
4157 " existing node %s" % existing_node.name,
4158 errors.ECODE_NOTUNIQUE)
4160 # After this 'if' block, None is no longer a valid value for the
4161 # _capable op attributes
4163 old_node = self.cfg.GetNodeInfo(node)
4164 assert old_node is not None, "Can't retrieve locked node %s" % node
4165 for attr in self._NFLAGS:
4166 if getattr(self.op, attr) is None:
4167 setattr(self.op, attr, getattr(old_node, attr))
4169 for attr in self._NFLAGS:
4170 if getattr(self.op, attr) is None:
4171 setattr(self.op, attr, True)
4173 if self.op.readd and not self.op.vm_capable:
4174 pri, sec = cfg.GetNodeInstances(node)
4176 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
4177 " flag set to false, but it already holds"
4178 " instances" % node,
4181 # check that the type of the node (single versus dual homed) is the
4182 # same as for the master
4183 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
4184 master_singlehomed = myself.secondary_ip == myself.primary_ip
4185 newbie_singlehomed = secondary_ip == primary_ip
4186 if master_singlehomed != newbie_singlehomed:
4187 if master_singlehomed:
4188 raise errors.OpPrereqError("The master has no secondary ip but the"
4189 " new node has one",
4192 raise errors.OpPrereqError("The master has a secondary ip but the"
4193 " new node doesn't have one",
4196 # checks reachability
4197 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
4198 raise errors.OpPrereqError("Node not reachable by ping",
4199 errors.ECODE_ENVIRON)
4201 if not newbie_singlehomed:
4202 # check reachability from my secondary ip to newbie's secondary ip
4203 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
4204 source=myself.secondary_ip):
4205 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4206 " based ping to node daemon port",
4207 errors.ECODE_ENVIRON)
4214 if self.op.master_capable:
4215 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
4217 self.master_candidate = False
4220 self.new_node = old_node
4222 node_group = cfg.LookupNodeGroup(self.op.group)
4223 self.new_node = objects.Node(name=node,
4224 primary_ip=primary_ip,
4225 secondary_ip=secondary_ip,
4226 master_candidate=self.master_candidate,
4227 offline=False, drained=False,
4230 if self.op.ndparams:
4231 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
4233 def Exec(self, feedback_fn):
4234 """Adds the new node to the cluster.
4237 new_node = self.new_node
4238 node = new_node.name
4240 # We are adding a new node, so we assume it's powered
4241 new_node.powered = True
4243 # for re-adds, reset the offline/drained/master-candidate flags;
4244 # we need to reset here, otherwise offline would prevent RPC calls
4245 # later in the procedure; this also means that if the re-add
4246 # fails, we are left with a non-offlined, broken node
4248 new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
4249 self.LogInfo("Readding a node, the offline/drained flags were reset")
4250 # if we demote the node, we do cleanup later in the procedure
4251 new_node.master_candidate = self.master_candidate
4252 if self.changed_primary_ip:
4253 new_node.primary_ip = self.op.primary_ip
4255 # copy the master/vm_capable flags
4256 for attr in self._NFLAGS:
4257 setattr(new_node, attr, getattr(self.op, attr))
4259 # notify the user about any possible mc promotion
4260 if new_node.master_candidate:
4261 self.LogInfo("Node will be a master candidate")
4263 if self.op.ndparams:
4264 new_node.ndparams = self.op.ndparams
4266 new_node.ndparams = {}
4268 # check connectivity
4269 result = self.rpc.call_version([node])[node]
4270 result.Raise("Can't get version information from node %s" % node)
4271 if constants.PROTOCOL_VERSION == result.payload:
4272 logging.info("Communication to node %s fine, sw version %s match",
4273 node, result.payload)
4275 raise errors.OpExecError("Version mismatch master version %s,"
4276 " node version %s" %
4277 (constants.PROTOCOL_VERSION, result.payload))
4279 # Add node to our /etc/hosts, and add key to known_hosts
4280 if self.cfg.GetClusterInfo().modify_etc_hosts:
4281 master_node = self.cfg.GetMasterNode()
4282 result = self.rpc.call_etc_hosts_modify(master_node,
4283 constants.ETC_HOSTS_ADD,
4286 result.Raise("Can't update hosts file with new host data")
4288 if new_node.secondary_ip != new_node.primary_ip:
4289 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
4292 node_verify_list = [self.cfg.GetMasterNode()]
4293 node_verify_param = {
4294 constants.NV_NODELIST: [node],
4295 # TODO: do a node-net-test as well?
4298 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
4299 self.cfg.GetClusterName())
4300 for verifier in node_verify_list:
4301 result[verifier].Raise("Cannot communicate with node %s" % verifier)
4302 nl_payload = result[verifier].payload[constants.NV_NODELIST]
4304 for failed in nl_payload:
4305 feedback_fn("ssh/hostname verification failed"
4306 " (checking from %s): %s" %
4307 (verifier, nl_payload[failed]))
4308 raise errors.OpExecError("ssh/hostname verification failed.")
4311 _RedistributeAncillaryFiles(self)
4312 self.context.ReaddNode(new_node)
4313 # make sure we redistribute the config
4314 self.cfg.Update(new_node, feedback_fn)
4315 # and make sure the new node will not have old files around
4316 if not new_node.master_candidate:
4317 result = self.rpc.call_node_demote_from_mc(new_node.name)
4318 msg = result.fail_msg
4320 self.LogWarning("Node failed to demote itself from master"
4321 " candidate status: %s" % msg)
4323 _RedistributeAncillaryFiles(self, additional_nodes=[node],
4324 additional_vm=self.op.vm_capable)
4325 self.context.AddNode(new_node, self.proc.GetECId())
4328 class LUNodeSetParams(LogicalUnit):
4329 """Modifies the parameters of a node.
4331 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
4332 to the node role (as _ROLE_*)
4333 @cvar _R2F: a dictionary from node role to tuples of flags
4334 @cvar _FLAGS: a list of attribute names corresponding to the flags
4337 HPATH = "node-modify"
4338 HTYPE = constants.HTYPE_NODE
4340 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
4342 (True, False, False): _ROLE_CANDIDATE,
4343 (False, True, False): _ROLE_DRAINED,
4344 (False, False, True): _ROLE_OFFLINE,
4345 (False, False, False): _ROLE_REGULAR,
4347 _R2F = dict((v, k) for k, v in _F2R.items())
4348 _FLAGS = ["master_candidate", "drained", "offline"]
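# The tuples used as _F2R keys are ordered (master_candidate, drained,
# offline), i.e. the same order as _FLAGS; this is what allows Exec() to
# unpack _R2F[new_role] directly into the node's flag attributes.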
4350 def CheckArguments(self):
4351 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4352 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
4353 self.op.master_capable, self.op.vm_capable,
4354 self.op.secondary_ip, self.op.ndparams]
4355 if all_mods.count(None) == len(all_mods):
4356 raise errors.OpPrereqError("Please pass at least one modification",
4358 if all_mods.count(True) > 1:
4359 raise errors.OpPrereqError("Can't set the node into more than one"
4360 " state at the same time",
4363 # Boolean value that tells us whether we might be demoting from MC
4364 self.might_demote = (self.op.master_candidate == False or
4365 self.op.offline == True or
4366 self.op.drained == True or
4367 self.op.master_capable == False)
4369 if self.op.secondary_ip:
4370 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
4371 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4372 " address" % self.op.secondary_ip,
4375 self.lock_all = self.op.auto_promote and self.might_demote
4376 self.lock_instances = self.op.secondary_ip is not None
4378 def ExpandNames(self):
4380 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
4382 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
4384 if self.lock_instances:
4385 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4387 def DeclareLocks(self, level):
4388 # If we have locked all instances, before waiting to lock nodes, release
4389 # all the ones living on nodes unrelated to the current operation.
4390 if level == locking.LEVEL_NODE and self.lock_instances:
4391 instances_release = []
4393 self.affected_instances = []
4394 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4395 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
4396 instance = self.context.cfg.GetInstanceInfo(instance_name)
4397 i_mirrored = instance.disk_template in constants.DTS_INT_MIRROR
4398 if i_mirrored and self.op.node_name in instance.all_nodes:
4399 instances_keep.append(instance_name)
4400 self.affected_instances.append(instance)
4402 instances_release.append(instance_name)
4403 if instances_release:
4404 self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
4405 self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep
4407 def BuildHooksEnv(self):
4410 This runs on the master node.
4414 "OP_TARGET": self.op.node_name,
4415 "MASTER_CANDIDATE": str(self.op.master_candidate),
4416 "OFFLINE": str(self.op.offline),
4417 "DRAINED": str(self.op.drained),
4418 "MASTER_CAPABLE": str(self.op.master_capable),
4419 "VM_CAPABLE": str(self.op.vm_capable),
4421 nl = [self.cfg.GetMasterNode(),
4425 def CheckPrereq(self):
4426 """Check prerequisites.
4428 This only checks the instance list against the existing names.
4431 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
4433 if (self.op.master_candidate is not None or
4434 self.op.drained is not None or
4435 self.op.offline is not None):
4436 # we can't change the master's node flags
4437 if self.op.node_name == self.cfg.GetMasterNode():
4438 raise errors.OpPrereqError("The master role can be changed"
4439 " only via master-failover",
4442 if self.op.master_candidate and not node.master_capable:
4443 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
4444 " it a master candidate" % node.name,
4447 if self.op.vm_capable == False:
4448 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
4450 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
4451 " the vm_capable flag" % node.name,
4454 if node.master_candidate and self.might_demote and not self.lock_all:
4455 assert not self.op.auto_promote, "auto_promote set but lock_all not"
4456 # check if after removing the current node, we're missing master
4458 (mc_remaining, mc_should, _) = \
4459 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
4460 if mc_remaining < mc_should:
4461 raise errors.OpPrereqError("Not enough master candidates, please"
4462 " pass auto promote option to allow"
4463 " promotion", errors.ECODE_STATE)
4465 self.old_flags = old_flags = (node.master_candidate,
4466 node.drained, node.offline)
4467 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
4468 self.old_role = old_role = self._F2R[old_flags]
4470 # Check for ineffective changes
4471 for attr in self._FLAGS:
4472 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
4473 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
4474 setattr(self.op, attr, None)
4476 # Past this point, any flag change to False means a transition
4477 # away from the respective state, as only real changes are kept
4479 # TODO: We might query the real power state if it supports OOB
4480 if _SupportsOob(self.cfg, node):
4481 if self.op.offline is False and not (node.powered or
4482 self.op.powered == True):
4483 raise errors.OpPrereqError(("Please power on node %s first before you"
4484 " can reset offline state") %
4486 elif self.op.powered is not None:
4487 raise errors.OpPrereqError(("Unable to change powered state for node %s"
4488 " which does not support out-of-band"
4489 " handling") % self.op.node_name)
4491 # If we're being deofflined/drained, we'll MC ourself if needed
4492 if (self.op.drained == False or self.op.offline == False or
4493 (self.op.master_capable and not node.master_capable)):
4494 if _DecideSelfPromotion(self):
4495 self.op.master_candidate = True
4496 self.LogInfo("Auto-promoting node to master candidate")
4498 # If we're no longer master capable, we'll demote ourselves from MC
4499 if self.op.master_capable == False and node.master_candidate:
4500 self.LogInfo("Demoting from master candidate")
4501 self.op.master_candidate = False
4504 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
4505 if self.op.master_candidate:
4506 new_role = self._ROLE_CANDIDATE
4507 elif self.op.drained:
4508 new_role = self._ROLE_DRAINED
4509 elif self.op.offline:
4510 new_role = self._ROLE_OFFLINE
4511 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
4512 # False is still in new flags, which means we're un-setting (the
4514 new_role = self._ROLE_REGULAR
4515 else: # no new flags, nothing, keep old role
4518 self.new_role = new_role
4520 if old_role == self._ROLE_OFFLINE and new_role != old_role:
4521 # Trying to transition out of offline status
4522 result = self.rpc.call_version([node.name])[node.name]
4524 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
4525 " to report its version: %s" %
4526 (node.name, result.fail_msg),
4529 self.LogWarning("Transitioning node from offline to online state"
4530 " without using re-add. Please make sure the node"
4533 if self.op.secondary_ip:
4534 # Ok even without locking, because this can't be changed by any LU
4535 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
4536 master_singlehomed = master.secondary_ip == master.primary_ip
4537 if master_singlehomed and self.op.secondary_ip:
4538 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
4539 " homed cluster", errors.ECODE_INVAL)
4542 if self.affected_instances:
4543 raise errors.OpPrereqError("Cannot change secondary ip: offline"
4544 " node has instances (%s) configured"
4545 " to use it" % self.affected_instances)
4547 # On online nodes, check that no instances are running, and that
4548 # the node has the new ip and we can reach it.
4549 for instance in self.affected_instances:
4550 _CheckInstanceDown(self, instance, "cannot change secondary ip")
4552 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
4553 if master.name != node.name:
4554 # check reachability from master secondary ip to new secondary ip
4555 if not netutils.TcpPing(self.op.secondary_ip,
4556 constants.DEFAULT_NODED_PORT,
4557 source=master.secondary_ip):
4558 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4559 " based ping to node daemon port",
4560 errors.ECODE_ENVIRON)
4562 if self.op.ndparams:
4563 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
4564 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
4565 self.new_ndparams = new_ndparams
4567 def Exec(self, feedback_fn):
4572 old_role = self.old_role
4573 new_role = self.new_role
4577 if self.op.ndparams:
4578 node.ndparams = self.new_ndparams
4580 if self.op.powered is not None:
4581 node.powered = self.op.powered
4583 for attr in ["master_capable", "vm_capable"]:
4584 val = getattr(self.op, attr)
4586 setattr(node, attr, val)
4587 result.append((attr, str(val)))
4589 if new_role != old_role:
4590 # Tell the node to demote itself, if no longer MC and not offline
4591 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
4592 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
4594 self.LogWarning("Node failed to demote itself: %s", msg)
4596 new_flags = self._R2F[new_role]
4597 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
4599 result.append((desc, str(nf)))
4600 (node.master_candidate, node.drained, node.offline) = new_flags
4602 # we locked all nodes, we adjust the CP before updating this node
4604 _AdjustCandidatePool(self, [node.name])
4606 if self.op.secondary_ip:
4607 node.secondary_ip = self.op.secondary_ip
4608 result.append(("secondary_ip", self.op.secondary_ip))
4610 # this will trigger configuration file update, if needed
4611 self.cfg.Update(node, feedback_fn)
4613 # this will trigger job queue propagation or cleanup if the mc
4615 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
4616 self.context.ReaddNode(node)
4621 class LUNodePowercycle(NoHooksLU):
4622 """Powercycles a node.
4627 def CheckArguments(self):
4628 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4629 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4630 raise errors.OpPrereqError("The node is the master and the force"
4631 " parameter was not set",
4634 def ExpandNames(self):
4635 """Locking for PowercycleNode.
4637 This is a last-resort option and shouldn't block on other
4638 jobs. Therefore, we grab no locks.
4641 self.needed_locks = {}
4643 def Exec(self, feedback_fn):
4647 result = self.rpc.call_node_powercycle(self.op.node_name,
4648 self.cfg.GetHypervisorType())
4649 result.Raise("Failed to schedule the reboot")
4650 return result.payload
4653 class LUClusterQuery(NoHooksLU):
4654 """Query cluster configuration.
4659 def ExpandNames(self):
4660 self.needed_locks = {}
4662 def Exec(self, feedback_fn):
4663 """Return cluster config.
4666 cluster = self.cfg.GetClusterInfo()
4669 # Filter just for enabled hypervisors
4670 for os_name, hv_dict in cluster.os_hvp.items():
4671 os_hvp[os_name] = {}
4672 for hv_name, hv_params in hv_dict.items():
4673 if hv_name in cluster.enabled_hypervisors:
4674 os_hvp[os_name][hv_name] = hv_params
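# Illustrative shape of the filtered structure (OS and hypervisor names are
# examples only): os_hvp = {"debian-image": {"kvm": {...}, "xen-pvm": {...}}},
# i.e. keyed by OS name and then by enabled hypervisor name.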
4676 # Convert ip_family to ip_version
4677 primary_ip_version = constants.IP4_VERSION
4678 if cluster.primary_ip_family == netutils.IP6Address.family:
4679 primary_ip_version = constants.IP6_VERSION
4682 "software_version": constants.RELEASE_VERSION,
4683 "protocol_version": constants.PROTOCOL_VERSION,
4684 "config_version": constants.CONFIG_VERSION,
4685 "os_api_version": max(constants.OS_API_VERSIONS),
4686 "export_version": constants.EXPORT_VERSION,
4687 "architecture": (platform.architecture()[0], platform.machine()),
4688 "name": cluster.cluster_name,
4689 "master": cluster.master_node,
4690 "default_hypervisor": cluster.enabled_hypervisors[0],
4691 "enabled_hypervisors": cluster.enabled_hypervisors,
4692 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4693 for hypervisor_name in cluster.enabled_hypervisors]),
4695 "beparams": cluster.beparams,
4696 "osparams": cluster.osparams,
4697 "nicparams": cluster.nicparams,
4698 "ndparams": cluster.ndparams,
4699 "candidate_pool_size": cluster.candidate_pool_size,
4700 "master_netdev": cluster.master_netdev,
4701 "volume_group_name": cluster.volume_group_name,
4702 "drbd_usermode_helper": cluster.drbd_usermode_helper,
4703 "file_storage_dir": cluster.file_storage_dir,
4704 "shared_file_storage_dir": cluster.shared_file_storage_dir,
4705 "maintain_node_health": cluster.maintain_node_health,
4706 "ctime": cluster.ctime,
4707 "mtime": cluster.mtime,
4708 "uuid": cluster.uuid,
4709 "tags": list(cluster.GetTags()),
4710 "uid_pool": cluster.uid_pool,
4711 "default_iallocator": cluster.default_iallocator,
4712 "reserved_lvs": cluster.reserved_lvs,
4713 "primary_ip_version": primary_ip_version,
4714 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
4715 "hidden_os": cluster.hidden_os,
4716 "blacklisted_os": cluster.blacklisted_os,
4722 class LUClusterConfigQuery(NoHooksLU):
4723 """Return configuration values.
4727 _FIELDS_DYNAMIC = utils.FieldSet()
4728 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4729 "watcher_pause", "volume_group_name")
4731 def CheckArguments(self):
4732 _CheckOutputFields(static=self._FIELDS_STATIC,
4733 dynamic=self._FIELDS_DYNAMIC,
4734 selected=self.op.output_fields)
4736 def ExpandNames(self):
4737 self.needed_locks = {}
4739 def Exec(self, feedback_fn):
4740 """Dump a representation of the cluster config to the standard output.
4744 for field in self.op.output_fields:
4745 if field == "cluster_name":
4746 entry = self.cfg.GetClusterName()
4747 elif field == "master_node":
4748 entry = self.cfg.GetMasterNode()
4749 elif field == "drain_flag":
4750 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4751 elif field == "watcher_pause":
4752 entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4753 elif field == "volume_group_name":
4754 entry = self.cfg.GetVGName()
4756 raise errors.ParameterError(field)
4757 values.append(entry)
4761 class LUInstanceActivateDisks(NoHooksLU):
4762 """Bring up an instance's disks.
4767 def ExpandNames(self):
4768 self._ExpandAndLockInstance()
4769 self.needed_locks[locking.LEVEL_NODE] = []
4770 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4772 def DeclareLocks(self, level):
4773 if level == locking.LEVEL_NODE:
4774 self._LockInstancesNodes()
4776 def CheckPrereq(self):
4777 """Check prerequisites.
4779 This checks that the instance is in the cluster.
4782 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4783 assert self.instance is not None, \
4784 "Cannot retrieve locked instance %s" % self.op.instance_name
4785 _CheckNodeOnline(self, self.instance.primary_node)
4787 def Exec(self, feedback_fn):
4788 """Activate the disks.
4791 disks_ok, disks_info = \
4792 _AssembleInstanceDisks(self, self.instance,
4793 ignore_size=self.op.ignore_size)
4795 raise errors.OpExecError("Cannot activate block devices")
4800 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4802 """Prepare the block devices for an instance.
4804 This sets up the block devices on all nodes.
4806 @type lu: L{LogicalUnit}
4807 @param lu: the logical unit on whose behalf we execute
4808 @type instance: L{objects.Instance}
4809 @param instance: the instance for whose disks we assemble
4810 @type disks: list of L{objects.Disk} or None
4811 @param disks: which disks to assemble (or all, if None)
4812 @type ignore_secondaries: boolean
4813 @param ignore_secondaries: if true, errors on secondary nodes
4814 won't result in an error return from the function
4815 @type ignore_size: boolean
4816 @param ignore_size: if true, the current known size of the disk
4817 will not be used during the disk activation, useful for cases
4818 when the size is wrong
4819 @return: False if the operation failed, otherwise a list of
4820 (host, instance_visible_name, node_visible_name)
4821 with the mapping from node devices to instance devices
4826 iname = instance.name
4827 disks = _ExpandCheckDisks(instance, disks)
4829 # With the two-pass mechanism we try to reduce the window of
4830 # opportunity for the race condition of switching DRBD to primary
4831 # before handshaking occurred, but we do not eliminate it
4833 # The proper fix would be to wait (with some limits) until the
4834 # connection has been made and drbd transitions from WFConnection
4835 # into any other network-connected state (Connected, SyncTarget,
4838 # 1st pass, assemble on all nodes in secondary mode
4839 for idx, inst_disk in enumerate(disks):
4840 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4842 node_disk = node_disk.Copy()
4843 node_disk.UnsetSize()
4844 lu.cfg.SetDiskID(node_disk, node)
4845 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
4846 msg = result.fail_msg
4848 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4849 " (is_primary=False, pass=1): %s",
4850 inst_disk.iv_name, node, msg)
4851 if not ignore_secondaries:
4854 # FIXME: race condition on drbd migration to primary
4856 # 2nd pass, do only the primary node
4857 for idx, inst_disk in enumerate(disks):
4860 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4861 if node != instance.primary_node:
4864 node_disk = node_disk.Copy()
4865 node_disk.UnsetSize()
4866 lu.cfg.SetDiskID(node_disk, node)
4867 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
4868 msg = result.fail_msg
4870 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4871 " (is_primary=True, pass=2): %s",
4872 inst_disk.iv_name, node, msg)
4875 dev_path = result.payload
4877 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4879 # leave the disks configured for the primary node
4880 # this is a workaround that would be fixed better by
4881 # improving the logical/physical id handling
4883 lu.cfg.SetDiskID(disk, instance.primary_node)
4885 return disks_ok, device_info
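# A minimal usage sketch of _AssembleInstanceDisks (the names `lu`, `instance`
# and `feedback_fn` stand for whatever the calling LU already has in scope;
# see _StartInstanceDisks below and LUInstanceActivateDisks above for the
# real callers):
#
#   disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
#   for node, iv_name, dev_path in device_info:
#     feedback_fn("Disk %s on %s mapped to %s" % (iv_name, node, dev_path))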
4888 def _StartInstanceDisks(lu, instance, force):
4889 """Start the disks of an instance.
4892 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4893 ignore_secondaries=force)
4895 _ShutdownInstanceDisks(lu, instance)
4896 if force is not None and not force:
4897 lu.proc.LogWarning("", hint="If the message above refers to a"
4899 " you can retry the operation using '--force'.")
4900 raise errors.OpExecError("Disk consistency error")
4903 class LUInstanceDeactivateDisks(NoHooksLU):
4904 """Shutdown an instance's disks.
4909 def ExpandNames(self):
4910 self._ExpandAndLockInstance()
4911 self.needed_locks[locking.LEVEL_NODE] = []
4912 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4914 def DeclareLocks(self, level):
4915 if level == locking.LEVEL_NODE:
4916 self._LockInstancesNodes()
4918 def CheckPrereq(self):
4919 """Check prerequisites.
4921 This checks that the instance is in the cluster.
4924 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4925 assert self.instance is not None, \
4926 "Cannot retrieve locked instance %s" % self.op.instance_name
4928 def Exec(self, feedback_fn):
4929 """Deactivate the disks
4932 instance = self.instance
4934 _ShutdownInstanceDisks(self, instance)
4936 _SafeShutdownInstanceDisks(self, instance)
4939 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4940 """Shutdown block devices of an instance.
4942 This function checks if an instance is running, before calling
4943 _ShutdownInstanceDisks.
4946 _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4947 _ShutdownInstanceDisks(lu, instance, disks=disks)
4950 def _ExpandCheckDisks(instance, disks):
4951 """Return the instance disks selected by the disks list
4953 @type disks: list of L{objects.Disk} or None
4954 @param disks: selected disks
4955 @rtype: list of L{objects.Disk}
4956 @return: selected instance disks to act on
4960 return instance.disks
4962 if not set(disks).issubset(instance.disks):
4963 raise errors.ProgrammerError("Can only act on disks belonging to the"
4968 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4969 """Shutdown block devices of an instance.
4971 This does the shutdown on all nodes of the instance.
4973 If the ignore_primary is false, errors on the primary node are
4978 disks = _ExpandCheckDisks(instance, disks)
4981 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4982 lu.cfg.SetDiskID(top_disk, node)
4983 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4984 msg = result.fail_msg
4986 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4987 disk.iv_name, node, msg)
4988 if ((node == instance.primary_node and not ignore_primary) or
4989 (node != instance.primary_node and not result.offline)):
4994 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4995 """Checks if a node has enough free memory.
4997 This function checks if a given node has the needed amount of free
4998 memory. In case the node has less memory or we cannot get the
4999 information from the node, this function raises an OpPrereqError
5002 @type lu: C{LogicalUnit}
5003 @param lu: a logical unit from which we get configuration data
5005 @param node: the node to check
5006 @type reason: C{str}
5007 @param reason: string to use in the error message
5008 @type requested: C{int}
5009 @param requested: the amount of memory in MiB to check for
5010 @type hypervisor_name: C{str}
5011 @param hypervisor_name: the hypervisor to ask for memory stats
5012 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
5013 we cannot check the node
5016 nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
5017 nodeinfo[node].Raise("Can't get data from node %s" % node,
5018 prereq=True, ecode=errors.ECODE_ENVIRON)
5019 free_mem = nodeinfo[node].payload.get('memory_free', None)
5020 if not isinstance(free_mem, int):
5021 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
5022 " was '%s'" % (node, free_mem),
5023 errors.ECODE_ENVIRON)
5024 if requested > free_mem:
5025 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
5026 " needed %s MiB, available %s MiB" %
5027 (node, reason, requested, free_mem),
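# Usage sketch: LUInstanceStartup.CheckPrereq (below) invokes this check as
#
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
#
# so an instance start fails early with OpPrereqError instead of running the
# node out of memory.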
5031 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
5032 """Checks if nodes have enough free disk space in the all VGs.
5034 This function checks if all given nodes have the needed amount of
5035 free disk. In case any node has less disk or we cannot get the
5036 information from the node, this function raises an OpPrereqError
5039 @type lu: C{LogicalUnit}
5040 @param lu: a logical unit from which we get configuration data
5041 @type nodenames: C{list}
5042 @param nodenames: the list of node names to check
5043 @type req_sizes: C{dict}
5044 @param req_sizes: the hash of vg and corresponding amount of disk in
5046 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5047 or we cannot check the node
5050 for vg, req_size in req_sizes.items():
5051 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
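# The req_sizes mapping consumed above is a simple VG-name -> MiB dictionary;
# e.g. (illustrative values only) {"xenvg": 2048} would require 2 GiB of free
# space in volume group "xenvg" on every node in nodenames.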
5054 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5055 """Checks if nodes have enough free disk space in the specified VG.
5057 This function checks if all given nodes have the needed amount of
5058 free disk. In case any node has less disk or we cannot get the
5059 information from the node, this function raises an OpPrereqError
5062 @type lu: C{LogicalUnit}
5063 @param lu: a logical unit from which we get configuration data
5064 @type nodenames: C{list}
5065 @param nodenames: the list of node names to check
5067 @param vg: the volume group to check
5068 @type requested: C{int}
5069 @param requested: the amount of disk in MiB to check for
5070 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5071 or we cannot check the node
5074 nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5075 for node in nodenames:
5076 info = nodeinfo[node]
5077 info.Raise("Cannot get current information from node %s" % node,
5078 prereq=True, ecode=errors.ECODE_ENVIRON)
5079 vg_free = info.payload.get("vg_free", None)
5080 if not isinstance(vg_free, int):
5081 raise errors.OpPrereqError("Can't compute free disk space on node"
5082 " %s for vg %s, result was '%s'" %
5083 (node, vg, vg_free), errors.ECODE_ENVIRON)
5084 if requested > vg_free:
5085 raise errors.OpPrereqError("Not enough disk space on target node %s"
5086 " vg %s: required %d MiB, available %d MiB" %
5087 (node, vg, requested, vg_free),
5091 class LUInstanceStartup(LogicalUnit):
5092 """Starts an instance.
5095 HPATH = "instance-start"
5096 HTYPE = constants.HTYPE_INSTANCE
5099 def CheckArguments(self):
5101 if self.op.beparams:
5102 # fill the beparams dict
5103 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5105 def ExpandNames(self):
5106 self._ExpandAndLockInstance()
5108 def BuildHooksEnv(self):
5111 This runs on master, primary and secondary nodes of the instance.
5115 "FORCE": self.op.force,
5117 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5118 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5121 def CheckPrereq(self):
5122 """Check prerequisites.
5124 This checks that the instance is in the cluster.
5127 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5128 assert self.instance is not None, \
5129 "Cannot retrieve locked instance %s" % self.op.instance_name
5132 if self.op.hvparams:
5133 # check hypervisor parameter syntax (locally)
5134 cluster = self.cfg.GetClusterInfo()
5135 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5136 filled_hvp = cluster.FillHV(instance)
5137 filled_hvp.update(self.op.hvparams)
5138 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
5139 hv_type.CheckParameterSyntax(filled_hvp)
5140 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
5142 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
5144 if self.primary_offline and self.op.ignore_offline_nodes:
5145 self.proc.LogWarning("Ignoring offline primary node")
5147 if self.op.hvparams or self.op.beparams:
5148 self.proc.LogWarning("Overridden parameters are ignored")
5150 _CheckNodeOnline(self, instance.primary_node)
5152 bep = self.cfg.GetClusterInfo().FillBE(instance)
5154 # check bridges existence
5155 _CheckInstanceBridgesExist(self, instance)
5157 remote_info = self.rpc.call_instance_info(instance.primary_node,
5159 instance.hypervisor)
5160 remote_info.Raise("Error checking node %s" % instance.primary_node,
5161 prereq=True, ecode=errors.ECODE_ENVIRON)
5162 if not remote_info.payload: # not running already
5163 _CheckNodeFreeMemory(self, instance.primary_node,
5164 "starting instance %s" % instance.name,
5165 bep[constants.BE_MEMORY], instance.hypervisor)
5167 def Exec(self, feedback_fn):
5168 """Start the instance.
5171 instance = self.instance
5172 force = self.op.force
5174 self.cfg.MarkInstanceUp(instance.name)
5176 if self.primary_offline:
5177 assert self.op.ignore_offline_nodes
5178 self.proc.LogInfo("Primary node offline, marked instance as started")
5180 node_current = instance.primary_node
5182 _StartInstanceDisks(self, instance, force)
5184 result = self.rpc.call_instance_start(node_current, instance,
5185 self.op.hvparams, self.op.beparams)
5186 msg = result.fail_msg
5188 _ShutdownInstanceDisks(self, instance)
5189 raise errors.OpExecError("Could not start instance: %s" % msg)
5192 class LUInstanceReboot(LogicalUnit):
5193 """Reboot an instance.
5196 HPATH = "instance-reboot"
5197 HTYPE = constants.HTYPE_INSTANCE
5200 def ExpandNames(self):
5201 self._ExpandAndLockInstance()
5203 def BuildHooksEnv(self):
5206 This runs on master, primary and secondary nodes of the instance.
5210 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
5211 "REBOOT_TYPE": self.op.reboot_type,
5212 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5214 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5215 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5218 def CheckPrereq(self):
5219 """Check prerequisites.
5221 This checks that the instance is in the cluster.
5224 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5225 assert self.instance is not None, \
5226 "Cannot retrieve locked instance %s" % self.op.instance_name
5228 _CheckNodeOnline(self, instance.primary_node)
5230 # check bridges existence
5231 _CheckInstanceBridgesExist(self, instance)
5233 def Exec(self, feedback_fn):
5234 """Reboot the instance.
5237 instance = self.instance
5238 ignore_secondaries = self.op.ignore_secondaries
5239 reboot_type = self.op.reboot_type
5241 remote_info = self.rpc.call_instance_info(instance.primary_node,
5243 instance.hypervisor)
5244 remote_info.Raise("Error checking node %s" % instance.primary_node)
5245 instance_running = bool(remote_info.payload)
5247 node_current = instance.primary_node
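# Soft and hard reboots are delegated to the node daemon in a single RPC; a
# full reboot is emulated below by shutting the instance down, deactivating
# its disks and starting it again.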
5249 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
5250 constants.INSTANCE_REBOOT_HARD]:
5251 for disk in instance.disks:
5252 self.cfg.SetDiskID(disk, node_current)
5253 result = self.rpc.call_instance_reboot(node_current, instance,
5255 self.op.shutdown_timeout)
5256 result.Raise("Could not reboot instance")
5258 if instance_running:
5259 result = self.rpc.call_instance_shutdown(node_current, instance,
5260 self.op.shutdown_timeout)
5261 result.Raise("Could not shutdown instance for full reboot")
5262 _ShutdownInstanceDisks(self, instance)
5264 self.LogInfo("Instance %s was already stopped, starting now",
5266 _StartInstanceDisks(self, instance, ignore_secondaries)
5267 result = self.rpc.call_instance_start(node_current, instance, None, None)
5268 msg = result.fail_msg
5270 _ShutdownInstanceDisks(self, instance)
5271 raise errors.OpExecError("Could not start instance for"
5272 " full reboot: %s" % msg)
5274 self.cfg.MarkInstanceUp(instance.name)
5277 class LUInstanceShutdown(LogicalUnit):
5278 """Shutdown an instance.
5281 HPATH = "instance-stop"
5282 HTYPE = constants.HTYPE_INSTANCE
5285 def ExpandNames(self):
5286 self._ExpandAndLockInstance()
5288 def BuildHooksEnv(self):
5291 This runs on master, primary and secondary nodes of the instance.
5294 env = _BuildInstanceHookEnvByObject(self, self.instance)
5295 env["TIMEOUT"] = self.op.timeout
5296 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5299 def CheckPrereq(self):
5300 """Check prerequisites.
5302 This checks that the instance is in the cluster.
5305 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5306 assert self.instance is not None, \
5307 "Cannot retrieve locked instance %s" % self.op.instance_name
5309 self.primary_offline = \
5310 self.cfg.GetNodeInfo(self.instance.primary_node).offline
5312 if self.primary_offline and self.op.ignore_offline_nodes:
5313 self.proc.LogWarning("Ignoring offline primary node")
5315 _CheckNodeOnline(self, self.instance.primary_node)
5317 def Exec(self, feedback_fn):
5318 """Shutdown the instance.
5321 instance = self.instance
5322 node_current = instance.primary_node
5323 timeout = self.op.timeout
5325 self.cfg.MarkInstanceDown(instance.name)
5327 if self.primary_offline:
5328 assert self.op.ignore_offline_nodes
5329 self.proc.LogInfo("Primary node offline, marked instance as stopped")
5331 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5332 msg = result.fail_msg
5334 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5336 _ShutdownInstanceDisks(self, instance)
5339 class LUInstanceReinstall(LogicalUnit):
5340 """Reinstall an instance.
5343 HPATH = "instance-reinstall"
5344 HTYPE = constants.HTYPE_INSTANCE
5347 def ExpandNames(self):
5348 self._ExpandAndLockInstance()
5350 def BuildHooksEnv(self):
5353 This runs on master, primary and secondary nodes of the instance.
5356 env = _BuildInstanceHookEnvByObject(self, self.instance)
5357 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5360 def CheckPrereq(self):
5361 """Check prerequisites.
5363 This checks that the instance is in the cluster and is not running.
5366 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5367 assert instance is not None, \
5368 "Cannot retrieve locked instance %s" % self.op.instance_name
5369 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
5370 " offline, cannot reinstall")
5371 for node in instance.secondary_nodes:
5372 _CheckNodeOnline(self, node, "Instance secondary node offline,"
5373 " cannot reinstall")
5375 if instance.disk_template == constants.DT_DISKLESS:
5376 raise errors.OpPrereqError("Instance '%s' has no disks" %
5377 self.op.instance_name,
5379 _CheckInstanceDown(self, instance, "cannot reinstall")
5381 if self.op.os_type is not None:
5383 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
5384 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
5385 instance_os = self.op.os_type
5387 instance_os = instance.os
5389 nodelist = list(instance.all_nodes)
5391 if self.op.osparams:
5392 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
5393 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
5394 self.os_inst = i_osdict # the new dict (without defaults)
5398 self.instance = instance
5400 def Exec(self, feedback_fn):
5401 """Reinstall the instance.
5404 inst = self.instance
5406 if self.op.os_type is not None:
5407 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
5408 inst.os = self.op.os_type
5409 # Write to configuration
5410 self.cfg.Update(inst, feedback_fn)
5412 _StartInstanceDisks(self, inst, None)
5414 feedback_fn("Running the instance OS create scripts...")
5415 # FIXME: pass debug option from opcode to backend
5416 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
5417 self.op.debug_level,
5418 osparams=self.os_inst)
5419 result.Raise("Could not install OS for instance %s on node %s" %
5420 (inst.name, inst.primary_node))
5422 _ShutdownInstanceDisks(self, inst)
5425 class LUInstanceRecreateDisks(LogicalUnit):
5426 """Recreate an instance's missing disks.
5429 HPATH = "instance-recreate-disks"
5430 HTYPE = constants.HTYPE_INSTANCE
5433 def ExpandNames(self):
5434 self._ExpandAndLockInstance()
5436 def BuildHooksEnv(self):
5439 This runs on master, primary and secondary nodes of the instance.
5442 env = _BuildInstanceHookEnvByObject(self, self.instance)
5443 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5446 def CheckPrereq(self):
5447 """Check prerequisites.
5449 This checks that the instance is in the cluster and is not running.
5452 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5453 assert instance is not None, \
5454 "Cannot retrieve locked instance %s" % self.op.instance_name
5455 _CheckNodeOnline(self, instance.primary_node)
5457 if instance.disk_template == constants.DT_DISKLESS:
5458 raise errors.OpPrereqError("Instance '%s' has no disks" %
5459 self.op.instance_name, errors.ECODE_INVAL)
5460 _CheckInstanceDown(self, instance, "cannot recreate disks")
5462 if not self.op.disks:
5463 self.op.disks = range(len(instance.disks))
5465 for idx in self.op.disks:
5466 if idx >= len(instance.disks):
5467 raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
5470 self.instance = instance
5472 def Exec(self, feedback_fn):
5473 """Recreate the disks.
5477 for idx, _ in enumerate(self.instance.disks):
5478 if idx not in self.op.disks: # disk idx has not been passed in
5482 _CreateDisks(self, self.instance, to_skip=to_skip)
5485 class LUInstanceRename(LogicalUnit):
5486 """Rename an instance.
5489 HPATH = "instance-rename"
5490 HTYPE = constants.HTYPE_INSTANCE
5492 def CheckArguments(self):
5496 if self.op.ip_check and not self.op.name_check:
5497 # TODO: make the ip check more flexible and not depend on the name check
5498 raise errors.OpPrereqError("Cannot do ip check without a name check",
5501 def BuildHooksEnv(self):
5504 This runs on master, primary and secondary nodes of the instance.
5507 env = _BuildInstanceHookEnvByObject(self, self.instance)
5508 env["INSTANCE_NEW_NAME"] = self.op.new_name
5509 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5512 def CheckPrereq(self):
5513 """Check prerequisites.
5515 This checks that the instance is in the cluster and is not running.
5518 self.op.instance_name = _ExpandInstanceName(self.cfg,
5519 self.op.instance_name)
5520 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5521 assert instance is not None
5522 _CheckNodeOnline(self, instance.primary_node)
5523 _CheckInstanceDown(self, instance, "cannot rename")
5524 self.instance = instance
5526 new_name = self.op.new_name
5527 if self.op.name_check:
5528 hostname = netutils.GetHostname(name=new_name)
5529 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
5531 if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
5532 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
5533 " same as given hostname '%s'") %
5534 (hostname.name, self.op.new_name),
5536 new_name = self.op.new_name = hostname.name
5537 if (self.op.ip_check and
5538 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5539 raise errors.OpPrereqError("IP %s of instance %s already in use" %
5540 (hostname.ip, new_name),
5541 errors.ECODE_NOTUNIQUE)
5543 instance_list = self.cfg.GetInstanceList()
5544 if new_name in instance_list and new_name != instance.name:
5545 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5546 new_name, errors.ECODE_EXISTS)
5548 def Exec(self, feedback_fn):
5549 """Rename the instance.
5552 inst = self.instance
5553 old_name = inst.name
5555 rename_file_storage = False
5556 if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
5557 self.op.new_name != inst.name):
5558 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5559 rename_file_storage = True
5561 self.cfg.RenameInstance(inst.name, self.op.new_name)
5562 # Change the instance lock. This is definitely safe while we hold the BGL
5563 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
5564 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5566 # re-read the instance from the configuration after rename
5567 inst = self.cfg.GetInstanceInfo(self.op.new_name)
5569 if rename_file_storage:
5570 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5571 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5572 old_file_storage_dir,
5573 new_file_storage_dir)
5574 result.Raise("Could not rename on node %s directory '%s' to '%s'"
5575 " (but the instance has been renamed in Ganeti)" %
5576 (inst.primary_node, old_file_storage_dir,
5577 new_file_storage_dir))
5579 _StartInstanceDisks(self, inst, None)
5581 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
5582 old_name, self.op.debug_level)
5583 msg = result.fail_msg
5585 msg = ("Could not run OS rename script for instance %s on node %s"
5586 " (but the instance has been renamed in Ganeti): %s" %
5587 (inst.name, inst.primary_node, msg))
5588 self.proc.LogWarning(msg)
5590 _ShutdownInstanceDisks(self, inst)
5595 class LUInstanceRemove(LogicalUnit):
5596 """Remove an instance.
5599 HPATH = "instance-remove"
5600 HTYPE = constants.HTYPE_INSTANCE
5603 def ExpandNames(self):
5604 self._ExpandAndLockInstance()
5605 self.needed_locks[locking.LEVEL_NODE] = []
5606 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5608 def DeclareLocks(self, level):
5609 if level == locking.LEVEL_NODE:
5610 self._LockInstancesNodes()
5612 def BuildHooksEnv(self):
5615 This runs on master, primary and secondary nodes of the instance.
5618 env = _BuildInstanceHookEnvByObject(self, self.instance)
5619 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
5620 nl = [self.cfg.GetMasterNode()]
5621 nl_post = list(self.instance.all_nodes) + nl
5622 return env, nl, nl_post
5624 def CheckPrereq(self):
5625 """Check prerequisites.
5627 This checks that the instance is in the cluster.
5630 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5631 assert self.instance is not None, \
5632 "Cannot retrieve locked instance %s" % self.op.instance_name
5634 def Exec(self, feedback_fn):
5635 """Remove the instance.
5638 instance = self.instance
5639 logging.info("Shutting down instance %s on node %s",
5640 instance.name, instance.primary_node)
5642 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5643 self.op.shutdown_timeout)
5644 msg = result.fail_msg
5646 if self.op.ignore_failures:
5647 feedback_fn("Warning: can't shutdown instance: %s" % msg)
5649 raise errors.OpExecError("Could not shutdown instance %s on"
5651 (instance.name, instance.primary_node, msg))
5653 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5656 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5657 """Utility function to remove an instance.
5660 logging.info("Removing block devices for instance %s", instance.name)
5662 if not _RemoveDisks(lu, instance):
5663 if not ignore_failures:
5664 raise errors.OpExecError("Can't remove instance's disks")
5665 feedback_fn("Warning: can't remove instance's disks")
5667 logging.info("Removing instance %s out of cluster config", instance.name)
5669 lu.cfg.RemoveInstance(instance.name)
5671 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5672 "Instance lock removal conflict"
5674 # Remove lock for the instance
5675 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5678 class LUInstanceQuery(NoHooksLU):
5679 """Logical unit for querying instances.
5682 # pylint: disable-msg=W0142
5685 def CheckArguments(self):
5686 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
5687 self.op.output_fields, self.op.use_locking)
5689 def ExpandNames(self):
5690 self.iq.ExpandNames(self)
5692 def DeclareLocks(self, level):
5693 self.iq.DeclareLocks(self, level)
5695 def Exec(self, feedback_fn):
5696 return self.iq.OldStyleQuery(self)
5699 class LUInstanceFailover(LogicalUnit):
5700 """Failover an instance.
5703 HPATH = "instance-failover"
5704 HTYPE = constants.HTYPE_INSTANCE
5707 def CheckArguments(self):
5708 """Check the arguments.
5711 self.iallocator = getattr(self.op, "iallocator", None)
5712 self.target_node = getattr(self.op, "target_node", None)
5713 _CheckIAllocatorOrNode(self, "iallocator", "target_node")
5715 def ExpandNames(self):
5716 self._ExpandAndLockInstance()
5718 if self.op.target_node is not None:
5719 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5721 self.needed_locks[locking.LEVEL_NODE] = []
5722 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5724 def DeclareLocks(self, level):
5725 if level == locking.LEVEL_NODE:
5726 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
5727 if instance.disk_template in constants.DTS_EXT_MIRROR:
5728 if self.op.target_node is None:
5729 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5731 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
5732 self.op.target_node]
5733 del self.recalculate_locks[locking.LEVEL_NODE]
5735 self._LockInstancesNodes()
5737 def BuildHooksEnv(self):
5740 This runs on master, primary and secondary nodes of the instance.
5743 instance = self.instance
5744 source_node = instance.primary_node
5746 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5747 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5748 "OLD_PRIMARY": source_node,
5749 "NEW_PRIMARY": self.op.target_node,
5752 if instance.disk_template in constants.DTS_INT_MIRROR:
5753 env["OLD_SECONDARY"] = instance.secondary_nodes[0]
5754 env["NEW_SECONDARY"] = source_node
5756 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
5758 env.update(_BuildInstanceHookEnvByObject(self, instance))
5759 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5761 nl_post.append(source_node)
5762 return env, nl, nl_post
5764 def CheckPrereq(self):
5765 """Check prerequisites.
5767 This checks that the instance is in the cluster.
5770 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5771 assert self.instance is not None, \
5772 "Cannot retrieve locked instance %s" % self.op.instance_name
5774 bep = self.cfg.GetClusterInfo().FillBE(instance)
5775 if instance.disk_template not in constants.DTS_MIRRORED:
5776 raise errors.OpPrereqError("Instance's disk layout is not"
5777 " mirrored, cannot failover.",
5780 if instance.disk_template in constants.DTS_EXT_MIRROR:
5781 if self.op.iallocator:
5782 self._RunAllocator()
5783 # Release all unnecessary node locks
5784 nodes_keep = [instance.primary_node, self.op.target_node]
5785 nodes_rel = [node for node in self.acquired_locks[locking.LEVEL_NODE]
5786 if node not in nodes_keep]
5787 self.context.glm.release(locking.LEVEL_NODE, nodes_rel)
5788 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
5790 # self.op.target_node is already populated, either directly or by the
5792 target_node = self.op.target_node
5795 secondary_nodes = instance.secondary_nodes
5796 if not secondary_nodes:
5797 raise errors.ConfigurationError("No secondary node but using"
5798 " %s disk template" %
5799 instance.disk_template)
5800 target_node = secondary_nodes[0]
5802 _CheckNodeOnline(self, target_node)
5803 _CheckNodeNotDrained(self, target_node)
5805 # Save target_node so that we can use it in BuildHooksEnv
5806 self.op.target_node = target_node
5808 if instance.admin_up:
5809 # check memory requirements on the secondary node
5810 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5811 instance.name, bep[constants.BE_MEMORY],
5812 instance.hypervisor)
5814 self.LogInfo("Not checking memory on the secondary node as"
5815 " instance will not be started")
5817 # check bridge existence
5818 _CheckInstanceBridgesExist(self, instance, node=target_node)
5820 def Exec(self, feedback_fn):
5821 """Failover an instance.
5823 The failover is done by shutting it down on its present node and
5824 starting it on the secondary.
5827 instance = self.instance
5828 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
5830 source_node = instance.primary_node
5831 target_node = self.op.target_node
5833 if instance.admin_up:
5834 feedback_fn("* checking disk consistency between source and target")
5835 for dev in instance.disks:
5836 # for drbd, these are drbd over lvm
5837 if not _CheckDiskConsistency(self, dev, target_node, False):
5838 if not self.op.ignore_consistency:
5839 raise errors.OpExecError("Disk %s is degraded on target node,"
5840 " aborting failover." % dev.iv_name)
5842 feedback_fn("* not checking disk consistency as instance is not running")
5844 feedback_fn("* shutting down instance on source node")
5845 logging.info("Shutting down instance %s on node %s",
5846 instance.name, source_node)
5848 result = self.rpc.call_instance_shutdown(source_node, instance,
5849 self.op.shutdown_timeout)
5850 msg = result.fail_msg
5852 if self.op.ignore_consistency or primary_node.offline:
5853 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5854 " Proceeding anyway. Please make sure node"
5855 " %s is down. Error details: %s",
5856 instance.name, source_node, source_node, msg)
5858 raise errors.OpExecError("Could not shutdown instance %s on"
5860 (instance.name, source_node, msg))
5862 feedback_fn("* deactivating the instance's disks on source node")
5863 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5864 raise errors.OpExecError("Can't shut down the instance's disks.")
5866 instance.primary_node = target_node
5867 # distribute new instance config to the other nodes
5868 self.cfg.Update(instance, feedback_fn)
5870 # Only start the instance if it's marked as up
5871 if instance.admin_up:
5872 feedback_fn("* activating the instance's disks on target node")
5873 logging.info("Starting instance %s on node %s",
5874 instance.name, target_node)
5876 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5877 ignore_secondaries=True)
5879 _ShutdownInstanceDisks(self, instance)
5880 raise errors.OpExecError("Can't activate the instance's disks")
5882 feedback_fn("* starting the instance on the target node")
5883 result = self.rpc.call_instance_start(target_node, instance, None, None)
5884 msg = result.fail_msg
5886 _ShutdownInstanceDisks(self, instance)
5887 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5888 (instance.name, target_node, msg))
5890 def _RunAllocator(self):
5891 """Run the allocator based on input opcode.
5894 ial = IAllocator(self.cfg, self.rpc,
5895 mode=constants.IALLOCATOR_MODE_RELOC,
5896 name=self.instance.name,
5897 # TODO See why hail breaks with a single node below
5898 relocate_from=[self.instance.primary_node,
5899 self.instance.primary_node],
5902 ial.Run(self.op.iallocator)
5905 raise errors.OpPrereqError("Can't compute nodes using"
5906 " iallocator '%s': %s" %
5907 (self.op.iallocator, ial.info),
5909 if len(ial.result) != ial.required_nodes:
5910 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5911 " of nodes (%s), required %s" %
5912 (self.op.iallocator, len(ial.result),
5913 ial.required_nodes), errors.ECODE_FAULT)
5914 self.op.target_node = ial.result[0]
5915 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
5916 self.instance.name, self.op.iallocator,
5917 utils.CommaJoin(ial.result))
5920 class LUInstanceMigrate(LogicalUnit):
5921 """Migrate an instance.
5923 This is migration without shutting down the instance, compared to
5924 failover, which is done with a shutdown.
5927 HPATH = "instance-migrate"
5928 HTYPE = constants.HTYPE_INSTANCE
5931 def CheckArguments(self):
5932 _CheckIAllocatorOrNode(self, "iallocator", "target_node")
5934 def ExpandNames(self):
5935 self._ExpandAndLockInstance()
5937 if self.op.target_node is not None:
5938 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5940 self.needed_locks[locking.LEVEL_NODE] = []
5941 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5943 self._migrater = TLMigrateInstance(self, self.op.instance_name,
5944 self.op.cleanup, self.op.iallocator,
5945 self.op.target_node)
5946 self.tasklets = [self._migrater]
5948 def DeclareLocks(self, level):
5949 if level == locking.LEVEL_NODE:
5950 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
5951 if instance.disk_template in constants.DTS_EXT_MIRROR:
5952 if self.op.target_node is None:
5953 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5955 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
5956 self.op.target_node]
5957 del self.recalculate_locks[locking.LEVEL_NODE]
5959 self._LockInstancesNodes()
5961 def BuildHooksEnv(self):
5964 This runs on master, primary and secondary nodes of the instance.
5967 instance = self._migrater.instance
5968 source_node = instance.primary_node
5969 target_node = self._migrater.target_node
5970 env = _BuildInstanceHookEnvByObject(self, instance)
5971 env["MIGRATE_LIVE"] = self._migrater.live
5972 env["MIGRATE_CLEANUP"] = self.op.cleanup
5974 "OLD_PRIMARY": source_node,
5975 "NEW_PRIMARY": target_node,
5978 if instance.disk_template in constants.DTS_INT_MIRROR:
5979 env["OLD_SECONDARY"] = target_node
5980 env["NEW_SECONDARY"] = source_node
5982 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
5984 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5986 nl_post.append(source_node)
5987 return env, nl, nl_post
5990 class LUInstanceMove(LogicalUnit):
5991 """Move an instance by data-copying.
5994 HPATH = "instance-move"
5995 HTYPE = constants.HTYPE_INSTANCE
5998 def ExpandNames(self):
5999 self._ExpandAndLockInstance()
6000 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6001 self.op.target_node = target_node
6002 self.needed_locks[locking.LEVEL_NODE] = [target_node]
6003 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6005 def DeclareLocks(self, level):
6006 if level == locking.LEVEL_NODE:
6007 self._LockInstancesNodes(primary_only=True)
6009 def BuildHooksEnv(self):
6012 This runs on master, primary and secondary nodes of the instance.
6016 "TARGET_NODE": self.op.target_node,
6017 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6019 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6020 nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
6021 self.op.target_node]
6024 def CheckPrereq(self):
6025 """Check prerequisites.
6027 This checks that the instance is in the cluster.
6030 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6031 assert self.instance is not None, \
6032 "Cannot retrieve locked instance %s" % self.op.instance_name
6034 node = self.cfg.GetNodeInfo(self.op.target_node)
6035 assert node is not None, \
6036 "Cannot retrieve locked node %s" % self.op.target_node
6038 self.target_node = target_node = node.name
6040 if target_node == instance.primary_node:
6041 raise errors.OpPrereqError("Instance %s is already on the node %s" %
6042 (instance.name, target_node),
6045 bep = self.cfg.GetClusterInfo().FillBE(instance)
6047 for idx, dsk in enumerate(instance.disks):
6048 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
6049 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
6050 " cannot copy" % idx, errors.ECODE_STATE)
6052 _CheckNodeOnline(self, target_node)
6053 _CheckNodeNotDrained(self, target_node)
6054 _CheckNodeVmCapable(self, target_node)
6056 if instance.admin_up:
6057 # check memory requirements on the target node
6058 _CheckNodeFreeMemory(self, target_node, "moving instance %s" %
6059 instance.name, bep[constants.BE_MEMORY],
6060 instance.hypervisor)
6062 self.LogInfo("Not checking memory on the secondary node as"
6063 " instance will not be started")
6065 # check bridge existence
6066 _CheckInstanceBridgesExist(self, instance, node=target_node)
6068 def Exec(self, feedback_fn):
6069 """Move an instance.
6071 The move is done by shutting it down on its present node, copying
6072 the data over (slow) and starting it on the new node.
6075 instance = self.instance
6077 source_node = instance.primary_node
6078 target_node = self.target_node
6080 self.LogInfo("Shutting down instance %s on source node %s",
6081 instance.name, source_node)
6083 result = self.rpc.call_instance_shutdown(source_node, instance,
6084 self.op.shutdown_timeout)
6085 msg = result.fail_msg
6087 if self.op.ignore_consistency:
6088 self.proc.LogWarning("Could not shutdown instance %s on node %s."
6089 " Proceeding anyway. Please make sure node"
6090 " %s is down. Error details: %s",
6091 instance.name, source_node, source_node, msg)
6093 raise errors.OpExecError("Could not shutdown instance %s on"
6095 (instance.name, source_node, msg))
6097 # create the target disks
6099 _CreateDisks(self, instance, target_node=target_node)
6100 except errors.OpExecError:
6101 self.LogWarning("Device creation failed, reverting...")
6103 _RemoveDisks(self, instance, target_node=target_node)
6105 self.cfg.ReleaseDRBDMinors(instance.name)
6108 cluster_name = self.cfg.GetClusterInfo().cluster_name
6111 # activate, get path, copy the data over
6112 for idx, disk in enumerate(instance.disks):
6113 self.LogInfo("Copying data for disk %d", idx)
6114 result = self.rpc.call_blockdev_assemble(target_node, disk,
6115 instance.name, True, idx)
6117 self.LogWarning("Can't assemble newly created disk %d: %s",
6118 idx, result.fail_msg)
6119 errs.append(result.fail_msg)
6121 dev_path = result.payload
6122 result = self.rpc.call_blockdev_export(source_node, disk,
6123 target_node, dev_path,
6126 self.LogWarning("Can't copy data over for disk %d: %s",
6127 idx, result.fail_msg)
6128 errs.append(result.fail_msg)
6132 self.LogWarning("Some disks failed to copy, aborting")
6134 _RemoveDisks(self, instance, target_node=target_node)
6136 self.cfg.ReleaseDRBDMinors(instance.name)
6137 raise errors.OpExecError("Errors during disk copy: %s" %
6140 instance.primary_node = target_node
6141 self.cfg.Update(instance, feedback_fn)
6143 self.LogInfo("Removing the disks on the original node")
6144 _RemoveDisks(self, instance, target_node=source_node)
6146 # Only start the instance if it's marked as up
6147 if instance.admin_up:
6148 self.LogInfo("Starting instance %s on node %s",
6149 instance.name, target_node)
6151 disks_ok, _ = _AssembleInstanceDisks(self, instance,
6152 ignore_secondaries=True)
6154 _ShutdownInstanceDisks(self, instance)
6155 raise errors.OpExecError("Can't activate the instance's disks")
6157 result = self.rpc.call_instance_start(target_node, instance, None, None)
6158 msg = result.fail_msg
6160 _ShutdownInstanceDisks(self, instance)
6161 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6162 (instance.name, target_node, msg))
6165 class LUNodeMigrate(LogicalUnit):
6166 """Migrate all instances from a node.
6169 HPATH = "node-migrate"
6170 HTYPE = constants.HTYPE_NODE
6173 def CheckArguments(self):
6174 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
6176 def ExpandNames(self):
6177 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6179 self.needed_locks = {}
6181 # Create tasklets for migrating instances for all instances on this node
6185 self.lock_all_nodes = False
6187 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
6188 logging.debug("Migrating instance %s", inst.name)
6189 names.append(inst.name)
6191 tasklets.append(TLMigrateInstance(self, inst.name, False,
6192 self.op.iallocator, None))
6194 if inst.disk_template in constants.DTS_EXT_MIRROR:
6195 # We need to lock all nodes, as the iallocator will choose the
6196 # destination nodes afterwards
6197 self.lock_all_nodes = True
6199 self.tasklets = tasklets
6201 # Declare node locks
6202 if self.lock_all_nodes:
6203 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6205 self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
6206 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6208 # Declare instance locks
6209 self.needed_locks[locking.LEVEL_INSTANCE] = names
6211 def DeclareLocks(self, level):
6212 if level == locking.LEVEL_NODE and not self.lock_all_nodes:
6213 self._LockInstancesNodes()
6215 def BuildHooksEnv(self):
6218 This runs on the master, the primary and all the secondaries.
6222 "NODE_NAME": self.op.node_name,
6225 nl = [self.cfg.GetMasterNode()]
6227 return (env, nl, nl)
6230 class TLMigrateInstance(Tasklet):
6231 """Tasklet class for instance migration.
6234 @ivar live: whether the migration will be done live or non-live;
6235 this variable is initialized only after CheckPrereq has run
6238 def __init__(self, lu, instance_name, cleanup,
6239 iallocator=None, target_node=None):
6240 """Initializes this class.
6243 Tasklet.__init__(self, lu)
6246 self.instance_name = instance_name
6247 self.cleanup = cleanup
6248 self.live = False # will be overridden later
6249 self.iallocator = iallocator
6250 self.target_node = target_node
6252 def CheckPrereq(self):
6253 """Check prerequisites.
6255 This checks that the instance is in the cluster.
6258 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
6259 instance = self.cfg.GetInstanceInfo(instance_name)
6260 assert instance is not None
6261 self.instance = instance
6263 if instance.disk_template not in constants.DTS_MIRRORED:
6264 raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
6265 " migrations" % instance.disk_template,
6268 if instance.disk_template in constants.DTS_EXT_MIRROR:
6269 if [self.iallocator, self.target_node].count(None) != 1:
6270 raise errors.OpPrereqError("Do not specify both, iallocator and"
6271 " target node", errors.ECODE_INVAL)
6274 self._RunAllocator()
6276 # self.target_node is already populated, either directly or by the
6278 target_node = self.target_node
6280 if len(self.lu.tasklets) == 1:
6281 # It is safe to remove locks only when we're the only tasklet in the LU
6282 nodes_keep = [instance.primary_node, self.target_node]
6283 nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE]
6284 if node not in nodes_keep]
6285 self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel)
6286 self.lu.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6289 secondary_nodes = instance.secondary_nodes
6290 if not secondary_nodes:
6291 raise errors.ConfigurationError("No secondary node but using"
6292 " %s disk template" %
6293 instance.disk_template)
6294 target_node = secondary_nodes[0]
6296 i_be = self.cfg.GetClusterInfo().FillBE(instance)
6298 # check memory requirements on the secondary node
6299 _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
6300 instance.name, i_be[constants.BE_MEMORY],
6301 instance.hypervisor)
6303 # check bridge existence
6304 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
6306 if not self.cleanup:
6307 _CheckNodeNotDrained(self.lu, target_node)
6308 result = self.rpc.call_instance_migratable(instance.primary_node,
6310 result.Raise("Can't migrate, please use failover",
6311 prereq=True, ecode=errors.ECODE_STATE)
6314 def _RunAllocator(self):
6315 """Run the allocator based on input opcode.
6318 ial = IAllocator(self.cfg, self.rpc,
6319 mode=constants.IALLOCATOR_MODE_RELOC,
6320 name=self.instance_name,
6321 # TODO See why hail breaks with a single node below
6322 relocate_from=[self.instance.primary_node,
6323 self.instance.primary_node],
6326 ial.Run(self.iallocator)
6329 raise errors.OpPrereqError("Can't compute nodes using"
6330 " iallocator '%s': %s" %
6331 (self.iallocator, ial.info),
6333 if len(ial.result) != ial.required_nodes:
6334 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6335 " of nodes (%s), required %s" %
6336 (self.iallocator, len(ial.result),
6337 ial.required_nodes), errors.ECODE_FAULT)
6338 self.target_node = ial.result[0]
6339 self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6340 self.instance_name, self.iallocator,
6341 utils.CommaJoin(ial.result))
6343 if self.lu.op.live is not None and self.lu.op.mode is not None:
6344 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
6345 " parameters are accepted",
6347 if self.lu.op.live is not None:
6349 self.lu.op.mode = constants.HT_MIGRATION_LIVE
6351 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
6352 # reset the 'live' parameter to None so that repeated
6353 # invocations of CheckPrereq do not raise an exception
6354 self.lu.op.live = None
6355 elif self.lu.op.mode is None:
6356 # read the default value from the hypervisor
6357 i_hv = self.cfg.GetClusterInfo().FillHV(self.instance, skip_globals=False)
6358 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
6360 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
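# Descriptive note (added summary, the code above is authoritative): an
# explicit 'live' value wins and is translated into the corresponding
# 'mode'; otherwise an explicit 'mode' is used as-is; if neither was
# given, the hypervisor's HV_MIGRATION_MODE default decides. self.live
# then simply reflects whether the resolved mode is HT_MIGRATION_LIVE.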
6362 def _WaitUntilSync(self):
6363 """Poll with custom rpc for disk sync.
6365 This uses our own step-based rpc call.
6368 self.feedback_fn("* wait until resync is done")
6372 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
6374 self.instance.disks)
6376 for node, nres in result.items():
6377 nres.Raise("Cannot resync disks on node %s" % node)
6378 node_done, node_percent = nres.payload
6379 all_done = all_done and node_done
6380 if node_percent is not None:
6381 min_percent = min(min_percent, node_percent)
6383 if min_percent < 100:
6384 self.feedback_fn(" - progress: %.1f%%" % min_percent)
6387 def _EnsureSecondary(self, node):
6388 """Demote a node to secondary.
6391 self.feedback_fn("* switching node %s to secondary mode" % node)
6393 for dev in self.instance.disks:
6394 self.cfg.SetDiskID(dev, node)
6396 result = self.rpc.call_blockdev_close(node, self.instance.name,
6397 self.instance.disks)
6398 result.Raise("Cannot change disk to secondary on node %s" % node)
6400 def _GoStandalone(self):
6401 """Disconnect from the network.
6404 self.feedback_fn("* changing into standalone mode")
6405 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
6406 self.instance.disks)
6407 for node, nres in result.items():
6408 nres.Raise("Cannot disconnect disks node %s" % node)
6410 def _GoReconnect(self, multimaster):
6411 """Reconnect to the network.
6417 msg = "single-master"
6418 self.feedback_fn("* changing disks into %s mode" % msg)
6419 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
6420 self.instance.disks,
6421 self.instance.name, multimaster)
6422 for node, nres in result.items():
6423 nres.Raise("Cannot change disks config on node %s" % node)
6425 def _ExecCleanup(self):
6426 """Try to cleanup after a failed migration.
6428 The cleanup is done by:
6429 - check that the instance is running only on one node
6430 (and update the config if needed)
6431 - change disks on its secondary node to secondary
6432 - wait until disks are fully synchronized
6433 - disconnect from the network
6434 - change disks into single-master mode
6435 - wait again until disks are fully synchronized
6438 instance = self.instance
6439 target_node = self.target_node
6440 source_node = self.source_node
6442 # check running on only one node
6443 self.feedback_fn("* checking where the instance actually runs"
6444 " (if this hangs, the hypervisor might be in"
6446 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
6447 for node, result in ins_l.items():
6448 result.Raise("Can't contact node %s" % node)
6450 runningon_source = instance.name in ins_l[source_node].payload
6451 runningon_target = instance.name in ins_l[target_node].payload
6453 if runningon_source and runningon_target:
6454 raise errors.OpExecError("Instance seems to be running on two nodes,"
6455 " or the hypervisor is confused. You will have"
6456 " to ensure manually that it runs only on one"
6457 " and restart this operation.")
6459 if not (runningon_source or runningon_target):
6460 raise errors.OpExecError("Instance does not seem to be running at all."
6461 " In this case, it's safer to repair by"
6462 " running 'gnt-instance stop' to ensure disk"
6463 " shutdown, and then restarting it.")
6465 if runningon_target:
6466 # the migration has actually succeeded, we need to update the config
6467 self.feedback_fn("* instance running on secondary node (%s),"
6468 " updating config" % target_node)
6469 instance.primary_node = target_node
6470 self.cfg.Update(instance, self.feedback_fn)
6471 demoted_node = source_node
6473 self.feedback_fn("* instance confirmed to be running on its"
6474 " primary node (%s)" % source_node)
6475 demoted_node = target_node
6477 if instance.disk_template in constants.DTS_INT_MIRROR:
6478 self._EnsureSecondary(demoted_node)
6480 self._WaitUntilSync()
6481 except errors.OpExecError:
6482 # we ignore errors here, since if the device is standalone, it
6483 # won't be able to sync
6485 self._GoStandalone()
6486 self._GoReconnect(False)
6487 self._WaitUntilSync()
6489 self.feedback_fn("* done")
6491 def _RevertDiskStatus(self):
6492 """Try to revert the disk status after a failed migration.
6495 target_node = self.target_node
6496 if self.instance.disk_template in constants.DTS_EXT_MIRROR:
6500 self._EnsureSecondary(target_node)
6501 self._GoStandalone()
6502 self._GoReconnect(False)
6503 self._WaitUntilSync()
6504 except errors.OpExecError, err:
6505 self.lu.LogWarning("Migration failed and I can't reconnect the"
6506 " drives: error '%s'\n"
6507 "Please look and recover the instance status" %
6510 def _AbortMigration(self):
6511 """Call the hypervisor code to abort a started migration.
6514 instance = self.instance
6515 target_node = self.target_node
6516 migration_info = self.migration_info
6518 abort_result = self.rpc.call_finalize_migration(target_node,
6522 abort_msg = abort_result.fail_msg
6524 logging.error("Aborting migration failed on target node %s: %s",
6525 target_node, abort_msg)
6526 # Don't raise an exception here, as we still have to try to revert the
6527 # disk status, even if this step failed.
6529 def _ExecMigration(self):
6530 """Migrate an instance.
6532 The migration is done by:
6533 - change the disks into dual-master mode
6534 - wait until disks are fully synchronized again
6535 - migrate the instance
6536 - change disks on the new secondary node (the old primary) to secondary
6537 - wait until disks are fully synchronized
6538 - change disks into single-master mode
6541 instance = self.instance
6542 target_node = self.target_node
6543 source_node = self.source_node
6545 self.feedback_fn("* checking disk consistency between source and target")
6546 for dev in instance.disks:
6547 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6548 raise errors.OpExecError("Disk %s is degraded or not fully"
6549 " synchronized on target node,"
6550 " aborting migrate." % dev.iv_name)
6552 # First get the migration information from the remote node
6553 result = self.rpc.call_migration_info(source_node, instance)
6554 msg = result.fail_msg
6556 log_err = ("Failed fetching source migration information from %s: %s" %
6558 logging.error(log_err)
6559 raise errors.OpExecError(log_err)
6561 self.migration_info = migration_info = result.payload
6563 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
6564 # Then switch the disks to master/master mode
6565 self._EnsureSecondary(target_node)
6566 self._GoStandalone()
6567 self._GoReconnect(True)
6568 self._WaitUntilSync()
6570 self.feedback_fn("* preparing %s to accept the instance" % target_node)
6571 result = self.rpc.call_accept_instance(target_node,
6574 self.nodes_ip[target_node])
6576 msg = result.fail_msg
6578 logging.error("Instance pre-migration failed, trying to revert"
6579 " disk status: %s", msg)
6580 self.feedback_fn("Pre-migration failed, aborting")
6581 self._AbortMigration()
6582 self._RevertDiskStatus()
6583 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6584 (instance.name, msg))
6586 self.feedback_fn("* migrating instance to %s" % target_node)
6588 result = self.rpc.call_instance_migrate(source_node, instance,
6589 self.nodes_ip[target_node],
6591 msg = result.fail_msg
6593 logging.error("Instance migration failed, trying to revert"
6594 " disk status: %s", msg)
6595 self.feedback_fn("Migration failed, aborting")
6596 self._AbortMigration()
6597 self._RevertDiskStatus()
6598 raise errors.OpExecError("Could not migrate instance %s: %s" %
6599 (instance.name, msg))
6602 instance.primary_node = target_node
6603 # distribute new instance config to the other nodes
6604 self.cfg.Update(instance, self.feedback_fn)
6606 result = self.rpc.call_finalize_migration(target_node,
6610 msg = result.fail_msg
6612 logging.error("Instance migration succeeded, but finalization failed:"
6614 raise errors.OpExecError("Could not finalize instance migration: %s" %
6617 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
6618 self._EnsureSecondary(source_node)
6619 self._WaitUntilSync()
6620 self._GoStandalone()
6621 self._GoReconnect(False)
6622 self._WaitUntilSync()
6624 self.feedback_fn("* done")
6626 def Exec(self, feedback_fn):
6627 """Perform the migration.
6630 feedback_fn("Migrating instance %s" % self.instance.name)
6632 self.feedback_fn = feedback_fn
6634 self.source_node = self.instance.primary_node
6636 # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
6637 if self.instance.disk_template in constants.DTS_INT_MIRROR:
6638 self.target_node = self.instance.secondary_nodes[0]
6639 # Otherwise self.target_node has been populated either
6640 # directly, or through an iallocator.
6642 self.all_nodes = [self.source_node, self.target_node]
6644 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
6645 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
6649 return self._ExecCleanup()
6651 return self._ExecMigration()
6654 def _CreateBlockDev(lu, node, instance, device, force_create,
6656 """Create a tree of block devices on a given node.
6658 If this device type has to be created on secondaries, create it and
6661 If not, just recurse to children keeping the same 'force' value.
6663 @param lu: the lu on whose behalf we execute
6664 @param node: the node on which to create the device
6665 @type instance: L{objects.Instance}
6666 @param instance: the instance which owns the device
6667 @type device: L{objects.Disk}
6668 @param device: the device to create
6669 @type force_create: boolean
6670 @param force_create: whether to force creation of this device; this
6671 will be changed to True whenever we find a device for which
6672 CreateOnSecondary() returns True
6673 @param info: the extra 'metadata' we should attach to the device
6674 (this will be represented as a LVM tag)
6675 @type force_open: boolean
6676 @param force_open: this parameter will be passed to the
6677 L{backend.BlockdevCreate} function where it specifies
6678 whether we run on primary or not, and it affects both
6679 the child assembly and the device's own Open() execution
6682 if device.CreateOnSecondary():
6686 for child in device.children:
6687 _CreateBlockDev(lu, node, instance, child, force_create,
6690 if not force_create:
6693 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
6696 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
6697 """Create a single block device on a given node.
6699 This will not recurse over children of the device, so they must be
6702 @param lu: the lu on whose behalf we execute
6703 @param node: the node on which to create the device
6704 @type instance: L{objects.Instance}
6705 @param instance: the instance which owns the device
6706 @type device: L{objects.Disk}
6707 @param device: the device to create
6708 @param info: the extra 'metadata' we should attach to the device
6709 (this will be represented as a LVM tag)
6710 @type force_open: boolean
6711 @param force_open: this parameter will be passed to the
6712 L{backend.BlockdevCreate} function where it specifies
6713 whether we run on primary or not, and it affects both
6714 the child assembly and the device's own Open() execution
6717 lu.cfg.SetDiskID(device, node)
6718 result = lu.rpc.call_blockdev_create(node, device, device.size,
6719 instance.name, force_open, info)
6720 result.Raise("Can't create block device %s on"
6721 " node %s for instance %s" % (device, node, instance.name))
6722 if device.physical_id is None:
6723 device.physical_id = result.payload
6726 def _GenerateUniqueNames(lu, exts):
6727 """Generate a suitable LV name.
6729 This will generate a logical volume name for the given instance.
6734 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
6735 results.append("%s%s" % (new_id, val))
6739 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
6741 """Generate a drbd8 device complete with its children.
6744 port = lu.cfg.AllocatePort()
6745 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
6746 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
6747 logical_id=(vgname, names[0]))
6748 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6749 logical_id=(vgname, names[1]))
6750 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
6751 logical_id=(primary, secondary, port,
6754 children=[dev_data, dev_meta],
6759 def _GenerateDiskTemplate(lu, template_name,
6760 instance_name, primary_node,
6761 secondary_nodes, disk_info,
6762 file_storage_dir, file_driver,
6763 base_index, feedback_fn):
6764 """Generate the entire disk layout for a given template type.
6767 #TODO: compute space requirements
6769 vgname = lu.cfg.GetVGName()
6770 disk_count = len(disk_info)
6772 if template_name == constants.DT_DISKLESS:
6774 elif template_name == constants.DT_PLAIN:
6775 if len(secondary_nodes) != 0:
6776 raise errors.ProgrammerError("Wrong template configuration")
6778 names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6779 for i in range(disk_count)])
6780 for idx, disk in enumerate(disk_info):
6781 disk_index = idx + base_index
6782 vg = disk.get("vg", vgname)
6783 feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
6784 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
6785 logical_id=(vg, names[idx]),
6786 iv_name="disk/%d" % disk_index,
6788 disks.append(disk_dev)
6789 elif template_name == constants.DT_DRBD8:
6790 if len(secondary_nodes) != 1:
6791 raise errors.ProgrammerError("Wrong template configuration")
6792 remote_node = secondary_nodes[0]
6793 minors = lu.cfg.AllocateDRBDMinor(
6794 [primary_node, remote_node] * len(disk_info), instance_name)
6797 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6798 for i in range(disk_count)]):
6799 names.append(lv_prefix + "_data")
6800 names.append(lv_prefix + "_meta")
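# For example, a two-disk instance ends up with names of the form
# ["<uuid>.disk0_data", "<uuid>.disk0_meta",
#  "<uuid>.disk1_data", "<uuid>.disk1_meta"],
# i.e. one data LV and one metadata LV per DRBD disk.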
6801 for idx, disk in enumerate(disk_info):
6802 disk_index = idx + base_index
6803 vg = disk.get("vg", vgname)
6804 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
6805 disk["size"], vg, names[idx*2:idx*2+2],
6806 "disk/%d" % disk_index,
6807 minors[idx*2], minors[idx*2+1])
6808 disk_dev.mode = disk["mode"]
6809 disks.append(disk_dev)
6810 elif template_name == constants.DT_FILE:
6811 if len(secondary_nodes) != 0:
6812 raise errors.ProgrammerError("Wrong template configuration")
6814 opcodes.RequireFileStorage()
6816 for idx, disk in enumerate(disk_info):
6817 disk_index = idx + base_index
6818 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6819 iv_name="disk/%d" % disk_index,
6820 logical_id=(file_driver,
6821 "%s/disk%d" % (file_storage_dir,
6824 disks.append(disk_dev)
6825 elif template_name == constants.DT_SHARED_FILE:
6826 if len(secondary_nodes) != 0:
6827 raise errors.ProgrammerError("Wrong template configuration")
6829 opcodes.RequireSharedFileStorage()
6831 for idx, disk in enumerate(disk_info):
6832 disk_index = idx + base_index
6833 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6834 iv_name="disk/%d" % disk_index,
6835 logical_id=(file_driver,
6836 "%s/disk%d" % (file_storage_dir,
6839 disks.append(disk_dev)
6840 elif template_name == constants.DT_BLOCK:
6841 if len(secondary_nodes) != 0:
6842 raise errors.ProgrammerError("Wrong template configuration")
6844 for idx, disk in enumerate(disk_info):
6845 disk_index = idx + base_index
6846 disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"],
6847 logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
6849 iv_name="disk/%d" % disk_index,
6851 disks.append(disk_dev)
6854 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
6858 def _GetInstanceInfoText(instance):
6859 """Compute that text that should be added to the disk's metadata.
6862 return "originstname+%s" % instance.name
6865 def _CalcEta(time_taken, written, total_size):
6866 """Calculates the ETA based on size written and total size.
6868 @param time_taken: The time taken so far
6869 @param written: amount written so far
6870 @param total_size: The total size of data to be written
6871 @return: The remaining time in seconds
6874 avg_time = time_taken / float(written)
6875 return (total_size - written) * avg_time
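# A minimal usage sketch (illustrative values): 512 MiB written out of
# 1024 MiB in 30 seconds extrapolates to roughly another 30 seconds:
#
#   >>> _CalcEta(30.0, 512, 1024)
#   30.0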
6878 def _WipeDisks(lu, instance):
6879 """Wipes instance disks.
6881 @type lu: L{LogicalUnit}
6882 @param lu: the logical unit on whose behalf we execute
6883 @type instance: L{objects.Instance}
6884 @param instance: the instance whose disks we should create
6885 @return: the success of the wipe
6888 node = instance.primary_node
6890 for device in instance.disks:
6891 lu.cfg.SetDiskID(device, node)
6893 logging.info("Pause sync of instance %s disks", instance.name)
6894 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
6896 for idx, success in enumerate(result.payload):
6898 logging.warn("pause-sync of instance %s for disks %d failed",
6902 for idx, device in enumerate(instance.disks):
6903 lu.LogInfo("* Wiping disk %d", idx)
6904 logging.info("Wiping disk %d for instance %s, node %s",
6905 idx, instance.name, node)
6907 # The wipe chunk size is MIN_WIPE_CHUNK_PERCENT % of the instance disk
6908 # size, but at most MAX_WIPE_CHUNK
6909 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
6910 constants.MIN_WIPE_CHUNK_PERCENT)
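# For example, assuming illustrative constant values of
# MIN_WIPE_CHUNK_PERCENT = 10 and MAX_WIPE_CHUNK = 1024, a 4096 MiB disk
# would be wiped in chunks of min(1024, 4096 / 100.0 * 10) = 409.6 MiB,
# while very large disks are capped at 1024 MiB per chunk.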
6915 start_time = time.time()
6917 while offset < size:
6918 wipe_size = min(wipe_chunk_size, size - offset)
6919 result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
6920 result.Raise("Could not wipe disk %d at offset %d for size %d" %
6921 (idx, offset, wipe_size))
6924 if now - last_output >= 60:
6925 eta = _CalcEta(now - start_time, offset, size)
6926 lu.LogInfo(" - done: %.1f%% ETA: %s" %
6927 (offset / float(size) * 100, utils.FormatSeconds(eta)))
6930 logging.info("Resume sync of instance %s disks", instance.name)
6932 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
6934 for idx, success in enumerate(result.payload):
6936 lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
6937 " look at the status and troubleshoot the issue.", idx)
6938 logging.warn("resume-sync of instance %s for disks %d failed",
6942 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
6943 """Create all disks for an instance.
6945 This abstracts away some work from AddInstance.
6947 @type lu: L{LogicalUnit}
6948 @param lu: the logical unit on whose behalf we execute
6949 @type instance: L{objects.Instance}
6950 @param instance: the instance whose disks we should create
6952 @param to_skip: list of indices to skip
6953 @type target_node: string
6954 @param target_node: if passed, overrides the target node for creation
6956 @return: the success of the creation
6959 info = _GetInstanceInfoText(instance)
6960 if target_node is None:
6961 pnode = instance.primary_node
6962 all_nodes = instance.all_nodes
6967 if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
6968 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6969 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
6971 result.Raise("Failed to create directory '%s' on"
6972 " node %s" % (file_storage_dir, pnode))
6974 # Note: this needs to be kept in sync with adding of disks in
6975 # LUInstanceSetParams
6976 for idx, device in enumerate(instance.disks):
6977 if to_skip and idx in to_skip:
6979 logging.info("Creating volume %s for instance %s",
6980 device.iv_name, instance.name)
6982 for node in all_nodes:
6983 f_create = node == pnode
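# Creation and opening are only forced on the instance's primary node; on
# the other nodes _CreateBlockDev will still create the device if its type
# requires presence on secondaries (per CreateOnSecondary(), e.g. DRBD).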
6984 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
6987 def _RemoveDisks(lu, instance, target_node=None):
6988 """Remove all disks for an instance.
6990 This abstracts away some work from `AddInstance()` and
6991 `RemoveInstance()`. Note that in case some of the devices couldn't
6992 be removed, the removal will continue with the other ones (compare
6993 with `_CreateDisks()`).
6995 @type lu: L{LogicalUnit}
6996 @param lu: the logical unit on whose behalf we execute
6997 @type instance: L{objects.Instance}
6998 @param instance: the instance whose disks we should remove
6999 @type target_node: string
7000 @param target_node: used to override the node on which to remove the disks
7002 @return: the success of the removal
7005 logging.info("Removing block devices for instance %s", instance.name)
7008 for device in instance.disks:
7010 edata = [(target_node, device)]
7012 edata = device.ComputeNodeTree(instance.primary_node)
7013 for node, disk in edata:
7014 lu.cfg.SetDiskID(disk, node)
7015 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
7017 lu.LogWarning("Could not remove block device %s on node %s,"
7018 " continuing anyway: %s", device.iv_name, node, msg)
7021 if instance.disk_template == constants.DT_FILE:
7022 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7026 tgt = instance.primary_node
7027 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
7029 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
7030 file_storage_dir, instance.primary_node, result.fail_msg)
7036 def _ComputeDiskSizePerVG(disk_template, disks):
7037 """Compute disk size requirements in the volume group
7040 def _compute(disks, payload):
7041 """Universal algorithm
7046 vgs[disk["vg"]] = vgs.get("vg", 0) + disk["size"] + payload
7050 # Required free disk space as a function of disk and swap space
7052 constants.DT_DISKLESS: {},
7053 constants.DT_PLAIN: _compute(disks, 0),
7054 # 128 MB are added for drbd metadata for each disk
7055 constants.DT_DRBD8: _compute(disks, 128),
7056 constants.DT_FILE: {},
7057 constants.DT_SHARED_FILE: {},
7060 if disk_template not in req_size_dict:
7061 raise errors.ProgrammerError("Disk template '%s' size requirement"
7062 " is unknown" % disk_template)
7064 return req_size_dict[disk_template]
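# Illustrative sketch (values are assumptions, not from the original
# module) of the per-VG accumulation above: two plain LVM disks in the
# same volume group simply add up, while the DRBD template would add
# 128 MiB of metadata per disk:
#
#   >>> _ComputeDiskSizePerVG(constants.DT_PLAIN,
#   ...                       [{"vg": "xenvg", "size": 1024},
#   ...                        {"vg": "xenvg", "size": 512}])
#   {'xenvg': 1536}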
7067 def _ComputeDiskSize(disk_template, disks):
7068 """Compute disk size requirements in the volume group
7071 # Required free disk space as a function of disk and swap space
7073 constants.DT_DISKLESS: None,
7074 constants.DT_PLAIN: sum(d["size"] for d in disks),
7075 # 128 MB are added for drbd metadata for each disk
7076 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
7077 constants.DT_FILE: None,
7078 constants.DT_SHARED_FILE: 0,
7079 constants.DT_BLOCK: 0,
7082 if disk_template not in req_size_dict:
7083 raise errors.ProgrammerError("Disk template '%s' size requirement"
7084 " is unknown" % disk_template)
7086 return req_size_dict[disk_template]
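# Illustrative sketch (values are assumptions): for two DRBD disks the
# total requirement includes 128 MiB of metadata per disk:
#
#   >>> _ComputeDiskSize(constants.DT_DRBD8, [{"size": 1024}, {"size": 2048}])
#   3328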
7089 def _FilterVmNodes(lu, nodenames):
7090 """Filters out non-vm_capable nodes from a list.
7092 @type lu: L{LogicalUnit}
7093 @param lu: the logical unit for which we check
7094 @type nodenames: list
7095 @param nodenames: the list of nodes on which we should check
7097 @return: the list of vm-capable nodes
7100 non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
7101 return [name for name in nodenames if name not in non_vm_nodes]
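# Note that the filtering is done negatively: the (usually short) list of
# non-vm_capable nodes is fetched once and everything else passes through,
# which also preserves the original ordering of nodenames.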
7104 def _CheckHVParams(lu, nodenames, hvname, hvparams):
7105 """Hypervisor parameter validation.
7107 This function abstracts the hypervisor parameter validation to be
7108 used in both instance create and instance modify.
7110 @type lu: L{LogicalUnit}
7111 @param lu: the logical unit for which we check
7112 @type nodenames: list
7113 @param nodenames: the list of nodes on which we should check
7114 @type hvname: string
7115 @param hvname: the name of the hypervisor we should use
7116 @type hvparams: dict
7117 @param hvparams: the parameters which we need to check
7118 @raise errors.OpPrereqError: if the parameters are not valid
7121 nodenames = _FilterVmNodes(lu, nodenames)
7122 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
7125 for node in nodenames:
7129 info.Raise("Hypervisor parameter validation failed on node %s" % node)
7132 def _CheckOSParams(lu, required, nodenames, osname, osparams):
7133 """OS parameters validation.
7135 @type lu: L{LogicalUnit}
7136 @param lu: the logical unit for which we check
7137 @type required: boolean
7138 @param required: whether the validation should fail if the OS is not
7140 @type nodenames: list
7141 @param nodenames: the list of nodes on which we should check
7142 @type osname: string
7143 @param osname: the name of the OS we should use
7144 @type osparams: dict
7145 @param osparams: the parameters which we need to check
7146 @raise errors.OpPrereqError: if the parameters are not valid
7149 nodenames = _FilterVmNodes(lu, nodenames)
7150 result = lu.rpc.call_os_validate(required, nodenames, osname,
7151 [constants.OS_VALIDATE_PARAMETERS],
7153 for node, nres in result.items():
7154 # we don't check for offline cases since this should be run only
7155 # against the master node and/or an instance's nodes
7156 nres.Raise("OS Parameters validation failed on node %s" % node)
7157 if not nres.payload:
7158 lu.LogInfo("OS %s not found on node %s, validation skipped",
7162 class LUInstanceCreate(LogicalUnit):
7163 """Create an instance.
7166 HPATH = "instance-add"
7167 HTYPE = constants.HTYPE_INSTANCE
7170 def CheckArguments(self):
7174 # do not require name_check to ease forward/backward compatibility
7176 if self.op.no_install and self.op.start:
7177 self.LogInfo("No-installation mode selected, disabling startup")
7178 self.op.start = False
7179 # validate/normalize the instance name
7180 self.op.instance_name = \
7181 netutils.Hostname.GetNormalizedName(self.op.instance_name)
7183 if self.op.ip_check and not self.op.name_check:
7184 # TODO: make the ip check more flexible and not depend on the name check
7185 raise errors.OpPrereqError("Cannot do ip check without a name check",
7188 # check nics' parameter names
7189 for nic in self.op.nics:
7190 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
7192 # check disks. parameter names and consistent adopt/no-adopt strategy
7193 has_adopt = has_no_adopt = False
7194 for disk in self.op.disks:
7195 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
7200 if has_adopt and has_no_adopt:
7201 raise errors.OpPrereqError("Either all disks are adopted or none is",
7204 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
7205 raise errors.OpPrereqError("Disk adoption is not supported for the"
7206 " '%s' disk template" %
7207 self.op.disk_template,
7209 if self.op.iallocator is not None:
7210 raise errors.OpPrereqError("Disk adoption not allowed with an"
7211 " iallocator script", errors.ECODE_INVAL)
7212 if self.op.mode == constants.INSTANCE_IMPORT:
7213 raise errors.OpPrereqError("Disk adoption not allowed for"
7214 " instance import", errors.ECODE_INVAL)
7216 if self.op.disk_template in constants.DTS_MUST_ADOPT:
7217 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
7218 " but no 'adopt' parameter given" %
7219 self.op.disk_template,
7222 self.adopt_disks = has_adopt
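# At this point self.adopt_disks is True only when every disk
# specification carried an "adopt" entry (e.g. a hypothetical
# {"size": 10240, "adopt": "existing_lv"}), and False when none did;
# mixed specifications were rejected above.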
7224 # instance name verification
7225 if self.op.name_check:
7226 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
7227 self.op.instance_name = self.hostname1.name
7228 # used in CheckPrereq for ip ping check
7229 self.check_ip = self.hostname1.ip
7231 self.check_ip = None
7233 # file storage checks
7234 if (self.op.file_driver and
7235 self.op.file_driver not in constants.FILE_DRIVER):
7236 raise errors.OpPrereqError("Invalid file driver name '%s'" %
7237 self.op.file_driver, errors.ECODE_INVAL)
7239 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
7240 raise errors.OpPrereqError("File storage directory path not absolute",
7243 ### Node/iallocator related checks
7244 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
7246 if self.op.pnode is not None:
7247 if self.op.disk_template in constants.DTS_INT_MIRROR:
7248 if self.op.snode is None:
7249 raise errors.OpPrereqError("The networked disk templates need"
7250 " a mirror node", errors.ECODE_INVAL)
7252 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
7254 self.op.snode = None
7256 self._cds = _GetClusterDomainSecret()
7258 if self.op.mode == constants.INSTANCE_IMPORT:
7259 # On import force_variant must be True, because if we forced it at
7260 # initial install, our only chance when importing it back is that it
7262 self.op.force_variant = True
7264 if self.op.no_install:
7265 self.LogInfo("No-installation mode has no effect during import")
7267 elif self.op.mode == constants.INSTANCE_CREATE:
7268 if self.op.os_type is None:
7269 raise errors.OpPrereqError("No guest OS specified",
7271 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
7272 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
7273 " installation" % self.op.os_type,
7275 if self.op.disk_template is None:
7276 raise errors.OpPrereqError("No disk template specified",
7279 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7280 # Check handshake to ensure both clusters have the same domain secret
7281 src_handshake = self.op.source_handshake
7282 if not src_handshake:
7283 raise errors.OpPrereqError("Missing source handshake",
7286 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
7289 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
7292 # Load and check source CA
7293 self.source_x509_ca_pem = self.op.source_x509_ca
7294 if not self.source_x509_ca_pem:
7295 raise errors.OpPrereqError("Missing source X509 CA",
7299 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7301 except OpenSSL.crypto.Error, err:
7302 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7303 (err, ), errors.ECODE_INVAL)
7305 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7306 if errcode is not None:
7307 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7310 self.source_x509_ca = cert
7312 src_instance_name = self.op.source_instance_name
7313 if not src_instance_name:
7314 raise errors.OpPrereqError("Missing source instance name",
7317 self.source_instance_name = \
7318 netutils.GetHostname(name=src_instance_name).name
7321 raise errors.OpPrereqError("Invalid instance creation mode %r" %
7322 self.op.mode, errors.ECODE_INVAL)
7324 def ExpandNames(self):
7325 """ExpandNames for CreateInstance.
7327 Figure out the right locks for instance creation.
7330 self.needed_locks = {}
7332 instance_name = self.op.instance_name
7333 # this is just a preventive check, but someone might still add this
7334 # instance in the meantime, and creation will fail at lock-add time
7335 if instance_name in self.cfg.GetInstanceList():
7336 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7337 instance_name, errors.ECODE_EXISTS)
7339 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7341 if self.op.iallocator:
7342 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7344 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7345 nodelist = [self.op.pnode]
7346 if self.op.snode is not None:
7347 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7348 nodelist.append(self.op.snode)
7349 self.needed_locks[locking.LEVEL_NODE] = nodelist
7351 # in case of import lock the source node too
7352 if self.op.mode == constants.INSTANCE_IMPORT:
7353 src_node = self.op.src_node
7354 src_path = self.op.src_path
7356 if src_path is None:
7357 self.op.src_path = src_path = self.op.instance_name
7359 if src_node is None:
7360 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7361 self.op.src_node = None
7362 if os.path.isabs(src_path):
7363 raise errors.OpPrereqError("Importing an instance from an absolute"
7364 " path requires a source node option.",
7367 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7368 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7369 self.needed_locks[locking.LEVEL_NODE].append(src_node)
7370 if not os.path.isabs(src_path):
7371 self.op.src_path = src_path = \
7372 utils.PathJoin(constants.EXPORT_DIR, src_path)
7374 def _RunAllocator(self):
7375 """Run the allocator based on input opcode.
7378 nics = [n.ToDict() for n in self.nics]
7379 ial = IAllocator(self.cfg, self.rpc,
7380 mode=constants.IALLOCATOR_MODE_ALLOC,
7381 name=self.op.instance_name,
7382 disk_template=self.op.disk_template,
7385 vcpus=self.be_full[constants.BE_VCPUS],
7386 mem_size=self.be_full[constants.BE_MEMORY],
7389 hypervisor=self.op.hypervisor,
7392 ial.Run(self.op.iallocator)
7395 raise errors.OpPrereqError("Can't compute nodes using"
7396 " iallocator '%s': %s" %
7397 (self.op.iallocator, ial.info),
7399 if len(ial.result) != ial.required_nodes:
7400 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7401 " of nodes (%s), required %s" %
7402 (self.op.iallocator, len(ial.result),
7403 ial.required_nodes), errors.ECODE_FAULT)
7404 self.op.pnode = ial.result[0]
7405 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7406 self.op.instance_name, self.op.iallocator,
7407 utils.CommaJoin(ial.result))
7408 if ial.required_nodes == 2:
7409 self.op.snode = ial.result[1]
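# The allocator returns the chosen primary node first; for templates that
# need two nodes (ial.required_nodes == 2) the second entry becomes the
# secondary node.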
7411 def BuildHooksEnv(self):
7414 This runs on master, primary and secondary nodes of the instance.
7418 "ADD_MODE": self.op.mode,
7420 if self.op.mode == constants.INSTANCE_IMPORT:
7421 env["SRC_NODE"] = self.op.src_node
7422 env["SRC_PATH"] = self.op.src_path
7423 env["SRC_IMAGES"] = self.src_images
7425 env.update(_BuildInstanceHookEnv(
7426 name=self.op.instance_name,
7427 primary_node=self.op.pnode,
7428 secondary_nodes=self.secondaries,
7429 status=self.op.start,
7430 os_type=self.op.os_type,
7431 memory=self.be_full[constants.BE_MEMORY],
7432 vcpus=self.be_full[constants.BE_VCPUS],
7433 nics=_NICListToTuple(self, self.nics),
7434 disk_template=self.op.disk_template,
7435 disks=[(d["size"], d["mode"]) for d in self.disks],
7438 hypervisor_name=self.op.hypervisor,
7441 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
7445 def _ReadExportInfo(self):
7446 """Reads the export information from disk.
7448 It will override the opcode source node and path with the actual
7449 information, if these two were not specified before.
7451 @return: the export information
7454 assert self.op.mode == constants.INSTANCE_IMPORT
7456 src_node = self.op.src_node
7457 src_path = self.op.src_path
7459 if src_node is None:
7460 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7461 exp_list = self.rpc.call_export_list(locked_nodes)
7463 for node in exp_list:
7464 if exp_list[node].fail_msg:
7466 if src_path in exp_list[node].payload:
7468 self.op.src_node = src_node = node
7469 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7473 raise errors.OpPrereqError("No export found for relative path %s" %
7474 src_path, errors.ECODE_INVAL)
7476 _CheckNodeOnline(self, src_node)
7477 result = self.rpc.call_export_info(src_node, src_path)
7478 result.Raise("No export or invalid export found in dir %s" % src_path)
7480 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7481 if not export_info.has_section(constants.INISECT_EXP):
7482 raise errors.ProgrammerError("Corrupted export config",
7483 errors.ECODE_ENVIRON)
7485 ei_version = export_info.get(constants.INISECT_EXP, "version")
7486 if (int(ei_version) != constants.EXPORT_VERSION):
7487 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7488 (ei_version, constants.EXPORT_VERSION),
7489 errors.ECODE_ENVIRON)
return export_info
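# The export data is an INI-style file read back through
# SerializableConfigParser; a minimal sketch of its layout (section and
# option names below are illustrative, the real ones come from the
# INISECT_* constants and the export code):
#
#   [export]
#   version = 0
#   [instance]
#   name = inst1.example.com
#   disk_count = 1
#   disk0_size = 1024
#   nic_count = 1
#   nic0_mac = aa:00:00:11:22:33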
7492 def _ReadExportParams(self, einfo):
7493 """Use export parameters as defaults.
7495 In case the opcode doesn't specify (as in override) some instance
7496 parameters, then try to use them from the export information, if
that declares them.
if self.op.os_type is None:
7500 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7502 if self.op.disk_template is None:
7503 if einfo.has_option(constants.INISECT_INS, "disk_template"):
7504 self.op.disk_template = einfo.get(constants.INISECT_INS,
"disk_template")
else:
7507 raise errors.OpPrereqError("No disk template specified and the export"
7508 " is missing the disk_template information",
errors.ECODE_INVAL)
7511 if not self.op.disks:
7512 if einfo.has_option(constants.INISECT_INS, "disk_count"):
disks = []
7514 # TODO: import the disk iv_name too
7515 for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7516 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7517 disks.append({"size": disk_sz})
7518 self.op.disks = disks
else:
7520 raise errors.OpPrereqError("No disk info specified and the export"
7521 " is missing the disk information",
errors.ECODE_INVAL)
7524 if (not self.op.nics and
7525 einfo.has_option(constants.INISECT_INS, "nic_count")):
7527 for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7529 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7530 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7535 if (self.op.hypervisor is None and
7536 einfo.has_option(constants.INISECT_INS, "hypervisor")):
7537 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7538 if einfo.has_section(constants.INISECT_HYP):
7539 # use the export parameters but do not override the ones
7540 # specified by the user
7541 for name, value in einfo.items(constants.INISECT_HYP):
7542 if name not in self.op.hvparams:
7543 self.op.hvparams[name] = value
7545 if einfo.has_section(constants.INISECT_BEP):
7546 # use the parameters, without overriding
7547 for name, value in einfo.items(constants.INISECT_BEP):
7548 if name not in self.op.beparams:
7549 self.op.beparams[name] = value
7551 # try to read the parameters old style, from the main section
7552 for name in constants.BES_PARAMETERS:
7553 if (name not in self.op.beparams and
7554 einfo.has_option(constants.INISECT_INS, name)):
7555 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7557 if einfo.has_section(constants.INISECT_OSP):
7558 # use the parameters, without overriding
7559 for name, value in einfo.items(constants.INISECT_OSP):
7560 if name not in self.op.osparams:
7561 self.op.osparams[name] = value
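# Precedence of the merge above: anything given explicitly in the opcode
# wins, the export only fills in what was left unset.  Old-style exports
# that lack a separate backend section are still handled by reading the
# BE parameters straight from the instance section.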
7563 def _RevertToDefaults(self, cluster):
7564 """Revert the instance parameters to the default values.
7568 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7569 for name in self.op.hvparams.keys():
7570 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7571 del self.op.hvparams[name]
7573 be_defs = cluster.SimpleFillBE({})
7574 for name in self.op.beparams.keys():
7575 if name in be_defs and be_defs[name] == self.op.beparams[name]:
7576 del self.op.beparams[name]
7578 nic_defs = cluster.SimpleFillNIC({})
7579 for nic in self.op.nics:
7580 for name in constants.NICS_PARAMETERS:
7581 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
del nic[name]
7584 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7585 for name in self.op.osparams.keys():
7586 if name in os_defs and os_defs[name] == self.op.osparams[name]:
7587 del self.op.osparams[name]
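# Effect of _RevertToDefaults, with illustrative numbers: if the cluster
# default for BE_MEMORY is 128 and the export (or opcode) also carries
# 128, the parameter is dropped again, so the new instance keeps tracking
# the cluster default instead of pinning the current value.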
7589 def CheckPrereq(self):
7590 """Check prerequisites.
7593 if self.op.mode == constants.INSTANCE_IMPORT:
7594 export_info = self._ReadExportInfo()
7595 self._ReadExportParams(export_info)
7597 if (not self.cfg.GetVGName() and
7598 self.op.disk_template not in constants.DTS_NOT_LVM):
7599 raise errors.OpPrereqError("Cluster does not support lvm-based"
7600 " instances", errors.ECODE_STATE)
7602 if self.op.hypervisor is None:
7603 self.op.hypervisor = self.cfg.GetHypervisorType()
7605 cluster = self.cfg.GetClusterInfo()
7606 enabled_hvs = cluster.enabled_hypervisors
7607 if self.op.hypervisor not in enabled_hvs:
7608 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7609 " cluster (%s)" % (self.op.hypervisor,
7610 ",".join(enabled_hvs)),
7613 # check hypervisor parameter syntax (locally)
7614 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7615 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
self.op.hvparams)
7617 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7618 hv_type.CheckParameterSyntax(filled_hvp)
7619 self.hv_full = filled_hvp
7620 # check that we don't specify global parameters on an instance
7621 _CheckGlobalHvParams(self.op.hvparams)
7623 # fill and remember the beparams dict
7624 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7625 self.be_full = cluster.SimpleFillBE(self.op.beparams)
7627 # build os parameters
7628 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7630 # now that hvp/bep are in final format, let's reset to defaults,
7632 if self.op.identify_defaults:
7633 self._RevertToDefaults(cluster)
self.nics = []
7637 for idx, nic in enumerate(self.op.nics):
7638 nic_mode_req = nic.get("mode", None)
7639 nic_mode = nic_mode_req
7640 if nic_mode is None:
7641 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7643 # in routed mode, for the first nic, the default ip is 'auto'
7644 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7645 default_ip_mode = constants.VALUE_AUTO
7647 default_ip_mode = constants.VALUE_NONE
7649 # ip validity checks
7650 ip = nic.get("ip", default_ip_mode)
7651 if ip is None or ip.lower() == constants.VALUE_NONE:
nic_ip = None
7653 elif ip.lower() == constants.VALUE_AUTO:
7654 if not self.op.name_check:
7655 raise errors.OpPrereqError("IP address set to auto but name checks"
7656 " have been skipped",
7658 nic_ip = self.hostname1.ip
else:
7660 if not netutils.IPAddress.IsValid(ip):
7661 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
errors.ECODE_INVAL)
nic_ip = ip
7665 # TODO: check the ip address for uniqueness
7666 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7667 raise errors.OpPrereqError("Routed nic mode requires an ip address",
7670 # MAC address verification
7671 mac = nic.get("mac", constants.VALUE_AUTO)
7672 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7673 mac = utils.NormalizeAndValidateMac(mac)
try:
7676 self.cfg.ReserveMAC(mac, self.proc.GetECId())
7677 except errors.ReservationError:
7678 raise errors.OpPrereqError("MAC address %s already in use"
7679 " in cluster" % mac,
7680 errors.ECODE_NOTUNIQUE)
7682 # Build nic parameters
7683 link = nic.get(constants.INIC_LINK, None)
nicparams = {}
if nic_mode_req:
7686 nicparams[constants.NIC_MODE] = nic_mode_req
if link:
7688 nicparams[constants.NIC_LINK] = link
7690 check_params = cluster.SimpleFillNIC(nicparams)
7691 objects.NIC.CheckParameterSyntax(check_params)
7692 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7694 # disk checks/pre-build
self.disks = []
7696 for disk in self.op.disks:
7697 mode = disk.get("mode", constants.DISK_RDWR)
7698 if mode not in constants.DISK_ACCESS_SET:
7699 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7700 mode, errors.ECODE_INVAL)
7701 size = disk.get("size", None)
7703 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7706 except (TypeError, ValueError):
7707 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7709 vg = disk.get("vg", self.cfg.GetVGName())
7710 new_disk = {"size": size, "mode": mode, "vg": vg}
7712 new_disk["adopt"] = disk["adopt"]
7713 self.disks.append(new_disk)
7715 if self.op.mode == constants.INSTANCE_IMPORT:
7717 # Check that the new instance doesn't have less disks than the export
7718 instance_disks = len(self.disks)
7719 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7720 if instance_disks < export_disks:
7721 raise errors.OpPrereqError("Not enough disks to import."
7722 " (instance: %d, export: %d)" %
7723 (instance_disks, export_disks),
errors.ECODE_INVAL)
disk_images = []
7727 for idx in range(export_disks):
7728 option = 'disk%d_dump' % idx
7729 if export_info.has_option(constants.INISECT_INS, option):
7730 # FIXME: are the old os-es, disk sizes, etc. useful?
7731 export_name = export_info.get(constants.INISECT_INS, option)
7732 image = utils.PathJoin(self.op.src_path, export_name)
7733 disk_images.append(image)
else:
7735 disk_images.append(False)
7737 self.src_images = disk_images
7739 old_name = export_info.get(constants.INISECT_INS, 'name')
try:
7741 exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7742 except (TypeError, ValueError), err:
7743 raise errors.OpPrereqError("Invalid export file, nic_count is not"
7744 " an integer: %s" % str(err),
7746 if self.op.instance_name == old_name:
7747 for idx, nic in enumerate(self.nics):
7748 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7749 nic_mac_ini = 'nic%d_mac' % idx
7750 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7752 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7754 # ip ping checks (we use the same ip that was resolved in ExpandNames)
7755 if self.op.ip_check:
7756 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7757 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7758 (self.check_ip, self.op.instance_name),
7759 errors.ECODE_NOTUNIQUE)
7761 #### mac address generation
7762 # By generating here the mac address both the allocator and the hooks get
7763 # the real final mac address rather than the 'auto' or 'generate' value.
7764 # There is a race condition between the generation and the instance object
7765 # creation, which means that we know the mac is valid now, but we're not
7766 # sure it will be when we actually add the instance. If things go bad
7767 # adding the instance will abort because of a duplicate mac, and the
7768 # creation job will fail.
7769 for nic in self.nics:
7770 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7771 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7775 if self.op.iallocator is not None:
7776 self._RunAllocator()
7778 #### node related checks
7780 # check primary node
7781 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7782 assert self.pnode is not None, \
7783 "Cannot retrieve locked node %s" % self.op.pnode
7785 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7786 pnode.name, errors.ECODE_STATE)
7788 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7789 pnode.name, errors.ECODE_STATE)
7790 if not pnode.vm_capable:
7791 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
7792 " '%s'" % pnode.name, errors.ECODE_STATE)
7794 self.secondaries = []
7796 # mirror node verification
7797 if self.op.disk_template in constants.DTS_INT_MIRROR:
7798 if self.op.snode == pnode.name:
7799 raise errors.OpPrereqError("The secondary node cannot be the"
7800 " primary node.", errors.ECODE_INVAL)
7801 _CheckNodeOnline(self, self.op.snode)
7802 _CheckNodeNotDrained(self, self.op.snode)
7803 _CheckNodeVmCapable(self, self.op.snode)
7804 self.secondaries.append(self.op.snode)
7806 nodenames = [pnode.name] + self.secondaries
7808 if not self.adopt_disks:
7809 # Check lv size requirements, if not adopting
7810 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
7811 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
7813 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
7814 all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
7815 if len(all_lvs) != len(self.disks):
7816 raise errors.OpPrereqError("Duplicate volume names given for adoption",
7818 for lv_name in all_lvs:
try:
7820 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
7821 # to ReserveLV uses the same syntax
7822 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7823 except errors.ReservationError:
7824 raise errors.OpPrereqError("LV named %s used by another instance" %
7825 lv_name, errors.ECODE_NOTUNIQUE)
7827 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
7828 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
7830 node_lvs = self.rpc.call_lv_list([pnode.name],
7831 vg_names.payload.keys())[pnode.name]
7832 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7833 node_lvs = node_lvs.payload
7835 delta = all_lvs.difference(node_lvs.keys())
7837 raise errors.OpPrereqError("Missing logical volume(s): %s" %
7838 utils.CommaJoin(delta),
7840 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7842 raise errors.OpPrereqError("Online logical volumes found, cannot"
7843 " adopt: %s" % utils.CommaJoin(online_lvs),
7845 # update the size of disk based on what is found
7846 for dsk in self.disks:
7847 dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
7849 elif self.op.disk_template == constants.DT_BLOCK:
7850 # Normalize and de-duplicate device paths
7851 all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks])
7852 if len(all_disks) != len(self.disks):
7853 raise errors.OpPrereqError("Duplicate disk names given for adoption",
7855 baddisks = [d for d in all_disks
7856 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
7858 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
7859 " cannot be adopted" %
7860 (", ".join(baddisks),
7861 constants.ADOPTABLE_BLOCKDEV_ROOT),
7864 node_disks = self.rpc.call_bdev_sizes([pnode.name],
7865 list(all_disks))[pnode.name]
7866 node_disks.Raise("Cannot get block device information from node %s" %
7868 node_disks = node_disks.payload
7869 delta = all_disks.difference(node_disks.keys())
7871 raise errors.OpPrereqError("Missing block device(s): %s" %
7872 utils.CommaJoin(delta),
7874 for dsk in self.disks:
7875 dsk["size"] = int(float(node_disks[dsk["adopt"]]))
7877 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7879 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7880 # check OS parameters (remotely)
7881 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7883 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7885 # memory check on primary node
if self.op.start:
7887 _CheckNodeFreeMemory(self, self.pnode.name,
7888 "creating instance %s" % self.op.instance_name,
7889 self.be_full[constants.BE_MEMORY],
self.op.hypervisor)
7892 self.dry_run_result = list(nodenames)
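# In dry-run mode execution stops after CheckPrereq, so the node list
# stored above is what the caller gets back: it shows where the instance
# would have been placed without actually creating anything.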
7894 def Exec(self, feedback_fn):
7895 """Create and add the instance to the cluster.
7898 instance = self.op.instance_name
7899 pnode_name = self.pnode.name
7901 ht_kind = self.op.hypervisor
7902 if ht_kind in constants.HTS_REQ_PORT:
7903 network_port = self.cfg.AllocatePort()
else:
network_port = None
7907 if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
7908 # this is needed because os.path.join does not accept None arguments
7909 if self.op.file_storage_dir is None:
7910 string_file_storage_dir = ""
else:
7912 string_file_storage_dir = self.op.file_storage_dir
7914 # build the full file storage dir path
7915 if self.op.disk_template == constants.DT_SHARED_FILE:
7916 get_fsd_fn = self.cfg.GetSharedFileStorageDir
else:
7918 get_fsd_fn = self.cfg.GetFileStorageDir
7920 file_storage_dir = utils.PathJoin(get_fsd_fn(),
7921 string_file_storage_dir, instance)
7923 file_storage_dir = ""
7925 disks = _GenerateDiskTemplate(self,
7926 self.op.disk_template,
7927 instance, pnode_name,
self.secondaries,
self.disks,
file_storage_dir,
7931 self.op.file_driver,
0,
feedback_fn)
7935 iobj = objects.Instance(name=instance, os=self.op.os_type,
7936 primary_node=pnode_name,
7937 nics=self.nics, disks=disks,
7938 disk_template=self.op.disk_template,
admin_up=False,
7940 network_port=network_port,
7941 beparams=self.op.beparams,
7942 hvparams=self.op.hvparams,
7943 hypervisor=self.op.hypervisor,
7944 osparams=self.op.osparams,
)
7947 if self.adopt_disks:
7948 if self.op.disk_template == constants.DT_PLAIN:
7949 # rename LVs to the newly-generated names; we need to construct
7950 # 'fake' LV disks with the old data, plus the new unique_id
7951 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
rename_to = []
7953 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
7954 rename_to.append(t_dsk.logical_id)
7955 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7956 self.cfg.SetDiskID(t_dsk, pnode_name)
7957 result = self.rpc.call_blockdev_rename(pnode_name,
7958 zip(tmp_disks, rename_to))
7959 result.Raise("Failed to rename adoped LVs")
7961 feedback_fn("* creating instance disks...")
7963 _CreateDisks(self, iobj)
7964 except errors.OpExecError:
7965 self.LogWarning("Device creation failed, reverting...")
7967 _RemoveDisks(self, iobj)
7969 self.cfg.ReleaseDRBDMinors(instance)
7972 if self.cfg.GetClusterInfo().prealloc_wipe_disks:
7973 feedback_fn("* wiping instance disks...")
7975 _WipeDisks(self, iobj)
7976 except errors.OpExecError:
7977 self.LogWarning("Device wiping failed, reverting...")
7979 _RemoveDisks(self, iobj)
7981 self.cfg.ReleaseDRBDMinors(instance)
7984 feedback_fn("adding instance %s to cluster config" % instance)
7986 self.cfg.AddInstance(iobj, self.proc.GetECId())
7988 # Declare that we don't want to remove the instance lock anymore, as we've
7989 # added the instance to the config
7990 del self.remove_locks[locking.LEVEL_INSTANCE]
7991 # Unlock all the nodes
7992 if self.op.mode == constants.INSTANCE_IMPORT:
7993 nodes_keep = [self.op.src_node]
7994 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7995 if node != self.op.src_node]
7996 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7997 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7999 self.context.glm.release(locking.LEVEL_NODE)
8000 del self.acquired_locks[locking.LEVEL_NODE]
8002 if self.op.wait_for_sync:
8003 disk_abort = not _WaitForSync(self, iobj)
8004 elif iobj.disk_template in constants.DTS_INT_MIRROR:
8005 # make sure the disks are not degraded (still sync-ing is ok)
8007 feedback_fn("* checking mirrors status")
8008 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
else:
disk_abort = False
if disk_abort:
8013 _RemoveDisks(self, iobj)
8014 self.cfg.RemoveInstance(iobj.name)
8015 # Make sure the instance lock gets removed
8016 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
8017 raise errors.OpExecError("There are some degraded disks for"
8020 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
8021 if self.op.mode == constants.INSTANCE_CREATE:
8022 if not self.op.no_install:
8023 feedback_fn("* running the instance OS create scripts...")
8024 # FIXME: pass debug option from opcode to backend
8025 result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
8026 self.op.debug_level)
8027 result.Raise("Could not add os for instance %s"
8028 " on node %s" % (instance, pnode_name))
8030 elif self.op.mode == constants.INSTANCE_IMPORT:
8031 feedback_fn("* running the instance OS import scripts...")
transfers = []
8035 for idx, image in enumerate(self.src_images):
if not image:
continue
8039 # FIXME: pass debug option from opcode to backend
8040 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
8041 constants.IEIO_FILE, (image, ),
8042 constants.IEIO_SCRIPT,
8043 (iobj.disks[idx], idx),
8045 transfers.append(dt)
import_result = \
8048 masterd.instance.TransferInstanceData(self, feedback_fn,
8049 self.op.src_node, pnode_name,
8050 self.pnode.secondary_ip,
iobj, transfers)
8052 if not compat.all(import_result):
8053 self.LogWarning("Some disks for instance %s on node %s were not"
8054 " imported successfully" % (instance, pnode_name))
8056 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
8057 feedback_fn("* preparing remote import...")
8058 # The source cluster will stop the instance before attempting to make a
8059 # connection. In some cases stopping an instance can take a long time,
8060 # hence the shutdown timeout is added to the connection timeout.
8061 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
8062 self.op.source_shutdown_timeout)
8063 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
8065 assert iobj.primary_node == self.pnode.name
disk_results = \
8067 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
8068 self.source_x509_ca,
8069 self._cds, timeouts)
8070 if not compat.all(disk_results):
8071 # TODO: Should the instance still be started, even if some disks
8072 # failed to import (valid for local imports, too)?
8073 self.LogWarning("Some disks for instance %s on node %s were not"
8074 " imported successfully" % (instance, pnode_name))
8076 # Run rename script on newly imported instance
8077 assert iobj.name == instance
8078 feedback_fn("Running rename script for %s" % instance)
8079 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
8080 self.source_instance_name,
8081 self.op.debug_level)
8083 self.LogWarning("Failed to run rename script for %s on node"
8084 " %s: %s" % (instance, pnode_name, result.fail_msg))
else:
8087 # also checked in the prereq part
8088 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
% self.op.mode)
if self.op.start:
8092 iobj.admin_up = True
8093 self.cfg.Update(iobj, feedback_fn)
8094 logging.info("Starting instance %s on node %s", instance, pnode_name)
8095 feedback_fn("* starting instance...")
8096 result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
8097 result.Raise("Could not start instance")
8099 return list(iobj.all_nodes)
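# Rough sketch of how this LU is usually driven (field names follow
# opcodes.OpInstanceCreate, all values purely illustrative); the opcode is
# submitted through the job queue rather than by calling the LU directly:
#
#   op = opcodes.OpInstanceCreate(instance_name="inst1.example.com",
#                                 mode=constants.INSTANCE_CREATE,
#                                 disk_template=constants.DT_DRBD8,
#                                 disks=[{"size": 1024}],
#                                 nics=[{}],
#                                 os_type="debootstrap",
#                                 iallocator="hail")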
8102 class LUInstanceConsole(NoHooksLU):
8103 """Connect to an instance's console.
8105 This is somewhat special in that it returns the command line that
8106 you need to run on the master node in order to connect to the console.
8112 def ExpandNames(self):
8113 self._ExpandAndLockInstance()
8115 def CheckPrereq(self):
8116 """Check prerequisites.
8118 This checks that the instance is in the cluster.
8121 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8122 assert self.instance is not None, \
8123 "Cannot retrieve locked instance %s" % self.op.instance_name
8124 _CheckNodeOnline(self, self.instance.primary_node)
8126 def Exec(self, feedback_fn):
8127 """Connect to the console of an instance
8130 instance = self.instance
8131 node = instance.primary_node
8133 node_insts = self.rpc.call_instance_list([node],
8134 [instance.hypervisor])[node]
8135 node_insts.Raise("Can't get node information from %s" % node)
8137 if instance.name not in node_insts.payload:
8138 if instance.admin_up:
8139 state = constants.INSTST_ERRORDOWN
else:
8141 state = constants.INSTST_ADMINDOWN
8142 raise errors.OpExecError("Instance %s is not running (state %s)" %
8143 (instance.name, state))
8145 logging.debug("Connecting to console of %s on %s", instance.name, node)
8147 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
8150 def _GetInstanceConsole(cluster, instance):
8151 """Returns console information for an instance.
8153 @type cluster: L{objects.Cluster}
8154 @type instance: L{objects.Instance}
8158 hyper = hypervisor.GetHypervisor(instance.hypervisor)
8159 # beparams and hvparams are passed separately, to avoid editing the
8160 # instance and then saving the defaults in the instance itself.
8161 hvparams = cluster.FillHV(instance)
8162 beparams = cluster.FillBE(instance)
8163 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
8165 assert console.instance == instance.name
8166 assert console.Validate()
8168 return console.ToDict()
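# The dict built from console.ToDict() is what masterd/RAPI hand back to
# the command-line client, which then assembles and runs the actual
# console command (an SSH invocation or similar) itself; this LU never
# executes that command.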
8171 class LUInstanceReplaceDisks(LogicalUnit):
8172 """Replace the disks of an instance.
8175 HPATH = "mirrors-replace"
8176 HTYPE = constants.HTYPE_INSTANCE
8179 def CheckArguments(self):
8180 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
self.op.iallocator)
8183 def ExpandNames(self):
8184 self._ExpandAndLockInstance()
8186 if self.op.iallocator is not None:
8187 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8189 elif self.op.remote_node is not None:
8190 remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8191 self.op.remote_node = remote_node
8193 # Warning: do not remove the locking of the new secondary here
8194 # unless DRBD8.AddChildren is changed to work in parallel;
8195 # currently it doesn't since parallel invocations of
8196 # FindUnusedMinor will conflict
8197 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
8198 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
else:
8201 self.needed_locks[locking.LEVEL_NODE] = []
8202 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8204 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
8205 self.op.iallocator, self.op.remote_node,
8206 self.op.disks, False, self.op.early_release)
8208 self.tasklets = [self.replacer]
8210 def DeclareLocks(self, level):
8211 # If we're not already locking all nodes in the set we have to declare the
8212 # instance's primary/secondary nodes.
8213 if (level == locking.LEVEL_NODE and
8214 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
8215 self._LockInstancesNodes()
8217 def BuildHooksEnv(self):
8220 This runs on the master, the primary and all the secondaries.
8223 instance = self.replacer.instance
8225 "MODE": self.op.mode,
8226 "NEW_SECONDARY": self.op.remote_node,
8227 "OLD_SECONDARY": instance.secondary_nodes[0],
8229 env.update(_BuildInstanceHookEnvByObject(self, instance))
nl = [
8231 self.cfg.GetMasterNode(),
8232 instance.primary_node,
]
8234 if self.op.remote_node is not None:
8235 nl.append(self.op.remote_node)
return env, nl, nl
8239 class TLReplaceDisks(Tasklet):
8240 """Replaces disks for an instance.
8242 Note: Locking is not within the scope of this class.
8245 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
8246 disks, delay_iallocator, early_release):
8247 """Initializes this class.
8250 Tasklet.__init__(self, lu)
8253 self.instance_name = instance_name
self.mode = mode
8255 self.iallocator_name = iallocator_name
8256 self.remote_node = remote_node
self.disks = disks
8258 self.delay_iallocator = delay_iallocator
8259 self.early_release = early_release
8262 self.instance = None
8263 self.new_node = None
8264 self.target_node = None
8265 self.other_node = None
8266 self.remote_node_info = None
8267 self.node_secondary_ip = None
8270 def CheckArguments(mode, remote_node, iallocator):
8271 """Helper function for users of this class.
8274 # check for valid parameter combination
8275 if mode == constants.REPLACE_DISK_CHG:
8276 if remote_node is None and iallocator is None:
8277 raise errors.OpPrereqError("When changing the secondary either an"
8278 " iallocator script must be used or the"
8279 " new node given", errors.ECODE_INVAL)
8281 if remote_node is not None and iallocator is not None:
8282 raise errors.OpPrereqError("Give either the iallocator or the new"
8283 " secondary, not both", errors.ECODE_INVAL)
8285 elif remote_node is not None or iallocator is not None:
8286 # Not replacing the secondary
8287 raise errors.OpPrereqError("The iallocator and new node options can"
8288 " only be used when changing the"
8289 " secondary node", errors.ECODE_INVAL)
8292 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
8293 """Compute a new secondary node using an IAllocator.
8296 ial = IAllocator(lu.cfg, lu.rpc,
8297 mode=constants.IALLOCATOR_MODE_RELOC,
name=instance_name,
8299 relocate_from=relocate_from)
8301 ial.Run(iallocator_name)
if not ial.success:
8304 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
8305 " %s" % (iallocator_name, ial.info),
8308 if len(ial.result) != ial.required_nodes:
8309 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8310 " of nodes (%s), required %s" %
(iallocator_name,
8312 len(ial.result), ial.required_nodes),
errors.ECODE_FAULT)
8315 remote_node_name = ial.result[0]
8317 lu.LogInfo("Selected new secondary for instance '%s': %s",
8318 instance_name, remote_node_name)
8320 return remote_node_name
8322 def _FindFaultyDisks(self, node_name):
8323 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
node_name, True)
8326 def CheckPrereq(self):
8327 """Check prerequisites.
8329 This checks that the instance is in the cluster.
8332 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
8333 assert instance is not None, \
8334 "Cannot retrieve locked instance %s" % self.instance_name
8336 if instance.disk_template != constants.DT_DRBD8:
8337 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
8338 " instances", errors.ECODE_INVAL)
8340 if len(instance.secondary_nodes) != 1:
8341 raise errors.OpPrereqError("The instance has a strange layout,"
8342 " expected one secondary but found %d" %
8343 len(instance.secondary_nodes),
errors.ECODE_FAULT)
8346 if not self.delay_iallocator:
8347 self._CheckPrereq2()
8349 def _CheckPrereq2(self):
8350 """Check prerequisites, second part.
8352 This function should always be part of CheckPrereq. It was separated and is
8353 now called from Exec because during node evacuation iallocator was only
8354 called with an unmodified cluster model, not taking planned changes into
account.
8358 instance = self.instance
8359 secondary_node = instance.secondary_nodes[0]
8361 if self.iallocator_name is None:
8362 remote_node = self.remote_node
else:
8364 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
8365 instance.name, instance.secondary_nodes)
8367 if remote_node is not None:
8368 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
8369 assert self.remote_node_info is not None, \
8370 "Cannot retrieve locked node %s" % remote_node
else:
8372 self.remote_node_info = None
8374 if remote_node == self.instance.primary_node:
8375 raise errors.OpPrereqError("The specified node is the primary node of"
8376 " the instance.", errors.ECODE_INVAL)
8378 if remote_node == secondary_node:
8379 raise errors.OpPrereqError("The specified node is already the"
8380 " secondary node of the instance.",
8383 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
8384 constants.REPLACE_DISK_CHG):
8385 raise errors.OpPrereqError("Cannot specify disks to be replaced",
8388 if self.mode == constants.REPLACE_DISK_AUTO:
8389 faulty_primary = self._FindFaultyDisks(instance.primary_node)
8390 faulty_secondary = self._FindFaultyDisks(secondary_node)
8392 if faulty_primary and faulty_secondary:
8393 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
8394 " one node and can not be repaired"
8395 " automatically" % self.instance_name,
if faulty_primary:
8399 self.disks = faulty_primary
8400 self.target_node = instance.primary_node
8401 self.other_node = secondary_node
8402 check_nodes = [self.target_node, self.other_node]
8403 elif faulty_secondary:
8404 self.disks = faulty_secondary
8405 self.target_node = secondary_node
8406 self.other_node = instance.primary_node
8407 check_nodes = [self.target_node, self.other_node]
8413 # Non-automatic modes
8414 if self.mode == constants.REPLACE_DISK_PRI:
8415 self.target_node = instance.primary_node
8416 self.other_node = secondary_node
8417 check_nodes = [self.target_node, self.other_node]
8419 elif self.mode == constants.REPLACE_DISK_SEC:
8420 self.target_node = secondary_node
8421 self.other_node = instance.primary_node
8422 check_nodes = [self.target_node, self.other_node]
8424 elif self.mode == constants.REPLACE_DISK_CHG:
8425 self.new_node = remote_node
8426 self.other_node = instance.primary_node
8427 self.target_node = secondary_node
8428 check_nodes = [self.new_node, self.other_node]
8430 _CheckNodeNotDrained(self.lu, remote_node)
8431 _CheckNodeVmCapable(self.lu, remote_node)
8433 old_node_info = self.cfg.GetNodeInfo(secondary_node)
8434 assert old_node_info is not None
8435 if old_node_info.offline and not self.early_release:
8436 # doesn't make sense to delay the release
8437 self.early_release = True
8438 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
8439 " early-release mode", secondary_node)
8442 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
8445 # If not specified all disks should be replaced
if not self.disks:
8447 self.disks = range(len(self.instance.disks))
8449 for node in check_nodes:
8450 _CheckNodeOnline(self.lu, node)
8452 # Check whether disks are valid
8453 for disk_idx in self.disks:
8454 instance.FindDisk(disk_idx)
8456 # Get secondary node IP addresses
node_2nd_ip = {}
8459 for node_name in [self.target_node, self.other_node, self.new_node]:
8460 if node_name is not None:
8461 node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
8463 self.node_secondary_ip = node_2nd_ip
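# Role assignment recap for the modes handled above (sketch):
#
#   REPLACE_DISK_PRI:  target_node = primary,       other_node = secondary
#   REPLACE_DISK_SEC:  target_node = secondary,     other_node = primary
#   REPLACE_DISK_CHG:  target_node = old secondary, new_node = replacement
#   REPLACE_DISK_AUTO: target/other picked from whichever side reported
#                      faulty disks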
8465 def Exec(self, feedback_fn):
8466 """Execute disk replacement.
8468 This dispatches the disk replacement to the appropriate handler.
8471 if self.delay_iallocator:
8472 self._CheckPrereq2()
8475 feedback_fn("No disks need replacement")
8478 feedback_fn("Replacing disk(s) %s for %s" %
8479 (utils.CommaJoin(self.disks), self.instance.name))
8481 activate_disks = (not self.instance.admin_up)
8483 # Activate the instance disks if we're replacing them on a down instance
if activate_disks:
8485 _StartInstanceDisks(self.lu, self.instance, True)
try:
8488 # Should we replace the secondary node?
8489 if self.new_node is not None:
8490 fn = self._ExecDrbd8Secondary
else:
8492 fn = self._ExecDrbd8DiskOnly
8494 return fn(feedback_fn)
finally:
8497 # Deactivate the instance disks if we're replacing them on a
# down instance
if activate_disks:
8500 _SafeShutdownInstanceDisks(self.lu, self.instance)
8502 def _CheckVolumeGroup(self, nodes):
8503 self.lu.LogInfo("Checking volume groups")
8505 vgname = self.cfg.GetVGName()
8507 # Make sure volume group exists on all involved nodes
8508 results = self.rpc.call_vg_list(nodes)
8510 raise errors.OpExecError("Can't list volume groups on the nodes")
8514 res.Raise("Error checking node %s" % node)
8515 if vgname not in res.payload:
8516 raise errors.OpExecError("Volume group '%s' not found on node %s" %
8519 def _CheckDisksExistence(self, nodes):
8520 # Check disk existence
8521 for idx, dev in enumerate(self.instance.disks):
8522 if idx not in self.disks:
8526 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
8527 self.cfg.SetDiskID(dev, node)
8529 result = self.rpc.call_blockdev_find(node, dev)
8531 msg = result.fail_msg
8532 if msg or not result.payload:
8534 msg = "disk not found"
8535 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
8538 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
8539 for idx, dev in enumerate(self.instance.disks):
8540 if idx not in self.disks:
8543 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
8546 if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
8548 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
8549 " replace disks for instance %s" %
8550 (node_name, self.instance.name))
8552 def _CreateNewStorage(self, node_name):
8553 vgname = self.cfg.GetVGName()
iv_names = {}
8556 for idx, dev in enumerate(self.instance.disks):
8557 if idx not in self.disks:
continue
8560 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
8562 self.cfg.SetDiskID(dev, node_name)
8564 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
8565 names = _GenerateUniqueNames(self.lu, lv_names)
8567 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
8568 logical_id=(vgname, names[0]))
8569 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
8570 logical_id=(vgname, names[1]))
8572 new_lvs = [lv_data, lv_meta]
8573 old_lvs = dev.children
8574 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
8576 # we pass force_create=True to force the LVM creation
8577 for new_lv in new_lvs:
8578 _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
8579 _GetInstanceInfoText(self.instance), False)
return iv_names
8583 def _CheckDevices(self, node_name, iv_names):
8584 for name, (dev, _, _) in iv_names.iteritems():
8585 self.cfg.SetDiskID(dev, node_name)
8587 result = self.rpc.call_blockdev_find(node_name, dev)
8589 msg = result.fail_msg
8590 if msg or not result.payload:
8592 msg = "disk not found"
8593 raise errors.OpExecError("Can't find DRBD device %s: %s" %
8596 if result.payload.is_degraded:
8597 raise errors.OpExecError("DRBD device %s is degraded!" % name)
8599 def _RemoveOldStorage(self, node_name, iv_names):
8600 for name, (_, old_lvs, _) in iv_names.iteritems():
8601 self.lu.LogInfo("Remove logical volumes for %s" % name)
for lv in old_lvs:
8604 self.cfg.SetDiskID(lv, node_name)
8606 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
8608 self.lu.LogWarning("Can't remove old LV: %s" % msg,
8609 hint="remove unused LVs manually")
8611 def _ReleaseNodeLock(self, node_name):
8612 """Releases the lock for a given node."""
8613 self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
8615 def _ExecDrbd8DiskOnly(self, feedback_fn):
8616 """Replace a disk on the primary or secondary for DRBD 8.
8618 The algorithm for replace is quite complicated:
8620 1. for each disk to be replaced:
8622 1. create new LVs on the target node with unique names
8623 1. detach old LVs from the drbd device
8624 1. rename old LVs to name_replaced.<time_t>
8625 1. rename new LVs to old LVs
8626 1. attach the new LVs (with the old names now) to the drbd device
8628 1. wait for sync across all devices
8630 1. for each modified disk:
8632 1. remove old LVs (which have the name name_replaces.<time_t>)
8634 Failures are not very well handled.
8639 # Step: check device activation
8640 self.lu.LogStep(1, steps_total, "Check device existence")
8641 self._CheckDisksExistence([self.other_node, self.target_node])
8642 self._CheckVolumeGroup([self.target_node, self.other_node])
8644 # Step: check other node consistency
8645 self.lu.LogStep(2, steps_total, "Check peer consistency")
8646 self._CheckDisksConsistency(self.other_node,
8647 self.other_node == self.instance.primary_node,
8650 # Step: create new storage
8651 self.lu.LogStep(3, steps_total, "Allocate new storage")
8652 iv_names = self._CreateNewStorage(self.target_node)
8654 # Step: for each lv, detach+rename*2+attach
8655 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8656 for dev, old_lvs, new_lvs in iv_names.itervalues():
8657 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
8659 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
8661 result.Raise("Can't detach drbd from local storage on node"
8662 " %s for device %s" % (self.target_node, dev.iv_name))
8664 #cfg.Update(instance)
8666 # ok, we created the new LVs, so now we know we have the needed
8667 # storage; as such, we proceed on the target node to rename
8668 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
8669 # using the assumption that logical_id == physical_id (which in
8670 # turn is the unique_id on that node)
8672 # FIXME(iustin): use a better name for the replaced LVs
8673 temp_suffix = int(time.time())
8674 ren_fn = lambda d, suff: (d.physical_id[0],
8675 d.physical_id[1] + "_replaced-%s" % suff)
8677 # Build the rename list based on what LVs exist on the node
8678 rename_old_to_new = []
8679 for to_ren in old_lvs:
8680 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
8681 if not result.fail_msg and result.payload:
8683 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
8685 self.lu.LogInfo("Renaming the old LVs on the target node")
8686 result = self.rpc.call_blockdev_rename(self.target_node,
8688 result.Raise("Can't rename old LVs on node %s" % self.target_node)
8690 # Now we rename the new LVs to the old LVs
8691 self.lu.LogInfo("Renaming the new LVs on the target node")
8692 rename_new_to_old = [(new, old.physical_id)
8693 for old, new in zip(old_lvs, new_lvs)]
8694 result = self.rpc.call_blockdev_rename(self.target_node,
8696 result.Raise("Can't rename new LVs on node %s" % self.target_node)
8698 for old, new in zip(old_lvs, new_lvs):
8699 new.logical_id = old.logical_id
8700 self.cfg.SetDiskID(new, self.target_node)
8702 for disk in old_lvs:
8703 disk.logical_id = ren_fn(disk, temp_suffix)
8704 self.cfg.SetDiskID(disk, self.target_node)
8706 # Now that the new lvs have the old name, we can add them to the device
8707 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
8708 result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
8710 msg = result.fail_msg
8712 for new_lv in new_lvs:
8713 msg2 = self.rpc.call_blockdev_remove(self.target_node,
8716 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
8717 hint=("cleanup manually the unused logical"
8719 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
8721 dev.children = new_lvs
8723 self.cfg.Update(self.instance, feedback_fn)
8726 if self.early_release:
8727 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8729 self._RemoveOldStorage(self.target_node, iv_names)
8730 # WARNING: we release both node locks here, do not do other RPCs
8731 # than WaitForSync to the primary node
8732 self._ReleaseNodeLock([self.target_node, self.other_node])
8735 # This can fail as the old devices are degraded and _WaitForSync
8736 # does a combined result over all disks, so we don't check its return value
8737 self.lu.LogStep(cstep, steps_total, "Sync devices")
8739 _WaitForSync(self.lu, self.instance)
8741 # Check all devices manually
8742 self._CheckDevices(self.instance.primary_node, iv_names)
8744 # Step: remove old storage
8745 if not self.early_release:
8746 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8748 self._RemoveOldStorage(self.target_node, iv_names)
8750 def _ExecDrbd8Secondary(self, feedback_fn):
8751 """Replace the secondary node for DRBD 8.
8753 The algorithm for replace is quite complicated:
8754 - for all disks of the instance:
8755 - create new LVs on the new node with same names
8756 - shutdown the drbd device on the old secondary
8757 - disconnect the drbd network on the primary
8758 - create the drbd device on the new secondary
8759 - network attach the drbd on the primary, using an artifice:
8760 the drbd code for Attach() will connect to the network if it
8761 finds a device which is connected to the good local disks but
8763 - wait for sync across all devices
8764 - remove all disks from the old secondary
8766 Failures are not very well handled.
8771 # Step: check device activation
8772 self.lu.LogStep(1, steps_total, "Check device existence")
8773 self._CheckDisksExistence([self.instance.primary_node])
8774 self._CheckVolumeGroup([self.instance.primary_node])
8776 # Step: check other node consistency
8777 self.lu.LogStep(2, steps_total, "Check peer consistency")
8778 self._CheckDisksConsistency(self.instance.primary_node, True, True)
8780 # Step: create new storage
8781 self.lu.LogStep(3, steps_total, "Allocate new storage")
8782 for idx, dev in enumerate(self.instance.disks):
8783 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8784 (self.new_node, idx))
8785 # we pass force_create=True to force LVM creation
8786 for new_lv in dev.children:
8787 _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8788 _GetInstanceInfoText(self.instance), False)
8790 # Step 4: drbd minors and drbd setup changes
8791 # after this, we must manually remove the drbd minors on both the
8792 # error and the success paths
8793 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8794 minors = self.cfg.AllocateDRBDMinor([self.new_node
8795 for dev in self.instance.disks],
8797 logging.debug("Allocated minors %r", minors)
8800 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8801 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
8802 (self.new_node, idx))
8803 # create new devices on new_node; note that we create two IDs:
8804 # one without port, so the drbd will be activated without
8805 # networking information on the new node at this stage, and one
8806 # with network, for the latter activation in step 4
8807 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8808 if self.instance.primary_node == o_node1:
8811 assert self.instance.primary_node == o_node2, "Three-node instance?"
8814 new_alone_id = (self.instance.primary_node, self.new_node, None,
8815 p_minor, new_minor, o_secret)
8816 new_net_id = (self.instance.primary_node, self.new_node, o_port,
8817 p_minor, new_minor, o_secret)
8819 iv_names[idx] = (dev, dev.children, new_net_id)
8820 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8822 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8823 logical_id=new_alone_id,
8824 children=dev.children,
8827 _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8828 _GetInstanceInfoText(self.instance), False)
8829 except errors.GenericError:
8830 self.cfg.ReleaseDRBDMinors(self.instance.name)
8833 # We have new devices, shutdown the drbd on the old secondary
8834 for idx, dev in enumerate(self.instance.disks):
8835 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8836 self.cfg.SetDiskID(dev, self.target_node)
8837 msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8839 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
8840 "node: %s" % (idx, msg),
8841 hint=("Please cleanup this device manually as"
8842 " soon as possible"))
8844 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8845 result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8846 self.node_secondary_ip,
8847 self.instance.disks)\
8848 [self.instance.primary_node]
8850 msg = result.fail_msg
if msg:
8852 # detaches didn't succeed (unlikely)
8853 self.cfg.ReleaseDRBDMinors(self.instance.name)
8854 raise errors.OpExecError("Can't detach the disks from the network on"
8855 " old node: %s" % (msg,))
8857 # if we managed to detach at least one, we update all the disks of
8858 # the instance to point to the new secondary
8859 self.lu.LogInfo("Updating instance configuration")
8860 for dev, _, new_logical_id in iv_names.itervalues():
8861 dev.logical_id = new_logical_id
8862 self.cfg.SetDiskID(dev, self.instance.primary_node)
8864 self.cfg.Update(self.instance, feedback_fn)
8866 # and now perform the drbd attach
8867 self.lu.LogInfo("Attaching primary drbds to new secondary"
8868 " (standalone => connected)")
8869 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8871 self.node_secondary_ip,
8872 self.instance.disks,
8875 for to_node, to_result in result.items():
8876 msg = to_result.fail_msg
8878 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8880 hint=("please do a gnt-instance info to see the"
8881 " status of disks"))
8883 if self.early_release:
8884 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8886 self._RemoveOldStorage(self.target_node, iv_names)
8887 # WARNING: we release all node locks here, do not do other RPCs
8888 # than WaitForSync to the primary node
8889 self._ReleaseNodeLock([self.instance.primary_node,
8894 # This can fail as the old devices are degraded and _WaitForSync
8895 # does a combined result over all disks, so we don't check its return value
8896 self.lu.LogStep(cstep, steps_total, "Sync devices")
8898 _WaitForSync(self.lu, self.instance)
8900 # Check all devices manually
8901 self._CheckDevices(self.instance.primary_node, iv_names)
8903 # Step: remove old storage
8904 if not self.early_release:
8905 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8906 self._RemoveOldStorage(self.target_node, iv_names)
8909 class LURepairNodeStorage(NoHooksLU):
8910 """Repairs the volume group on a node.
8915 def CheckArguments(self):
8916 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8918 storage_type = self.op.storage_type
8920 if (constants.SO_FIX_CONSISTENCY not in
8921 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
8922 raise errors.OpPrereqError("Storage units of type '%s' can not be"
8923 " repaired" % storage_type,
8926 def ExpandNames(self):
8927 self.needed_locks = {
8928 locking.LEVEL_NODE: [self.op.node_name],
8931 def _CheckFaultyDisks(self, instance, node_name):
8932 """Ensure faulty disks abort the opcode or at least warn."""
8934 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
8936 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
8937 " node '%s'" % (instance.name, node_name),
8939 except errors.OpPrereqError, err:
8940 if self.op.ignore_consistency:
8941 self.proc.LogWarning(str(err.args[0]))
8945 def CheckPrereq(self):
8946 """Check prerequisites.
8949 # Check whether any instance on this node has faulty disks
8950 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
8951 if not inst.admin_up:
8953 check_nodes = set(inst.all_nodes)
8954 check_nodes.discard(self.op.node_name)
8955 for inst_node_name in check_nodes:
8956 self._CheckFaultyDisks(inst, inst_node_name)
8958 def Exec(self, feedback_fn):
8959 feedback_fn("Repairing storage unit '%s' on %s ..." %
8960 (self.op.name, self.op.node_name))
8962 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
8963 result = self.rpc.call_storage_execute(self.op.node_name,
8964 self.op.storage_type, st_args,
8966 constants.SO_FIX_CONSISTENCY)
8967 result.Raise("Failed to repair storage unit '%s' on %s" %
8968 (self.op.name, self.op.node_name))
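# For illustration only: this LU backs a repair request along the lines of
# "gnt-node repair-storage NODE lvm-vg VGNAME" (exact CLI spelling may
# vary between versions); the actual work happens in the node daemon via
# the storage_execute RPC used above.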
8971 class LUNodeEvacStrategy(NoHooksLU):
8972 """Computes the node evacuation strategy.
8977 def CheckArguments(self):
8978 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
8980 def ExpandNames(self):
8981 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
8982 self.needed_locks = locks = {}
8983 if self.op.remote_node is None:
8984 locks[locking.LEVEL_NODE] = locking.ALL_SET
8986 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8987 locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
8989 def Exec(self, feedback_fn):
8990 if self.op.remote_node is not None:
instances = []
8992 for node in self.op.nodes:
8993 instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
result = []
for i in instances:
8996 if i.primary_node == self.op.remote_node:
8997 raise errors.OpPrereqError("Node %s is the primary node of"
8998 " instance %s, cannot use it as"
" secondary" %
9000 (self.op.remote_node, i.name),
errors.ECODE_INVAL)
9002 result.append([i.name, self.op.remote_node])
else:
9004 ial = IAllocator(self.cfg, self.rpc,
9005 mode=constants.IALLOCATOR_MODE_MEVAC,
9006 evac_nodes=self.op.nodes)
9007 ial.Run(self.op.iallocator, validate=True)
if not ial.success:
9009 raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
errors.ECODE_NORES)
result = ial.result
return result
9015 class LUInstanceGrowDisk(LogicalUnit):
9016 """Grow a disk of an instance.
9020 HTYPE = constants.HTYPE_INSTANCE
9023 def ExpandNames(self):
9024 self._ExpandAndLockInstance()
9025 self.needed_locks[locking.LEVEL_NODE] = []
9026 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9028 def DeclareLocks(self, level):
9029 if level == locking.LEVEL_NODE:
9030 self._LockInstancesNodes()
9032 def BuildHooksEnv(self):
9035 This runs on the master, the primary and all the secondaries.
9039 "DISK": self.op.disk,
9040 "AMOUNT": self.op.amount,
9042 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9043 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9046 def CheckPrereq(self):
9047 """Check prerequisites.
9049 This checks that the instance is in the cluster.
9052 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9053 assert instance is not None, \
9054 "Cannot retrieve locked instance %s" % self.op.instance_name
9055 nodenames = list(instance.all_nodes)
9056 for node in nodenames:
9057 _CheckNodeOnline(self, node)
9059 self.instance = instance
9061 if instance.disk_template not in constants.DTS_GROWABLE:
9062 raise errors.OpPrereqError("Instance's disk layout does not support"
9063 " growing.", errors.ECODE_INVAL)
9065 self.disk = instance.FindDisk(self.op.disk)
9067 if instance.disk_template not in (constants.DT_FILE,
9068 constants.DT_SHARED_FILE):
9069 # TODO: check the free disk space for file, when that feature will be
9071 _CheckNodesFreeDiskPerVG(self, nodenames,
9072 self.disk.ComputeGrowth(self.op.amount))
9074 def Exec(self, feedback_fn):
9075 """Execute disk grow.
9078 instance = self.instance
disk = self.disk
9081 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
if not disks_ok:
9083 raise errors.OpExecError("Cannot activate block device to grow")
9085 for node in instance.all_nodes:
9086 self.cfg.SetDiskID(disk, node)
9087 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
9088 result.Raise("Grow request failed to node %s" % node)
9090 # TODO: Rewrite code to work properly
9091 # DRBD goes into sync mode for a short amount of time after executing the
9092 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
9093 # calling "resize" in sync mode fails. Sleeping for a short amount of
9094 time is a work-around.
time.sleep(5)
9097 disk.RecordGrow(self.op.amount)
9098 self.cfg.Update(instance, feedback_fn)
9099 if self.op.wait_for_sync:
9100 disk_abort = not _WaitForSync(self, instance, disks=[disk])
9102 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
9103 " status.\nPlease check the instance.")
9104 if not instance.admin_up:
9105 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
9106 elif not instance.admin_up:
9107 self.proc.LogWarning("Not shutting down the disk even if the instance is"
9108 " not supposed to be running because no wait for"
9109 " sync mode was requested.")
9112 class LUInstanceQueryData(NoHooksLU):
9113 """Query runtime instance data.
9118 def ExpandNames(self):
9119 self.needed_locks = {}
9120 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
9122 if self.op.instances:
9123 self.wanted_names = []
9124 for name in self.op.instances:
9125 full_name = _ExpandInstanceName(self.cfg, name)
9126 self.wanted_names.append(full_name)
9127 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
9129 self.wanted_names = None
9130 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
9132 self.needed_locks[locking.LEVEL_NODE] = []
9133 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9135 def DeclareLocks(self, level):
9136 if level == locking.LEVEL_NODE:
9137 self._LockInstancesNodes()
9139 def CheckPrereq(self):
9140 """Check prerequisites.
9142 This only checks the optional instance list against the existing names.
9145 if self.wanted_names is None:
9146 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
9148 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
9149 in self.wanted_names]
9151 def _ComputeBlockdevStatus(self, node, instance_name, dev):
9152 """Returns the status of a block device
9155 if self.op.static or not node:
return None
9158 self.cfg.SetDiskID(dev, node)
9160 result = self.rpc.call_blockdev_find(node, dev)
9164 result.Raise("Can't compute disk status for %s" % instance_name)
9166 status = result.payload
9170 return (status.dev_path, status.major, status.minor,
9171 status.sync_percent, status.estimated_time,
9172 status.is_degraded, status.ldisk_status)
9174 def _ComputeDiskStatus(self, instance, snode, dev):
9175 """Compute block device status.
9178 if dev.dev_type in constants.LDS_DRBD:
9179 # we change the snode then (otherwise we use the one passed in)
9180 if dev.logical_id[0] == instance.primary_node:
9181 snode = dev.logical_id[1]
9183 snode = dev.logical_id[0]
9185 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
9187 dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
9190 dev_children = [self._ComputeDiskStatus(instance, snode, child)
9191 for child in dev.children]
9196 "iv_name": dev.iv_name,
9197 "dev_type": dev.dev_type,
9198 "logical_id": dev.logical_id,
9199 "physical_id": dev.physical_id,
9200 "pstatus": dev_pstatus,
9201 "sstatus": dev_sstatus,
9202 "children": dev_children,
9209 def Exec(self, feedback_fn):
9210 """Gather and return data"""
result = {}
9213 cluster = self.cfg.GetClusterInfo()
9215 for instance in self.wanted_instances:
9216 if not self.op.static:
9217 remote_info = self.rpc.call_instance_info(instance.primary_node,
9219 instance.hypervisor)
9220 remote_info.Raise("Error checking node %s" % instance.primary_node)
9221 remote_info = remote_info.payload
9222 if remote_info and "state" in remote_info:
9225 remote_state = "down"
9228 if instance.admin_up:
9231 config_state = "down"
9233 disks = [self._ComputeDiskStatus(instance, None, device)
9234 for device in instance.disks]
9237 "name": instance.name,
9238 "config_state": config_state,
9239 "run_state": remote_state,
9240 "pnode": instance.primary_node,
9241 "snodes": instance.secondary_nodes,
9243 # this happens to be the same format used for hooks
9244 "nics": _NICListToTuple(self, instance.nics),
9245 "disk_template": instance.disk_template,
9247 "hypervisor": instance.hypervisor,
9248 "network_port": instance.network_port,
9249 "hv_instance": instance.hvparams,
9250 "hv_actual": cluster.FillHV(instance, skip_globals=True),
9251 "be_instance": instance.beparams,
9252 "be_actual": cluster.FillBE(instance),
9253 "os_instance": instance.osparams,
9254 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
9255 "serial_no": instance.serial_no,
9256 "mtime": instance.mtime,
9257 "ctime": instance.ctime,
9258 "uuid": instance.uuid,
9261 result[instance.name] = idict
9266 class LUInstanceSetParams(LogicalUnit):
9267 """Modifies an instances's parameters.
9270 HPATH = "instance-modify"
9271 HTYPE = constants.HTYPE_INSTANCE
9274 def CheckArguments(self):
9275 if not (self.op.nics or self.op.disks or self.op.disk_template or
9276 self.op.hvparams or self.op.beparams or self.op.os_name):
9277 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
9279 if self.op.hvparams:
9280 _CheckGlobalHvParams(self.op.hvparams)
9284 for disk_op, disk_dict in self.op.disks:
9285 utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
9286 if disk_op == constants.DDM_REMOVE:
9289 elif disk_op == constants.DDM_ADD:
9292 if not isinstance(disk_op, int):
9293 raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
9294 if not isinstance(disk_dict, dict):
9295 msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
9296 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9298 if disk_op == constants.DDM_ADD:
9299 mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
9300 if mode not in constants.DISK_ACCESS_SET:
9301 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
9303 size = disk_dict.get('size', None)
9305 raise errors.OpPrereqError("Required disk parameter size missing",
9309 except (TypeError, ValueError), err:
9310 raise errors.OpPrereqError("Invalid disk size parameter: %s" %
9311 str(err), errors.ECODE_INVAL)
9312 disk_dict['size'] = size
9314 # modification of disk
9315 if 'size' in disk_dict:
9316 raise errors.OpPrereqError("Disk size change not possible, use"
9317 " grow-disk", errors.ECODE_INVAL)
9319 if disk_addremove > 1:
9320 raise errors.OpPrereqError("Only one disk add or remove operation"
9321 " supported at a time", errors.ECODE_INVAL)
9323 if self.op.disks and self.op.disk_template is not None:
9324 raise errors.OpPrereqError("Disk template conversion and other disk"
9325 " changes not supported at the same time",
9328 if (self.op.disk_template and
9329 self.op.disk_template in constants.DTS_INT_MIRROR and
9330 self.op.remote_node is None):
9331 raise errors.OpPrereqError("Changing the disk template to a mirrored"
9332 " one requires specifying a secondary node",
9337 for nic_op, nic_dict in self.op.nics:
9338 utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9339 if nic_op == constants.DDM_REMOVE:
9342 elif nic_op == constants.DDM_ADD:
9345 if not isinstance(nic_op, int):
9346 raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9347 if not isinstance(nic_dict, dict):
9348 msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9349 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9351 # nic_dict should be a dict
9352 nic_ip = nic_dict.get('ip', None)
9353 if nic_ip is not None:
9354 if nic_ip.lower() == constants.VALUE_NONE:
9355 nic_dict['ip'] = None
9357 if not netutils.IPAddress.IsValid(nic_ip):
9358 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9361 nic_bridge = nic_dict.get('bridge', None)
9362 nic_link = nic_dict.get('link', None)
9363 if nic_bridge and nic_link:
9364 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9365 " at the same time", errors.ECODE_INVAL)
9366 elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9367 nic_dict['bridge'] = None
9368 elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9369 nic_dict['link'] = None
9371 if nic_op == constants.DDM_ADD:
9372 nic_mac = nic_dict.get('mac', None)
9374 nic_dict['mac'] = constants.VALUE_AUTO
9376 if 'mac' in nic_dict:
9377 nic_mac = nic_dict['mac']
9378 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9379 nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9381 if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9382 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9383 " modifying an existing nic",
9386 if nic_addremove > 1:
9387 raise errors.OpPrereqError("Only one NIC add or remove operation"
9388 " supported at a time", errors.ECODE_INVAL)
9390 def ExpandNames(self):
9391 self._ExpandAndLockInstance()
9392 self.needed_locks[locking.LEVEL_NODE] = []
9393 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9395 def DeclareLocks(self, level):
9396 if level == locking.LEVEL_NODE:
9397 self._LockInstancesNodes()
9398 if self.op.disk_template and self.op.remote_node:
9399 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9400 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9402 def BuildHooksEnv(self):
9405 This runs on the master, primary and secondaries.
9409 if constants.BE_MEMORY in self.be_new:
9410 args['memory'] = self.be_new[constants.BE_MEMORY]
9411 if constants.BE_VCPUS in self.be_new:
9412 args['vcpus'] = self.be_new[constants.BE_VCPUS]
9413 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9414 # information at all.
9417 nic_override = dict(self.op.nics)
9418 for idx, nic in enumerate(self.instance.nics):
9419 if idx in nic_override:
9420 this_nic_override = nic_override[idx]
9422 this_nic_override = {}
9423 if 'ip' in this_nic_override:
9424 ip = this_nic_override['ip']
9427 if 'mac' in this_nic_override:
9428 mac = this_nic_override['mac']
9431 if idx in self.nic_pnew:
9432 nicparams = self.nic_pnew[idx]
9434 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
9435 mode = nicparams[constants.NIC_MODE]
9436 link = nicparams[constants.NIC_LINK]
9437 args['nics'].append((ip, mac, mode, link))
9438 if constants.DDM_ADD in nic_override:
9439 ip = nic_override[constants.DDM_ADD].get('ip', None)
9440 mac = nic_override[constants.DDM_ADD]['mac']
9441 nicparams = self.nic_pnew[constants.DDM_ADD]
9442 mode = nicparams[constants.NIC_MODE]
9443 link = nicparams[constants.NIC_LINK]
9444 args['nics'].append((ip, mac, mode, link))
9445 elif constants.DDM_REMOVE in nic_override:
9446 del args['nics'][-1]
9448 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
9449 if self.op.disk_template:
9450 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
9451 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9454 def CheckPrereq(self):
9455 """Check prerequisites.
9457 This only checks the instance list against the existing names.
9460 # checking the new params on the primary/secondary nodes
9462 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9463 cluster = self.cluster = self.cfg.GetClusterInfo()
9464 assert self.instance is not None, \
9465 "Cannot retrieve locked instance %s" % self.op.instance_name
9466 pnode = instance.primary_node
9467 nodelist = list(instance.all_nodes)
9470 if self.op.os_name and not self.op.force:
9471 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
9472 self.op.force_variant)
9473 instance_os = self.op.os_name
9475 instance_os = instance.os
9477 if self.op.disk_template:
9478 if instance.disk_template == self.op.disk_template:
9479 raise errors.OpPrereqError("Instance already has disk template %s" %
9480 instance.disk_template, errors.ECODE_INVAL)
9482 if (instance.disk_template,
9483 self.op.disk_template) not in self._DISK_CONVERSIONS:
9484 raise errors.OpPrereqError("Unsupported disk template conversion from"
9485 " %s to %s" % (instance.disk_template,
9486 self.op.disk_template),
9488 _CheckInstanceDown(self, instance, "cannot change disk template")
9489 if self.op.disk_template in constants.DTS_INT_MIRROR:
9490 if self.op.remote_node == pnode:
9491 raise errors.OpPrereqError("Given new secondary node %s is the same"
9492 " as the primary node of the instance" %
9493 self.op.remote_node, errors.ECODE_STATE)
9494 _CheckNodeOnline(self, self.op.remote_node)
9495 _CheckNodeNotDrained(self, self.op.remote_node)
9496 # FIXME: here we assume that the old instance type is DT_PLAIN
9497 assert instance.disk_template == constants.DT_PLAIN
9498 disks = [{"size": d.size, "vg": d.logical_id[0]}
9499 for d in instance.disks]
9500 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
9501 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
9503 # hvparams processing
9504 if self.op.hvparams:
9505 hv_type = instance.hypervisor
9506 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
9507 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
9508 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
9511 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
9512 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
9513 self.hv_new = hv_new # the new actual values
9514 self.hv_inst = i_hvdict # the new dict (without defaults)
9516 self.hv_new = self.hv_inst = {}
9518 # beparams processing
9519 if self.op.beparams:
9520 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
9522 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
9523 be_new = cluster.SimpleFillBE(i_bedict)
9524 self.be_new = be_new # the new actual values
9525 self.be_inst = i_bedict # the new dict (without defaults)
9527 self.be_new = self.be_inst = {}
9529 # osparams processing
9530 if self.op.osparams:
9531 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
9532 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
9533 self.os_inst = i_osdict # the new dict (without defaults)
9539 if constants.BE_MEMORY in self.op.beparams and not self.op.force:
9540 mem_check_list = [pnode]
9541 if be_new[constants.BE_AUTO_BALANCE]:
9542 # either we changed auto_balance to yes or it was from before
9543 mem_check_list.extend(instance.secondary_nodes)
9544 instance_info = self.rpc.call_instance_info(pnode, instance.name,
9545 instance.hypervisor)
9546 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
9547 instance.hypervisor)
9548 pninfo = nodeinfo[pnode]
9549 msg = pninfo.fail_msg
9551 # Assume the primary node is unreachable and go ahead
9552 self.warn.append("Can't get info from primary node %s: %s" %
9554 elif not isinstance(pninfo.payload.get('memory_free', None), int):
9555 self.warn.append("Node data from primary node %s doesn't contain"
9556 " free memory information" % pnode)
9557 elif instance_info.fail_msg:
9558 self.warn.append("Can't get instance runtime information: %s" %
9559 instance_info.fail_msg)
9561 if instance_info.payload:
9562 current_mem = int(instance_info.payload['memory'])
9564 # Assume instance not running
9565 # (there is a slight race condition here, but it's not very probable,
9566 # and we have no other way to check)
9568 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
9569 pninfo.payload['memory_free'])
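# Worked example (numbers are illustrative only): raising BE_MEMORY from 512
# to 2048 MB while the instance currently uses 512 MB and the primary node
# reports 1024 MB free gives 2048 - 512 - 1024 = 512 MB missing, in which
# case the prerequisite check below refuses the change.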
9571 raise errors.OpPrereqError("This change will prevent the instance"
9572 " from starting, due to %d MB of memory"
9573 " missing on its primary node" % miss_mem,
9576 if be_new[constants.BE_AUTO_BALANCE]:
9577 for node, nres in nodeinfo.items():
9578 if node not in instance.secondary_nodes:
9582 self.warn.append("Can't get info from secondary node %s: %s" %
9584 elif not isinstance(nres.payload.get('memory_free', None), int):
9585 self.warn.append("Secondary node %s didn't return free"
9586 " memory information" % node)
9587 elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
9588 self.warn.append("Not enough memory to failover instance to"
9589 " secondary node %s" % node)
9594 for nic_op, nic_dict in self.op.nics:
9595 if nic_op == constants.DDM_REMOVE:
9596 if not instance.nics:
9597 raise errors.OpPrereqError("Instance has no NICs, cannot remove",
9600 if nic_op != constants.DDM_ADD:
9602 if not instance.nics:
9603 raise errors.OpPrereqError("Invalid NIC index %s, instance has"
9604 " no NICs" % nic_op,
9606 if nic_op < 0 or nic_op >= len(instance.nics):
9607 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
9609 (nic_op, len(instance.nics) - 1),
9611 old_nic_params = instance.nics[nic_op].nicparams
9612 old_nic_ip = instance.nics[nic_op].ip
9617 update_params_dict = dict([(key, nic_dict[key])
9618 for key in constants.NICS_PARAMETERS
9619 if key in nic_dict])
9621 if 'bridge' in nic_dict:
9622 update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
9624 new_nic_params = _GetUpdatedParams(old_nic_params,
9626 utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
9627 new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
9628 objects.NIC.CheckParameterSyntax(new_filled_nic_params)
9629 self.nic_pinst[nic_op] = new_nic_params
9630 self.nic_pnew[nic_op] = new_filled_nic_params
9631 new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
9633 if new_nic_mode == constants.NIC_MODE_BRIDGED:
9634 nic_bridge = new_filled_nic_params[constants.NIC_LINK]
9635 msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
9637 msg = "Error checking bridges on node %s: %s" % (pnode, msg)
9639 self.warn.append(msg)
9641 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
9642 if new_nic_mode == constants.NIC_MODE_ROUTED:
9643 if 'ip' in nic_dict:
9644 nic_ip = nic_dict['ip']
9648 raise errors.OpPrereqError('Cannot set the nic ip to None'
9649 ' on a routed nic', errors.ECODE_INVAL)
9650 if 'mac' in nic_dict:
9651 nic_mac = nic_dict['mac']
9653 raise errors.OpPrereqError('Cannot set the nic mac to None',
9655 elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9656 # otherwise generate the mac
9657 nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
9659 # or validate/reserve the current one
9661 self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
9662 except errors.ReservationError:
9663 raise errors.OpPrereqError("MAC address %s already in use"
9664 " in cluster" % nic_mac,
9665 errors.ECODE_NOTUNIQUE)
9668 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
9669 raise errors.OpPrereqError("Disk operations not supported for"
9670 " diskless instances",
9672 for disk_op, _ in self.op.disks:
9673 if disk_op == constants.DDM_REMOVE:
9674 if len(instance.disks) == 1:
9675 raise errors.OpPrereqError("Cannot remove the last disk of"
9676 " an instance", errors.ECODE_INVAL)
9677 _CheckInstanceDown(self, instance, "cannot remove disks")
9679 if (disk_op == constants.DDM_ADD and
9680 len(instance.disks) >= constants.MAX_DISKS):
9681 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
9682 " add more" % constants.MAX_DISKS,
9684 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
9686 if disk_op < 0 or disk_op >= len(instance.disks):
9687 raise errors.OpPrereqError("Invalid disk index %s, valid values"
9689 (disk_op, len(instance.disks)),
9694 def _ConvertPlainToDrbd(self, feedback_fn):
9695 """Converts an instance from plain to drbd.
9698 feedback_fn("Converting template to drbd")
9699 instance = self.instance
9700 pnode = instance.primary_node
9701 snode = self.op.remote_node
9703 # create a fake disk info for _GenerateDiskTemplate
9704 disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
9705 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9706 instance.name, pnode, [snode],
9707 disk_info, None, None, 0, feedback_fn)
9708 info = _GetInstanceInfoText(instance)
9709 feedback_fn("Creating aditional volumes...")
9710 # first, create the missing data and meta devices
9711 for disk in new_disks:
9712 # unfortunately this is... not too nice
9713 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9715 for child in disk.children:
9716 _CreateSingleBlockDev(self, snode, instance, child, info, True)
9717 # at this stage, all new LVs have been created, we can rename the old ones
9719 feedback_fn("Renaming original volumes...")
9720 rename_list = [(o, n.children[0].logical_id)
9721 for (o, n) in zip(instance.disks, new_disks)]
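# Hedged illustration of the mapping built above (VG and LV names are made
# up): each original plain LV is renamed to the logical_id of the data child
# of its new DRBD disk, e.g. (old_disk, ("xenvg", "<uuid>.disk0_data")), so
# the existing data becomes the local backing device of the DRBD pair.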
9722 result = self.rpc.call_blockdev_rename(pnode, rename_list)
9723 result.Raise("Failed to rename original LVs")
9725 feedback_fn("Initializing DRBD devices...")
9726 # all child devices are in place, we can now create the DRBD devices
9727 for disk in new_disks:
9728 for node in [pnode, snode]:
9729 f_create = node == pnode
9730 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9732 # at this point, the instance has been modified
9733 instance.disk_template = constants.DT_DRBD8
9734 instance.disks = new_disks
9735 self.cfg.Update(instance, feedback_fn)
9737 # disks are created, waiting for sync
9738 disk_abort = not _WaitForSync(self, instance)
9740 raise errors.OpExecError("There are some degraded disks for"
9741 " this instance, please cleanup manually")
9743 def _ConvertDrbdToPlain(self, feedback_fn):
9744 """Converts an instance from drbd to plain.
9747 instance = self.instance
9748 assert len(instance.secondary_nodes) == 1
9749 pnode = instance.primary_node
9750 snode = instance.secondary_nodes[0]
9751 feedback_fn("Converting template to plain")
9753 old_disks = instance.disks
9754 new_disks = [d.children[0] for d in old_disks]
9756 # copy over size and mode
9757 for parent, child in zip(old_disks, new_disks):
9758 child.size = parent.size
9759 child.mode = parent.mode
9761 # update instance structure
9762 instance.disks = new_disks
9763 instance.disk_template = constants.DT_PLAIN
9764 self.cfg.Update(instance, feedback_fn)
9766 feedback_fn("Removing volumes on the secondary node...")
9767 for disk in old_disks:
9768 self.cfg.SetDiskID(disk, snode)
9769 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9771 self.LogWarning("Could not remove block device %s on node %s,"
9772 " continuing anyway: %s", disk.iv_name, snode, msg)
9774 feedback_fn("Removing unneeded volumes on the primary node...")
9775 for idx, disk in enumerate(old_disks):
9776 meta = disk.children[1]
9777 self.cfg.SetDiskID(meta, pnode)
9778 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9780 self.LogWarning("Could not remove metadata for disk %d on node %s,"
9781 " continuing anyway: %s", idx, pnode, msg)
9783 def Exec(self, feedback_fn):
9784 """Modifies an instance.
9786 All parameters take effect only at the next restart of the instance.
9789 # Process here the warnings from CheckPrereq, as we don't have a
9790 # feedback_fn there.
9791 for warn in self.warn:
9792 feedback_fn("WARNING: %s" % warn)
9795 instance = self.instance
9797 for disk_op, disk_dict in self.op.disks:
9798 if disk_op == constants.DDM_REMOVE:
9799 # remove the last disk
9800 device = instance.disks.pop()
9801 device_idx = len(instance.disks)
9802 for node, disk in device.ComputeNodeTree(instance.primary_node):
9803 self.cfg.SetDiskID(disk, node)
9804 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9806 self.LogWarning("Could not remove disk/%d on node %s: %s,"
9807 " continuing anyway", device_idx, node, msg)
9808 result.append(("disk/%d" % device_idx, "remove"))
9809 elif disk_op == constants.DDM_ADD:
9811 if instance.disk_template in (constants.DT_FILE,
9812 constants.DT_SHARED_FILE):
9813 file_driver, file_path = instance.disks[0].logical_id
9814 file_path = os.path.dirname(file_path)
9816 file_driver = file_path = None
9817 disk_idx_base = len(instance.disks)
9818 new_disk = _GenerateDiskTemplate(self,
9819 instance.disk_template,
9820 instance.name, instance.primary_node,
9821 instance.secondary_nodes,
9825 disk_idx_base, feedback_fn)[0]
9826 instance.disks.append(new_disk)
9827 info = _GetInstanceInfoText(instance)
9829 logging.info("Creating volume %s for instance %s",
9830 new_disk.iv_name, instance.name)
9831 # Note: this needs to be kept in sync with _CreateDisks
9833 for node in instance.all_nodes:
9834 f_create = node == instance.primary_node
9836 _CreateBlockDev(self, node, instance, new_disk,
9837 f_create, info, f_create)
9838 except errors.OpExecError, err:
9839 self.LogWarning("Failed to create volume %s (%s) on"
9841 new_disk.iv_name, new_disk, node, err)
9842 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9843 (new_disk.size, new_disk.mode)))
9845 # change a given disk
9846 instance.disks[disk_op].mode = disk_dict['mode']
9847 result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9849 if self.op.disk_template:
9850 r_shut = _ShutdownInstanceDisks(self, instance)
9852 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
9853 " proceed with disk template conversion")
9854 mode = (instance.disk_template, self.op.disk_template)
9856 self._DISK_CONVERSIONS[mode](self, feedback_fn)
9858 self.cfg.ReleaseDRBDMinors(instance.name)
9860 result.append(("disk_template", self.op.disk_template))
9863 for nic_op, nic_dict in self.op.nics:
9864 if nic_op == constants.DDM_REMOVE:
9865 # remove the last nic
9866 del instance.nics[-1]
9867 result.append(("nic.%d" % len(instance.nics), "remove"))
9868 elif nic_op == constants.DDM_ADD:
9869 # mac and bridge should be set by now
9870 mac = nic_dict['mac']
9871 ip = nic_dict.get('ip', None)
9872 nicparams = self.nic_pinst[constants.DDM_ADD]
9873 new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9874 instance.nics.append(new_nic)
9875 result.append(("nic.%d" % (len(instance.nics) - 1),
9876 "add:mac=%s,ip=%s,mode=%s,link=%s" %
9877 (new_nic.mac, new_nic.ip,
9878 self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9879 self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9882 for key in 'mac', 'ip':
9884 setattr(instance.nics[nic_op], key, nic_dict[key])
9885 if nic_op in self.nic_pinst:
9886 instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9887 for key, val in nic_dict.iteritems():
9888 result.append(("nic.%s/%d" % (key, nic_op), val))
9891 if self.op.hvparams:
9892 instance.hvparams = self.hv_inst
9893 for key, val in self.op.hvparams.iteritems():
9894 result.append(("hv/%s" % key, val))
9897 if self.op.beparams:
9898 instance.beparams = self.be_inst
9899 for key, val in self.op.beparams.iteritems():
9900 result.append(("be/%s" % key, val))
9904 instance.os = self.op.os_name
9907 if self.op.osparams:
9908 instance.osparams = self.os_inst
9909 for key, val in self.op.osparams.iteritems():
9910 result.append(("os/%s" % key, val))
9912 self.cfg.Update(instance, feedback_fn)
9916 _DISK_CONVERSIONS = {
9917 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
9918 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
9922 class LUBackupQuery(NoHooksLU):
9923 """Query the exports list
9928 def ExpandNames(self):
9929 self.needed_locks = {}
9930 self.share_locks[locking.LEVEL_NODE] = 1
9931 if not self.op.nodes:
9932 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9934 self.needed_locks[locking.LEVEL_NODE] = \
9935 _GetWantedNodes(self, self.op.nodes)
9937 def Exec(self, feedback_fn):
9938 """Compute the list of all the exported system images.
9941 @return: a dictionary with the structure node->(export-list)
9942 where export-list is a list of the instances exported on
9946 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9947 rpcresult = self.rpc.call_export_list(self.nodes)
9949 for node in rpcresult:
9950 if rpcresult[node].fail_msg:
9951 result[node] = False
9953 result[node] = rpcresult[node].payload
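# Sketch of the resulting dictionary (node and instance names are assumptions
# for illustration only):
#   {"node1.example.com": ["inst1.example.com", "inst2.example.com"],
#    "node2.example.com": False}
# where False marks a node whose export list could not be retrieved.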
9958 class LUBackupPrepare(NoHooksLU):
9959 """Prepares an instance for an export and returns useful information.
9964 def ExpandNames(self):
9965 self._ExpandAndLockInstance()
9967 def CheckPrereq(self):
9968 """Check prerequisites.
9971 instance_name = self.op.instance_name
9973 self.instance = self.cfg.GetInstanceInfo(instance_name)
9974 assert self.instance is not None, \
9975 "Cannot retrieve locked instance %s" % self.op.instance_name
9976 _CheckNodeOnline(self, self.instance.primary_node)
9978 self._cds = _GetClusterDomainSecret()
9980 def Exec(self, feedback_fn):
9981 """Prepares an instance for an export.
9984 instance = self.instance
9986 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9987 salt = utils.GenerateSecret(8)
9989 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
9990 result = self.rpc.call_x509_cert_create(instance.primary_node,
9991 constants.RIE_CERT_VALIDITY)
9992 result.Raise("Can't create X509 key and certificate on %s" % result.node)
9994 (name, cert_pem) = result.payload
9996 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
10000 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
10001 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
10003 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
10009 class LUBackupExport(LogicalUnit):
10010 """Export an instance to an image in the cluster.
10013 HPATH = "instance-export"
10014 HTYPE = constants.HTYPE_INSTANCE
10017 def CheckArguments(self):
10018 """Check the arguments.
10021 self.x509_key_name = self.op.x509_key_name
10022 self.dest_x509_ca_pem = self.op.destination_x509_ca
10024 if self.op.mode == constants.EXPORT_MODE_REMOTE:
10025 if not self.x509_key_name:
10026 raise errors.OpPrereqError("Missing X509 key name for encryption",
10027 errors.ECODE_INVAL)
10029 if not self.dest_x509_ca_pem:
10030 raise errors.OpPrereqError("Missing destination X509 CA",
10031 errors.ECODE_INVAL)
10033 def ExpandNames(self):
10034 self._ExpandAndLockInstance()
10036 # Lock all nodes for local exports
10037 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10038 # FIXME: lock only instance primary and destination node
10040 # Sad but true, for now we have to lock all nodes, as we don't know where
10041 # the previous export might be, and in this LU we search for it and
10042 # remove it from its current node. In the future we could fix this by:
10043 # - making a tasklet to search (share-lock all), then create the
10044 # new one, then one to remove, after
10045 # - removing the removal operation altogether
10046 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10048 def DeclareLocks(self, level):
10049 """Last minute lock declaration."""
10050 # All nodes are locked anyway, so nothing to do here.
10052 def BuildHooksEnv(self):
10053 """Build hooks env.
10055 This will run on the master, primary node and target node.
10059 "EXPORT_MODE": self.op.mode,
10060 "EXPORT_NODE": self.op.target_node,
10061 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
10062 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
10063 # TODO: Generic function for boolean env variables
10064 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
10067 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
10069 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
10071 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10072 nl.append(self.op.target_node)
10076 def CheckPrereq(self):
10077 """Check prerequisites.
10079 This checks that the instance and node names are valid.
10082 instance_name = self.op.instance_name
10084 self.instance = self.cfg.GetInstanceInfo(instance_name)
10085 assert self.instance is not None, \
10086 "Cannot retrieve locked instance %s" % self.op.instance_name
10087 _CheckNodeOnline(self, self.instance.primary_node)
10089 if (self.op.remove_instance and self.instance.admin_up and
10090 not self.op.shutdown):
10091 raise errors.OpPrereqError("Can not remove instance without shutting it"
10094 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10095 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
10096 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
10097 assert self.dst_node is not None
10099 _CheckNodeOnline(self, self.dst_node.name)
10100 _CheckNodeNotDrained(self, self.dst_node.name)
10103 self.dest_disk_info = None
10104 self.dest_x509_ca = None
10106 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10107 self.dst_node = None
10109 if len(self.op.target_node) != len(self.instance.disks):
10110 raise errors.OpPrereqError(("Received destination information for %s"
10111 " disks, but instance %s has %s disks") %
10112 (len(self.op.target_node), instance_name,
10113 len(self.instance.disks)),
10114 errors.ECODE_INVAL)
10116 cds = _GetClusterDomainSecret()
10118 # Check X509 key name
10120 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
10121 except (TypeError, ValueError), err:
10122 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
10124 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
10125 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
10126 errors.ECODE_INVAL)
10128 # Load and verify CA
10130 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
10131 except OpenSSL.crypto.Error, err:
10132 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
10133 (err, ), errors.ECODE_INVAL)
10135 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
10136 if errcode is not None:
10137 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
10138 (msg, ), errors.ECODE_INVAL)
10140 self.dest_x509_ca = cert
10142 # Verify target information
10144 for idx, disk_data in enumerate(self.op.target_node):
10146 (host, port, magic) = \
10147 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
10148 except errors.GenericError, err:
10149 raise errors.OpPrereqError("Target info for disk %s: %s" %
10150 (idx, err), errors.ECODE_INVAL)
10152 disk_info.append((host, port, magic))
10154 assert len(disk_info) == len(self.op.target_node)
10155 self.dest_disk_info = disk_info
10158 raise errors.ProgrammerError("Unhandled export mode %r" %
10161 # instance disk type verification
10162 # TODO: Implement export support for file-based disks
10163 for disk in self.instance.disks:
10164 if disk.dev_type == constants.LD_FILE:
10165 raise errors.OpPrereqError("Export not supported for instances with"
10166 " file-based disks", errors.ECODE_INVAL)
10168 def _CleanupExports(self, feedback_fn):
10169 """Removes exports of current instance from all other nodes.
10171 If an instance in a cluster with nodes A..D was exported to node C, its
10172 exports will be removed from the nodes A, B and D.
10175 assert self.op.mode != constants.EXPORT_MODE_REMOTE
10177 nodelist = self.cfg.GetNodeList()
10178 nodelist.remove(self.dst_node.name)
10180 # on one-node clusters nodelist will be empty after the removal
10181 # if we proceed the backup would be removed because OpBackupQuery
10182 # substitutes an empty list with the full cluster node list.
10183 iname = self.instance.name
10185 feedback_fn("Removing old exports for instance %s" % iname)
10186 exportlist = self.rpc.call_export_list(nodelist)
10187 for node in exportlist:
10188 if exportlist[node].fail_msg:
10190 if iname in exportlist[node].payload:
10191 msg = self.rpc.call_export_remove(node, iname).fail_msg
10193 self.LogWarning("Could not remove older export for instance %s"
10194 " on node %s: %s", iname, node, msg)
10196 def Exec(self, feedback_fn):
10197 """Export an instance to an image in the cluster.
10200 assert self.op.mode in constants.EXPORT_MODES
10202 instance = self.instance
10203 src_node = instance.primary_node
10205 if self.op.shutdown:
10206 # shutdown the instance, but not the disks
10207 feedback_fn("Shutting down instance %s" % instance.name)
10208 result = self.rpc.call_instance_shutdown(src_node, instance,
10209 self.op.shutdown_timeout)
10210 # TODO: Maybe ignore failures if ignore_remove_failures is set
10211 result.Raise("Could not shutdown instance %s on"
10212 " node %s" % (instance.name, src_node))
10214 # set the disks ID correctly since call_instance_start needs the
10215 # correct drbd minor to create the symlinks
10216 for disk in instance.disks:
10217 self.cfg.SetDiskID(disk, src_node)
10219 activate_disks = (not instance.admin_up)
10222 # Activate the instance disks if we're exporting a stopped instance
10223 feedback_fn("Activating disks for %s" % instance.name)
10224 _StartInstanceDisks(self, instance, None)
10227 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
10230 helper.CreateSnapshots()
10232 if (self.op.shutdown and instance.admin_up and
10233 not self.op.remove_instance):
10234 assert not activate_disks
10235 feedback_fn("Starting instance %s" % instance.name)
10236 result = self.rpc.call_instance_start(src_node, instance, None, None)
10237 msg = result.fail_msg
10239 feedback_fn("Failed to start instance: %s" % msg)
10240 _ShutdownInstanceDisks(self, instance)
10241 raise errors.OpExecError("Could not start instance: %s" % msg)
10243 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10244 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
10245 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10246 connect_timeout = constants.RIE_CONNECT_TIMEOUT
10247 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10249 (key_name, _, _) = self.x509_key_name
10252 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
10255 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
10256 key_name, dest_ca_pem,
10261 # Check for backwards compatibility
10262 assert len(dresults) == len(instance.disks)
10263 assert compat.all(isinstance(i, bool) for i in dresults), \
10264 "Not all results are boolean: %r" % dresults
10268 feedback_fn("Deactivating disks for %s" % instance.name)
10269 _ShutdownInstanceDisks(self, instance)
10271 if not (compat.all(dresults) and fin_resu):
10274 failures.append("export finalization")
10275 if not compat.all(dresults):
10276 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
10278 failures.append("disk export: disk(s) %s" % fdsk)
10280 raise errors.OpExecError("Export failed, errors in %s" %
10281 utils.CommaJoin(failures))
10283 # At this point, the export was successful, we can cleanup/finish
10285 # Remove instance if requested
10286 if self.op.remove_instance:
10287 feedback_fn("Removing instance %s" % instance.name)
10288 _RemoveInstance(self, feedback_fn, instance,
10289 self.op.ignore_remove_failures)
10291 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10292 self._CleanupExports(feedback_fn)
10294 return fin_resu, dresults
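# The return value is a pair: fin_resu is the boolean result of the export
# finalization and dresults holds one boolean per instance disk. A purely
# illustrative successful two-disk export would therefore return
#   (True, [True, True])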
10297 class LUBackupRemove(NoHooksLU):
10298 """Remove exports related to the named instance.
10303 def ExpandNames(self):
10304 self.needed_locks = {}
10305 # We need all nodes to be locked in order for RemoveExport to work, but we
10306 # don't need to lock the instance itself, as nothing will happen to it (and
10307 # we can remove exports also for a removed instance)
10308 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10310 def Exec(self, feedback_fn):
10311 """Remove any export.
10314 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
10315 # If the instance was not found we'll try with the name that was passed in.
10316 # This will only work if it was an FQDN, though.
10318 if not instance_name:
10320 instance_name = self.op.instance_name
10322 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
10323 exportlist = self.rpc.call_export_list(locked_nodes)
10325 for node in exportlist:
10326 msg = exportlist[node].fail_msg
10328 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
10330 if instance_name in exportlist[node].payload:
10332 result = self.rpc.call_export_remove(node, instance_name)
10333 msg = result.fail_msg
10335 logging.error("Could not remove export for instance %s"
10336 " on node %s: %s", instance_name, node, msg)
10338 if fqdn_warn and not found:
10339 feedback_fn("Export not found. If trying to remove an export belonging"
10340 " to a deleted instance please use its Fully Qualified"
10344 class LUGroupAdd(LogicalUnit):
10345 """Logical unit for creating node groups.
10348 HPATH = "group-add"
10349 HTYPE = constants.HTYPE_GROUP
10352 def ExpandNames(self):
10353 # We need the new group's UUID here so that we can create and acquire the
10354 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
10355 # that it should not check whether the UUID exists in the configuration.
10356 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
10357 self.needed_locks = {}
10358 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10360 def CheckPrereq(self):
10361 """Check prerequisites.
10363 This checks that the given group name is not an existing node group
10368 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10369 except errors.OpPrereqError:
10372 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
10373 " node group (UUID: %s)" %
10374 (self.op.group_name, existing_uuid),
10375 errors.ECODE_EXISTS)
10377 if self.op.ndparams:
10378 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10380 def BuildHooksEnv(self):
10381 """Build hooks env.
10385 "GROUP_NAME": self.op.group_name,
10387 mn = self.cfg.GetMasterNode()
10388 return env, [mn], [mn]
10390 def Exec(self, feedback_fn):
10391 """Add the node group to the cluster.
10394 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
10395 uuid=self.group_uuid,
10396 alloc_policy=self.op.alloc_policy,
10397 ndparams=self.op.ndparams)
10399 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
10400 del self.remove_locks[locking.LEVEL_NODEGROUP]
10403 class LUGroupAssignNodes(NoHooksLU):
10404 """Logical unit for assigning nodes to groups.
10409 def ExpandNames(self):
10410 # These raise errors.OpPrereqError on their own:
10411 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10412 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
10414 # We want to lock all the affected nodes and groups. We have readily
10415 # available the list of nodes, and the *destination* group. To gather the
10416 # list of "source" groups, we need to fetch node information.
10417 self.node_data = self.cfg.GetAllNodesInfo()
10418 affected_groups = set(self.node_data[node].group for node in self.op.nodes)
10419 affected_groups.add(self.group_uuid)
10421 self.needed_locks = {
10422 locking.LEVEL_NODEGROUP: list(affected_groups),
10423 locking.LEVEL_NODE: self.op.nodes,
10426 def CheckPrereq(self):
10427 """Check prerequisites.
10430 self.group = self.cfg.GetNodeGroup(self.group_uuid)
10431 instance_data = self.cfg.GetAllInstancesInfo()
10433 if self.group is None:
10434 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10435 (self.op.group_name, self.group_uuid))
10437 (new_splits, previous_splits) = \
10438 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
10439 for node in self.op.nodes],
10440 self.node_data, instance_data)
10443 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
10445 if not self.op.force:
10446 raise errors.OpExecError("The following instances get split by this"
10447 " change and --force was not given: %s" %
10450 self.LogWarning("This operation will split the following instances: %s",
10453 if previous_splits:
10454 self.LogWarning("In addition, these already-split instances continue"
10455 " to be spit across groups: %s",
10456 utils.CommaJoin(utils.NiceSort(previous_splits)))
10458 def Exec(self, feedback_fn):
10459 """Assign nodes to a new group.
10462 for node in self.op.nodes:
10463 self.node_data[node].group = self.group_uuid
10465 self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
10468 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
10469 """Check for split instances after a node assignment.
10471 This method considers a series of node assignments as an atomic operation,
10472 and returns information about split instances after applying the set of
10475 In particular, it returns information about newly split instances, and
10476 instances that were already split, and remain so after the change.
10478 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are considered.
10481 @type changes: list of (node_name, new_group_uuid) pairs.
10482 @param changes: list of node assignments to consider.
10483 @param node_data: a dict with data for all nodes
10484 @param instance_data: a dict with all instances to consider
10485 @rtype: a two-tuple
10486 @return: a list of instances that were previously okay and become split as a
10487 consequence of this change, and a list of instances that were previously
10488 split and this change does not fix.
10491 changed_nodes = dict((node, group) for node, group in changes
10492 if node_data[node].group != group)
10494 all_split_instances = set()
10495 previously_split_instances = set()
10497 def InstanceNodes(instance):
10498 return [instance.primary_node] + list(instance.secondary_nodes)
10500 for inst in instance_data.values():
10501 if inst.disk_template not in constants.DTS_INT_MIRROR:
10504 instance_nodes = InstanceNodes(inst)
10506 if len(set(node_data[node].group for node in instance_nodes)) > 1:
10507 previously_split_instances.add(inst.name)
10509 if len(set(changed_nodes.get(node, node_data[node].group)
10510 for node in instance_nodes)) > 1:
10511 all_split_instances.add(inst.name)
10513 return (list(all_split_instances - previously_split_instances),
10514 list(previously_split_instances & all_split_instances))
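# Hedged usage sketch (node, group and instance names are invented): moving
# node2 into group "g2" while a DRBD instance "inst1" spans node1 (group
# "g1") and node2 would be reported as a new split, roughly:
#   changes = [("node2", "uuid-of-g2")]
#   (new_splits, old_splits) = self.CheckAssignmentForSplitInstances(
#       changes, self.cfg.GetAllNodesInfo(), self.cfg.GetAllInstancesInfo())
#   # new_splits == ["inst1"], old_splits == []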
10517 class _GroupQuery(_QueryBase):
10518 FIELDS = query.GROUP_FIELDS
10520 def ExpandNames(self, lu):
10521 lu.needed_locks = {}
10523 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
10524 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
10527 self.wanted = [name_to_uuid[name]
10528 for name in utils.NiceSort(name_to_uuid.keys())]
10530 # Accept names to be either names or UUIDs.
10533 all_uuid = frozenset(self._all_groups.keys())
10535 for name in self.names:
10536 if name in all_uuid:
10537 self.wanted.append(name)
10538 elif name in name_to_uuid:
10539 self.wanted.append(name_to_uuid[name])
10541 missing.append(name)
10544 raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
10545 errors.ECODE_NOENT)
10547 def DeclareLocks(self, lu, level):
10550 def _GetQueryData(self, lu):
10551 """Computes the list of node groups and their attributes.
10554 do_nodes = query.GQ_NODE in self.requested_data
10555 do_instances = query.GQ_INST in self.requested_data
10557 group_to_nodes = None
10558 group_to_instances = None
10560 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
10561 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
10562 # latter GetAllInstancesInfo() is not enough, for we have to go through
10563 # instance->node. Hence, we will need to process nodes even if we only need
10564 # instance information.
10565 if do_nodes or do_instances:
10566 all_nodes = lu.cfg.GetAllNodesInfo()
10567 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
10570 for node in all_nodes.values():
10571 if node.group in group_to_nodes:
10572 group_to_nodes[node.group].append(node.name)
10573 node_to_group[node.name] = node.group
10576 all_instances = lu.cfg.GetAllInstancesInfo()
10577 group_to_instances = dict((uuid, []) for uuid in self.wanted)
10579 for instance in all_instances.values():
10580 node = instance.primary_node
10581 if node in node_to_group:
10582 group_to_instances[node_to_group[node]].append(instance.name)
10585 # Do not pass on node information if it was not requested.
10586 group_to_nodes = None
10588 return query.GroupQueryData([self._all_groups[uuid]
10589 for uuid in self.wanted],
10590 group_to_nodes, group_to_instances)
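# Hedged illustration of the intermediate maps built above (group UUIDs and
# names are assumptions): with GQ_NODE and GQ_INST requested they could look
# like
#   group_to_nodes = {"uuid-g1": ["node1", "node2"], "uuid-g2": ["node3"]}
#   group_to_instances = {"uuid-g1": ["inst1"], "uuid-g2": []}
# each instance being attributed to the group of its primary node.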
10593 class LUGroupQuery(NoHooksLU):
10594 """Logical unit for querying node groups.
10599 def CheckArguments(self):
10600 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
10601 self.op.output_fields, False)
10603 def ExpandNames(self):
10604 self.gq.ExpandNames(self)
10606 def Exec(self, feedback_fn):
10607 return self.gq.OldStyleQuery(self)
10610 class LUGroupSetParams(LogicalUnit):
10611 """Modifies the parameters of a node group.
10614 HPATH = "group-modify"
10615 HTYPE = constants.HTYPE_GROUP
10618 def CheckArguments(self):
10621 self.op.alloc_policy,
10624 if all_changes.count(None) == len(all_changes):
10625 raise errors.OpPrereqError("Please pass at least one modification",
10626 errors.ECODE_INVAL)
10628 def ExpandNames(self):
10629 # This raises errors.OpPrereqError on its own:
10630 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10632 self.needed_locks = {
10633 locking.LEVEL_NODEGROUP: [self.group_uuid],
10636 def CheckPrereq(self):
10637 """Check prerequisites.
10640 self.group = self.cfg.GetNodeGroup(self.group_uuid)
10642 if self.group is None:
10643 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10644 (self.op.group_name, self.group_uuid))
10646 if self.op.ndparams:
10647 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
10648 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10649 self.new_ndparams = new_ndparams
10651 def BuildHooksEnv(self):
10652 """Build hooks env.
10656 "GROUP_NAME": self.op.group_name,
10657 "NEW_ALLOC_POLICY": self.op.alloc_policy,
10659 mn = self.cfg.GetMasterNode()
10660 return env, [mn], [mn]
10662 def Exec(self, feedback_fn):
10663 """Modifies the node group.
10668 if self.op.ndparams:
10669 self.group.ndparams = self.new_ndparams
10670 result.append(("ndparams", str(self.group.ndparams)))
10672 if self.op.alloc_policy:
10673 self.group.alloc_policy = self.op.alloc_policy
10675 self.cfg.Update(self.group, feedback_fn)
10680 class LUGroupRemove(LogicalUnit):
10681 HPATH = "group-remove"
10682 HTYPE = constants.HTYPE_GROUP
10685 def ExpandNames(self):
10686 # This raises errors.OpPrereqError on its own:
10687 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10688 self.needed_locks = {
10689 locking.LEVEL_NODEGROUP: [self.group_uuid],
10692 def CheckPrereq(self):
10693 """Check prerequisites.
10695 This checks that the given group name exists as a node group, that it is
10696 empty (i.e., contains no nodes), and that it is not the last group in the cluster.
10700 # Verify that the group is empty.
10701 group_nodes = [node.name
10702 for node in self.cfg.GetAllNodesInfo().values()
10703 if node.group == self.group_uuid]
10706 raise errors.OpPrereqError("Group '%s' not empty, has the following"
10708 (self.op.group_name,
10709 utils.CommaJoin(utils.NiceSort(group_nodes))),
10710 errors.ECODE_STATE)
10712 # Verify the cluster would not be left group-less.
10713 if len(self.cfg.GetNodeGroupList()) == 1:
10714 raise errors.OpPrereqError("Group '%s' is the only group,"
10715 " cannot be removed" %
10716 self.op.group_name,
10717 errors.ECODE_STATE)
10719 def BuildHooksEnv(self):
10720 """Build hooks env.
10724 "GROUP_NAME": self.op.group_name,
10726 mn = self.cfg.GetMasterNode()
10727 return env, [mn], [mn]
10729 def Exec(self, feedback_fn):
10730 """Remove the node group.
10734 self.cfg.RemoveNodeGroup(self.group_uuid)
10735 except errors.ConfigurationError:
10736 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
10737 (self.op.group_name, self.group_uuid))
10739 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10742 class LUGroupRename(LogicalUnit):
10743 HPATH = "group-rename"
10744 HTYPE = constants.HTYPE_GROUP
10747 def ExpandNames(self):
10748 # This raises errors.OpPrereqError on its own:
10749 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10751 self.needed_locks = {
10752 locking.LEVEL_NODEGROUP: [self.group_uuid],
10755 def CheckPrereq(self):
10756 """Check prerequisites.
10758 Ensures requested new name is not yet used.
10762 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
10763 except errors.OpPrereqError:
10766 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
10767 " node group (UUID: %s)" %
10768 (self.op.new_name, new_name_uuid),
10769 errors.ECODE_EXISTS)
10771 def BuildHooksEnv(self):
10772 """Build hooks env.
10776 "OLD_NAME": self.op.group_name,
10777 "NEW_NAME": self.op.new_name,
10780 mn = self.cfg.GetMasterNode()
10781 all_nodes = self.cfg.GetAllNodesInfo()
10783 all_nodes.pop(mn, None)
10785 for node in all_nodes.values():
10786 if node.group == self.group_uuid:
10787 run_nodes.append(node.name)
10789 return env, run_nodes, run_nodes
10791 def Exec(self, feedback_fn):
10792 """Rename the node group.
10795 group = self.cfg.GetNodeGroup(self.group_uuid)
10798 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10799 (self.op.group_name, self.group_uuid))
10801 group.name = self.op.new_name
10802 self.cfg.Update(group, feedback_fn)
10804 return self.op.new_name
10807 class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
10808 """Generic tags LU.
10810 This is an abstract class which is the parent of all the other tags LUs.
10814 def ExpandNames(self):
10815 self.needed_locks = {}
10816 if self.op.kind == constants.TAG_NODE:
10817 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
10818 self.needed_locks[locking.LEVEL_NODE] = self.op.name
10819 elif self.op.kind == constants.TAG_INSTANCE:
10820 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
10821 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
10823 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
10824 # not possible to acquire the BGL based on opcode parameters)
10826 def CheckPrereq(self):
10827 """Check prerequisites.
10830 if self.op.kind == constants.TAG_CLUSTER:
10831 self.target = self.cfg.GetClusterInfo()
10832 elif self.op.kind == constants.TAG_NODE:
10833 self.target = self.cfg.GetNodeInfo(self.op.name)
10834 elif self.op.kind == constants.TAG_INSTANCE:
10835 self.target = self.cfg.GetInstanceInfo(self.op.name)
10837 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
10838 str(self.op.kind), errors.ECODE_INVAL)
10841 class LUTagsGet(TagsLU):
10842 """Returns the tags of a given object.
10847 def ExpandNames(self):
10848 TagsLU.ExpandNames(self)
10850 # Share locks as this is only a read operation
10851 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
10853 def Exec(self, feedback_fn):
10854 """Returns the tag list.
10857 return list(self.target.GetTags())
10860 class LUTagsSearch(NoHooksLU):
10861 """Searches the tags for a given pattern.
10866 def ExpandNames(self):
10867 self.needed_locks = {}
10869 def CheckPrereq(self):
10870 """Check prerequisites.
10872 This checks the pattern passed for validity by compiling it.
10876 self.re = re.compile(self.op.pattern)
10877 except re.error, err:
10878 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
10879 (self.op.pattern, err), errors.ECODE_INVAL)
10881 def Exec(self, feedback_fn):
10882 """Returns the tag list.
10886 tgts = [("/cluster", cfg.GetClusterInfo())]
10887 ilist = cfg.GetAllInstancesInfo().values()
10888 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
10889 nlist = cfg.GetAllNodesInfo().values()
10890 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
10892 for path, target in tgts:
10893 for tag in target.GetTags():
10894 if self.re.search(tag):
10895 results.append((path, tag))
10899 class LUTagsSet(TagsLU):
10900 """Sets a tag on a given object.
10905 def CheckPrereq(self):
10906 """Check prerequisites.
10908 This checks the type and length of the tag name and value.
10911 TagsLU.CheckPrereq(self)
10912 for tag in self.op.tags:
10913 objects.TaggableObject.ValidateTag(tag)
10915 def Exec(self, feedback_fn):
10920 for tag in self.op.tags:
10921 self.target.AddTag(tag)
10922 except errors.TagError, err:
10923 raise errors.OpExecError("Error while setting tag: %s" % str(err))
10924 self.cfg.Update(self.target, feedback_fn)
10927 class LUTagsDel(TagsLU):
10928 """Delete a list of tags from a given object.
10933 def CheckPrereq(self):
10934 """Check prerequisites.
10936 This checks that we have the given tag.
10939 TagsLU.CheckPrereq(self)
10940 for tag in self.op.tags:
10941 objects.TaggableObject.ValidateTag(tag)
10942 del_tags = frozenset(self.op.tags)
10943 cur_tags = self.target.GetTags()
10945 diff_tags = del_tags - cur_tags
10947 diff_names = ("'%s'" % i for i in sorted(diff_tags))
10948 raise errors.OpPrereqError("Tag(s) %s not found" %
10949 (utils.CommaJoin(diff_names), ),
10950 errors.ECODE_NOENT)
10952 def Exec(self, feedback_fn):
10953 """Remove the tag from the object.
10956 for tag in self.op.tags:
10957 self.target.RemoveTag(tag)
10958 self.cfg.Update(self.target, feedback_fn)
10961 class LUTestDelay(NoHooksLU):
10962 """Sleep for a specified amount of time.
10964 This LU sleeps on the master and/or nodes for a specified amount of time.
10970 def ExpandNames(self):
10971 """Expand names and set required locks.
10973 This expands the node list, if any.
10976 self.needed_locks = {}
10977 if self.op.on_nodes:
10978 # _GetWantedNodes can be used here, but is not always appropriate to use
10979 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
10980 # more information.
10981 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
10982 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
10984 def _TestDelay(self):
10985 """Do the actual sleep.
10988 if self.op.on_master:
10989 if not utils.TestDelay(self.op.duration):
10990 raise errors.OpExecError("Error during master delay test")
10991 if self.op.on_nodes:
10992 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
10993 for node, node_result in result.items():
10994 node_result.Raise("Failure during rpc call to node %s" % node)
10996 def Exec(self, feedback_fn):
10997 """Execute the test delay opcode, with the wanted repetitions.
11000 if self.op.repeat == 0:
11003 top_value = self.op.repeat - 1
11004 for i in range(self.op.repeat):
11005 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
11009 class LUTestJqueue(NoHooksLU):
11010 """Utility LU to test some aspects of the job queue.
11015 # Must be lower than default timeout for WaitForJobChange to see whether it
11016 # notices changed jobs
11017 _CLIENT_CONNECT_TIMEOUT = 20.0
11018 _CLIENT_CONFIRM_TIMEOUT = 60.0
11021 def _NotifyUsingSocket(cls, cb, errcls):
11022 """Opens a Unix socket and waits for another program to connect.
11025 @param cb: Callback to send socket name to client
11026 @type errcls: class
11027 @param errcls: Exception class to use for errors
11030 # Using a temporary directory as there's no easy way to create temporary
11031 # sockets without writing a custom loop around tempfile.mktemp and socket.socket
11033 tmpdir = tempfile.mkdtemp()
11035 tmpsock = utils.PathJoin(tmpdir, "sock")
11037 logging.debug("Creating temporary socket at %s", tmpsock)
11038 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
11043 # Send details to client
11046 # Wait for client to connect before continuing
11047 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
11049 (conn, _) = sock.accept()
11050 except socket.error, err:
11051 raise errcls("Client didn't connect in time (%s)" % err)
11055 # Remove as soon as client is connected
11056 shutil.rmtree(tmpdir)
11058 # Wait for client to close
11061 # pylint: disable-msg=E1101
11062 # Instance of '_socketobject' has no ... member
11063 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
11065 except socket.error, err:
11066 raise errcls("Client failed to confirm notification (%s)" % err)
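# Minimal client-side sketch for the handshake above (illustrative only, not
# part of this module): the test client connects to the path it received via
# the callback and then simply closes the connection, which the server side
# treats as the client's confirmation:
#   sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#   sock.settimeout(timeout)
#   sock.connect(sockname)
#   sock.close()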
11070 def _SendNotification(self, test, arg, sockname):
11071 """Sends a notification to the client.
11074 @param test: Test name
11075 @param arg: Test argument (depends on test)
11076 @type sockname: string
11077 @param sockname: Socket path
11080 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
11082 def _Notify(self, prereq, test, arg):
11083 """Notifies the client of a test.
11086 @param prereq: Whether this is a prereq-phase test
11088 @param test: Test name
11089 @param arg: Test argument (depends on test)
11093 errcls = errors.OpPrereqError
11095 errcls = errors.OpExecError
11097 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
11101 def CheckArguments(self):
11102 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
11103 self.expandnames_calls = 0
11105 def ExpandNames(self):
11106 checkargs_calls = getattr(self, "checkargs_calls", 0)
11107 if checkargs_calls < 1:
11108 raise errors.ProgrammerError("CheckArguments was not called")
11110 self.expandnames_calls += 1
11112 if self.op.notify_waitlock:
11113 self._Notify(True, constants.JQT_EXPANDNAMES, None)
11115 self.LogInfo("Expanding names")
11117 # Get lock on master node (just to get a lock, not for a particular reason)
11118 self.needed_locks = {
11119 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
11122 def Exec(self, feedback_fn):
11123 if self.expandnames_calls < 1:
11124 raise errors.ProgrammerError("ExpandNames was not called")
11126 if self.op.notify_exec:
11127 self._Notify(False, constants.JQT_EXEC, None)
11129 self.LogInfo("Executing")
11131 if self.op.log_messages:
11132 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
11133 for idx, msg in enumerate(self.op.log_messages):
11134 self.LogInfo("Sending log message %s", idx + 1)
11135 feedback_fn(constants.JQT_MSGPREFIX + msg)
11136 # Report how many test messages have been sent
11137 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
11140 raise errors.OpExecError("Opcode failure was requested")
11145 class IAllocator(object):
11146 """IAllocator framework.
11148 An IAllocator instance has four sets of attributes:
11149 - cfg that is needed to query the cluster
11150 - input data (all members of the _KEYS class attribute are required)
11151 - four buffer attributes (in|out_data|text), that represent the
11152 input (to the external script) in text and data structure format,
11153 and the output from it, again in two formats
11154 - the result variables from the script (success, info, nodes) for
11158 # pylint: disable-msg=R0902
11159 # lots of instance attributes
11161 "name", "mem_size", "disks", "disk_template",
11162 "os", "tags", "nics", "vcpus", "hypervisor",
11165 "name", "relocate_from",
11171 def __init__(self, cfg, rpc, mode, **kwargs):
11174 # init buffer variables
11175 self.in_text = self.out_text = self.in_data = self.out_data = None
11176 # init all input fields so that pylint is happy
11178 self.mem_size = self.disks = self.disk_template = None
11179 self.os = self.tags = self.nics = self.vcpus = None
11180 self.hypervisor = None
11181 self.relocate_from = None
11183 self.evac_nodes = None
11185 self.required_nodes = None
11186 # init result fields
11187 self.success = self.info = self.result = None
11188 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
11189 keyset = self._ALLO_KEYS
11190 fn = self._AddNewInstance
11191 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
11192 keyset = self._RELO_KEYS
11193 fn = self._AddRelocateInstance
11194 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
11195 keyset = self._EVAC_KEYS
11196 fn = self._AddEvacuateNodes
11198 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
11199 " IAllocator" % self.mode)
11201 if key not in keyset:
11202 raise errors.ProgrammerError("Invalid input parameter '%s' to"
11203 " IAllocator" % key)
11204 setattr(self, key, kwargs[key])
11207 if key not in kwargs:
11208 raise errors.ProgrammerError("Missing input parameter '%s' to"
11209 " IAllocator" % key)
11210 self._BuildInputData(fn)
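
  # Illustration of how the mode/keyset checking above is meant to be used
  # (the values below are placeholders, not taken from a real cluster): in
  # allocation mode every member of _ALLO_KEYS must be passed as a keyword
  # argument, otherwise a ProgrammerError is raised, e.g.:
  #
  #   ial = IAllocator(cfg, rpc,
  #                    mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="instance1.example.com",
  #                    mem_size=1024,
  #                    disks=[{"size": 10240, "mode": "w"}],
  #                    disk_template=constants.DT_DRBD8,
  #                    os="debian-image",
  #                    tags=[],
  #                    nics=[],
  #                    vcpus=1,
  #                    hypervisor=constants.HT_XEN_PVM)
  #
  # Relocation mode only requires the _RELO_KEYS ("name", "relocate_from")
  # and node evacuation only the _EVAC_KEYS ("evac_nodes").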
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    ng = {}
    for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
      ng[guuid] = {
        "name": gdata.name,
        "alloc_policy": gdata.alloc_policy,
        }
    return ng

  @staticmethod
  def _ComputeBasicNodeData(node_cfg):
    """Compute global node data.

    @rtype: dict
    @return: a dict mapping node names to dicts holding the static
        (configuration-derived) node attributes

    """
    node_results = {}
    for ninfo in node_cfg.values():
      # fill in static (config-based) values
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        "group": ninfo.group,
        "master_capable": ninfo.master_capable,
        "vm_capable": ninfo.vm_capable,
        }

      node_results[ninfo.name] = pnr

    return node_results
  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    This merges the live (RPC-collected) node data into the static
    structures built by L{_ComputeBasicNodeData}.

    @param node_results: the basic node structures as filled from the config

    """
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results
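
  # Worked example for the free-memory correction above (made-up numbers):
  # an instance configured with beinfo[constants.BE_MEMORY] = 2048 MB whose
  # hypervisor currently reports only 1536 MB in use yields i_mem_diff = 512,
  # so the node's reported "memory_free" is lowered by 512 MB; the memory the
  # instance may still grow back into is therefore not offered to the
  # allocator.  If the instance uses more than configured, the max(0, ...)
  # guard keeps "memory_free" unchanged rather than inflating it.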
  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_INT_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if instance.disk_template in constants.DTS_INT_MIRROR and \
        len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)
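
  # For an allocation request the serialized document built above has roughly
  # the following shape (node, instance and nodegroup details omitted; all
  # concrete values are placeholders):
  #
  #   {
  #     "version": constants.IALLOCATOR_VERSION,
  #     "cluster_name": "cluster.example.com",
  #     "cluster_tags": [],
  #     "enabled_hypervisors": ["xen-pvm"],
  #     "nodegroups": { ... },
  #     "nodes": { ... },
  #     "instances": { ... },
  #     "request": {
  #       "type": "allocate",
  #       "name": "instance1.example.com",
  #       "disk_template": "drbd",
  #       "disk_space_total": 10368,
  #       "required_nodes": 2,
  #       ...
  #     },
  #   }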
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process the results and, if they validate, save them in
    self.out_data and the other result attributes.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict
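

# A well-formed allocator reply, as checked by _ValidateResult above, is a
# JSON object with at least the keys "success", "info" and "result" (replies
# from old-style scripts that use "nodes" instead of "result" are translated
# on the fly).  The helper below builds such a minimal reply; it is only an
# illustration of the expected document shape, not part of the IAllocator
# protocol implementation, and its name is made up for this example.
def _ExampleIAllocatorReply(chosen_nodes):
  """Return a minimal, valid iallocator reply document.

  @type chosen_nodes: list
  @param chosen_nodes: node names a hypothetical allocator selected

  """
  return serializer.Dump({
    "success": True,
    "info": "example reply",
    "result": list(chosen_nodes),
    })
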
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
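

# LUTestAllocator only exercises the IAllocator wrapper: with direction
# constants.IALLOCATOR_DIR_IN it returns the generated input text without
# invoking any script, while with constants.IALLOCATOR_DIR_OUT it runs the
# named allocator and returns its raw, unvalidated output.  Roughly, assuming
# "ial" is an already-built IAllocator and "hail" an installed allocator
# script (both names used here for illustration only):
#
#   text_in = ial.in_text            # what direction "in" returns
#   ial.Run("hail", validate=False)  # direction "out": run the script ...
#   text_out = ial.out_text          # ... and return its untouched output
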
#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  }


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_OP_QUERY}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
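

# Typical use of the mapping above: the generic query logical units resolve
# the requested resource to one of the _*Query classes and work with that
# class' FIELDS definition.  The helper below is a simplified sketch of that
# lookup (the real query LUs also handle field selection and locking) and is
# shown here for illustration only; its name is made up for this example.
def _ExampleListQueryFields(what):
  """Return the available field definitions for a query resource.

  @param what: one of the L{constants.QR_OP_QUERY} resource names

  """
  impl = _GetQueryImplementation(what)
  # FIELDS is the per-resource field definition used by the query framework;
  # passing None selects all fields
  return query.QueryFields(impl.FIELDS, None)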