4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
import re
import logging
import copy
import itertools

import OpenSSL

44 from ganeti import ssh
45 from ganeti import utils
46 from ganeti import errors
47 from ganeti import hypervisor
48 from ganeti import locking
49 from ganeti import constants
50 from ganeti import objects
51 from ganeti import serializer
52 from ganeti import ssconf
53 from ganeti import uidpool
54 from ganeti import compat
55 from ganeti import masterd
56 from ganeti import netutils
57 from ganeti import query
58 from ganeti import qlang
59 from ganeti import opcodes
61 import ganeti.masterd.instance # pylint: disable-msg=W0611
64 def _SupportsOob(cfg, node):
65 """Tells if node supports OOB.
67 @type cfg: L{config.ConfigWriter}
68 @param cfg: The cluster configuration
69 @type node: L{objects.Node}
71 @return: The OOB script if supported or an empty string otherwise
74 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
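# Illustrative note (not part of the original module): callers typically rely
# on the truthiness of the returned value, e.g.
#   if not _SupportsOob(lu.cfg, node_object):
#     raise errors.OpPrereqError(...)
# since an empty OOB program string means the node has no out-of-band support.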
78 class LogicalUnit(object):
79 """Logical Unit base class.
81 Subclasses must follow these rules:
82 - implement ExpandNames
83 - implement CheckPrereq (except when tasklets are used)
84 - implement Exec (except when tasklets are used)
85 - implement BuildHooksEnv
86 - redefine HPATH and HTYPE
87 - optionally redefine their run requirements:
88 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
90 Note that all commands require root permissions.
92 @ivar dry_run_result: the value (if any) that will be returned to the caller
93 in dry-run mode (signalled by opcode dry_run parameter)
100 def __init__(self, processor, op, context, rpc):
101 """Constructor for LogicalUnit.
103 This needs to be overridden in derived classes in order to check op validity.
107 self.proc = processor
109 self.cfg = context.cfg
110 self.context = context
112 # Dicts used to declare locking needs to mcpu
113 self.needed_locks = None
114 self.acquired_locks = {}
115 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
117 self.remove_locks = {}
118 # Used to force good behavior when calling helper functions
119 self.recalculate_locks = {}
122 self.Log = processor.Log # pylint: disable-msg=C0103
123 self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
124 self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
125 self.LogStep = processor.LogStep # pylint: disable-msg=C0103
126 # support for dry-run
127 self.dry_run_result = None
128 # support for generic debug attribute
129 if (not hasattr(self.op, "debug_level") or
130 not isinstance(self.op.debug_level, int)):
131 self.op.debug_level = 0
136 # Validate opcode parameters and set defaults
137 self.op.Validate(True)
139 self.CheckArguments()
142 """Returns the SshRunner object
146 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
149 ssh = property(fget=__GetSSH)
151 def CheckArguments(self):
152 """Check syntactic validity for the opcode arguments.
154 This method is for doing a simple syntactic check and ensuring
155 validity of opcode parameters, without any cluster-related
156 checks. While the same can be accomplished in ExpandNames and/or
157 CheckPrereq, doing these separately is better because:
159 - ExpandNames is left as purely a lock-related function
160 - CheckPrereq is run after we have acquired locks (and possibly waited for them)
163 The function is allowed to change the self.op attribute so that
164 later methods can no longer worry about missing parameters.
169 def ExpandNames(self):
170 """Expand names for this LU.
172 This method is called before starting to execute the opcode, and it should
173 update all the parameters of the opcode to their canonical form (e.g. a
174 short node name must be fully expanded after this method has successfully
175 completed). This way locking, hooks, logging, etc. can work correctly.
177 LUs which implement this method must also populate the self.needed_locks
178 member, as a dict with lock levels as keys, and a list of needed lock names
181 - use an empty dict if you don't need any lock
182 - if you don't need any lock at a particular level omit that level
183 - don't put anything for the BGL level
184 - if you want all locks at a level use locking.ALL_SET as a value
186 If you need to share locks (rather than acquire them exclusively) at one
187 level you can modify self.share_locks, setting a true value (usually 1) for
188 that level. By default locks are not shared.
190 This function can also define a list of tasklets, which then will be
191 executed in order instead of the usual LU-level CheckPrereq and Exec
192 functions, if those are not defined by the LU.
196 # Acquire all nodes and one instance
197 self.needed_locks = {
198 locking.LEVEL_NODE: locking.ALL_SET,
199 locking.LEVEL_INSTANCE: ['instance1.example.com'],
201 # Acquire just two nodes
202 self.needed_locks = {
203 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
206 self.needed_locks = {} # No, you can't leave it to the default value None
209 # The implementation of this method is mandatory only if the new LU is
210 # concurrent, so that old LUs don't need to be changed all at the same
211 # time.
212 if self.REQ_BGL:
213 self.needed_locks = {} # Exclusive LUs don't need locks.
214 else:
215 raise NotImplementedError
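# Illustrative sketch (assumed LU, not in the original module): a typical
# instance-level LU combines the helpers defined further below, e.g.
#
#   def ExpandNames(self):
#     self._ExpandAndLockInstance()
#     self.needed_locks[locking.LEVEL_NODE] = []
#     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE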
217 def DeclareLocks(self, level):
218 """Declare LU locking needs for a level
220 While most LUs can just declare their locking needs at ExpandNames time,
221 sometimes there's the need to calculate some locks after having acquired
222 the ones before. This function is called just before acquiring locks at a
223 particular level, but after acquiring the ones at lower levels, and permits
224 such calculations. It can be used to modify self.needed_locks, and by
225 default it does nothing.
227 This function is only called if you have something already set in
228 self.needed_locks for the level.
230 @param level: Locking level which is going to be locked
231 @type level: member of ganeti.locking.LEVELS
235 def CheckPrereq(self):
236 """Check prerequisites for this LU.
238 This method should check that the prerequisites for the execution
239 of this LU are fulfilled. It can do internode communication, but
240 it should be idempotent - no cluster or system changes are allowed.
243 The method should raise errors.OpPrereqError in case something is
244 not fulfilled. Its return value is ignored.
246 This method should also update all the parameters of the opcode to
247 their canonical form if it hasn't been done by ExpandNames before.
250 if self.tasklets is not None:
251 for (idx, tl) in enumerate(self.tasklets):
252 logging.debug("Checking prerequisites for tasklet %s/%s",
253 idx + 1, len(self.tasklets))
254 tl.CheckPrereq()
258 def Exec(self, feedback_fn):
261 This method should implement the actual work. It should raise
262 errors.OpExecError for failures that are somewhat dealt with in code, or expected.
266 if self.tasklets is not None:
267 for (idx, tl) in enumerate(self.tasklets):
268 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
269 tl.Exec(feedback_fn)
270 else:
271 raise NotImplementedError
273 def BuildHooksEnv(self):
274 """Build hooks environment for this LU.
276 This method should return a three-element tuple consisting of: a dict
277 containing the environment that will be used for running the
278 specific hook for this LU, a list of node names on which the hook
279 should run before the execution, and a list of node names on which
280 the hook should run after the execution.
282 The keys of the dict must not have 'GANETI_' prefixed as this will
283 be handled in the hooks runner. Also note additional keys will be
284 added by the hooks runner. If the LU doesn't define any
285 environment, an empty dict (and not None) should be returned.
287 No nodes should be returned as an empty list (and not None).
289 Note that if the HPATH for a LU class is None, this function will not be called.
293 raise NotImplementedError
295 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
296 """Notify the LU about the results of its hooks.
298 This method is called every time a hooks phase is executed, and notifies
299 the Logical Unit about the hooks' result. The LU can then use it to alter
300 its result based on the hooks. By default the method does nothing and the
301 previous result is passed back unchanged but any LU can define it if it
302 wants to use the local cluster hook-scripts somehow.
304 @param phase: one of L{constants.HOOKS_PHASE_POST} or
305 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
306 @param hook_results: the results of the multi-node hooks rpc call
307 @param feedback_fn: function used to send feedback back to the caller
308 @param lu_result: the previous Exec result this LU had, or None
310 @return: the new Exec result, based on the previous result
314 # API must be kept, thus we ignore the unused-argument and
315 # could-be-a-function pylint warnings
316 # pylint: disable-msg=W0613,R0201
319 def _ExpandAndLockInstance(self):
320 """Helper function to expand and lock an instance.
322 Many LUs that work on an instance take its name in self.op.instance_name
323 and need to expand it and then declare the expanded name for locking. This
324 function does it, and then updates self.op.instance_name to the expanded
325 name. It also initializes needed_locks as a dict, if this hasn't been done before.
329 if self.needed_locks is None:
330 self.needed_locks = {}
332 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
333 "_ExpandAndLockInstance called with instance-level locks set"
334 self.op.instance_name = _ExpandInstanceName(self.cfg,
335 self.op.instance_name)
336 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
338 def _LockInstancesNodes(self, primary_only=False):
339 """Helper function to declare instances' nodes for locking.
341 This function should be called after locking one or more instances to lock
342 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
343 with all primary or secondary nodes for instances already locked and
344 present in self.needed_locks[locking.LEVEL_INSTANCE].
346 It should be called from DeclareLocks, and for safety only works if
347 self.recalculate_locks[locking.LEVEL_NODE] is set.
349 In the future it may grow parameters to just lock some instance's nodes, or
350 to just lock primaries or secondary nodes, if needed.
352 It should be called in DeclareLocks in a way similar to::
354 if level == locking.LEVEL_NODE:
355 self._LockInstancesNodes()
357 @type primary_only: boolean
358 @param primary_only: only lock primary nodes of locked instances
361 assert locking.LEVEL_NODE in self.recalculate_locks, \
362 "_LockInstancesNodes helper function called with no nodes to recalculate"
364 # TODO: check if we've really been called with the instance locks held
366 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
367 # future we might want to have different behaviors depending on the value
368 # of self.recalculate_locks[locking.LEVEL_NODE]
369 wanted_nodes = []
370 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
371 instance = self.context.cfg.GetInstanceInfo(instance_name)
372 wanted_nodes.append(instance.primary_node)
373 if not primary_only:
374 wanted_nodes.extend(instance.secondary_nodes)
376 if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
377 self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
378 elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
379 self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
381 del self.recalculate_locks[locking.LEVEL_NODE]
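# Illustrative sketch (assumed LU, continuing the ExpandNames example above):
# the node locks declared as "to be recalculated" are then resolved lazily in
# DeclareLocks, e.g.
#
#   def DeclareLocks(self, level):
#     if level == locking.LEVEL_NODE:
#       self._LockInstancesNodes(primary_only=True)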
384 class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
385 """Simple LU which runs no hooks.
387 This LU is intended as a parent for other LogicalUnits which will
388 run no hooks, in order to reduce duplicate code.
394 def BuildHooksEnv(self):
395 """Empty BuildHooksEnv for NoHooksLu.
397 This just raises an error.
400 assert False, "BuildHooksEnv called for NoHooksLUs"
404 """Tasklet base class.
406 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
407 they can mix legacy code with tasklets. Locking needs to be done in the LU,
408 tasklets know nothing about locks.
410 Subclasses must follow these rules:
411 - Implement CheckPrereq
415 def __init__(self, lu):
422 def CheckPrereq(self):
423 """Check prerequisites for this tasklet.
425 This method should check whether the prerequisites for the execution of
426 this tasklet are fulfilled. It can do internode communication, but it
427 should be idempotent - no cluster or system changes are allowed.
429 The method should raise errors.OpPrereqError in case something is not
430 fulfilled. Its return value is ignored.
432 This method should also update all parameters to their canonical form if it
433 hasn't been done before.
438 def Exec(self, feedback_fn):
439 """Execute the tasklet.
441 This method should implement the actual work. It should raise
442 errors.OpExecError for failures that are somewhat dealt with in code, or expected.
446 raise NotImplementedError
450 """Base for query utility classes.
453 #: Attribute holding field definitions
456 def __init__(self, filter_, fields, use_locking):
457 """Initializes this class.
460 self.use_locking = use_locking
462 self.query = query.Query(self.FIELDS, fields, filter_=filter_,
464 self.requested_data = self.query.RequestedData()
465 self.names = self.query.RequestedNames()
467 # Sort only if no names were requested
468 self.sort_by_name = not self.names
470 self.do_locking = None
473 def _GetNames(self, lu, all_names, lock_level):
474 """Helper function to determine names asked for in the query.
478 names = lu.acquired_locks[lock_level]
482 if self.wanted == locking.ALL_SET:
483 assert not self.names
484 # caller didn't specify names, so ordering is not important
485 return utils.NiceSort(names)
487 # caller specified names and we must keep the same order
489 assert not self.do_locking or lu.acquired_locks[lock_level]
491 missing = set(self.wanted).difference(names)
492 if missing:
493 raise errors.OpExecError("Some items were removed before retrieving"
494 " their data: %s" % missing)
496 # Return expanded names
500 def FieldsQuery(cls, fields):
501 """Returns list of available fields.
503 @return: List of L{objects.QueryFieldDefinition}
506 return query.QueryFields(cls.FIELDS, fields)
508 def ExpandNames(self, lu):
509 """Expand names for this query.
511 See L{LogicalUnit.ExpandNames}.
514 raise NotImplementedError()
516 def DeclareLocks(self, lu, level):
517 """Declare locks for this query.
519 See L{LogicalUnit.DeclareLocks}.
522 raise NotImplementedError()
524 def _GetQueryData(self, lu):
525 """Collects all data for this query.
527 @return: Query data object
530 raise NotImplementedError()
532 def NewStyleQuery(self, lu):
533 """Collect data and execute query.
536 return query.GetQueryResponse(self.query, self._GetQueryData(lu),
537 sort_by_name=self.sort_by_name)
539 def OldStyleQuery(self, lu):
540 """Collect data and execute query.
543 return self.query.OldStyleQuery(self._GetQueryData(lu),
544 sort_by_name=self.sort_by_name)
547 def _GetWantedNodes(lu, nodes):
548 """Returns list of checked and expanded node names.
550 @type lu: L{LogicalUnit}
551 @param lu: the logical unit on whose behalf we execute
553 @param nodes: list of node names or None for all nodes
555 @return: the list of nodes, sorted
556 @raise errors.ProgrammerError: if the nodes parameter is wrong type
560 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
562 return utils.NiceSort(lu.cfg.GetNodeList())
565 def _GetWantedInstances(lu, instances):
566 """Returns list of checked and expanded instance names.
568 @type lu: L{LogicalUnit}
569 @param lu: the logical unit on whose behalf we execute
570 @type instances: list
571 @param instances: list of instance names or None for all instances
573 @return: the list of instances, sorted
574 @raise errors.OpPrereqError: if the instances parameter is wrong type
575 @raise errors.OpPrereqError: if any of the passed instances is not found
578 if instances:
579 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
580 else:
581 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
585 def _GetUpdatedParams(old_params, update_dict,
586 use_default=True, use_none=False):
587 """Return the new version of a parameter dictionary.
589 @type old_params: dict
590 @param old_params: old parameters
591 @type update_dict: dict
592 @param update_dict: dict containing new parameter values, or
593 constants.VALUE_DEFAULT to reset the parameter to its default
595 @type use_default: boolean
596 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
597 values as 'to be deleted' values
598 @type use_none: boolean
599 @param use_none: whether to recognise C{None} values as 'to be
602 @return: the new parameter dictionary
605 params_copy = copy.deepcopy(old_params)
606 for key, val in update_dict.iteritems():
607 if ((use_default and val == constants.VALUE_DEFAULT) or
608 (use_none and val is None)):
609 try:
610 del params_copy[key]
611 except KeyError:
612 pass
613 else:
614 params_copy[key] = val
615 return params_copy
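# Worked example (illustrative): _GetUpdatedParams({"a": 1, "b": 2},
# {"a": constants.VALUE_DEFAULT, "c": 3}) returns {"b": 2, "c": 3}; "a" is
# dropped so it reverts to its default, while "c" is added.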
618 def _CheckOutputFields(static, dynamic, selected):
619 """Checks whether all selected fields are valid.
621 @type static: L{utils.FieldSet}
622 @param static: static fields set
623 @type dynamic: L{utils.FieldSet}
624 @param dynamic: dynamic fields set
631 delta = f.NonMatching(selected)
633 raise errors.OpPrereqError("Unknown output fields selected: %s"
634 % ",".join(delta), errors.ECODE_INVAL)
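# Illustrative example: with static=utils.FieldSet("name") and
# dynamic=utils.FieldSet("free_memory"), selecting ["name", "bogus"] raises
# OpPrereqError("Unknown output fields selected: bogus").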
637 def _CheckGlobalHvParams(params):
638 """Validates that given hypervisor params are not global ones.
640 This will ensure that instances don't get customised versions of global params.
644 used_globals = constants.HVC_GLOBALS.intersection(params)
645 if used_globals:
646 msg = ("The following hypervisor parameters are global and cannot"
647 " be customized at instance level, please modify them at"
648 " cluster level: %s" % utils.CommaJoin(used_globals))
649 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
652 def _CheckNodeOnline(lu, node, msg=None):
653 """Ensure that a given node is online.
655 @param lu: the LU on behalf of which we make the check
656 @param node: the node to check
657 @param msg: if passed, should be a message to replace the default one
658 @raise errors.OpPrereqError: if the node is offline
661 if msg is None:
662 msg = "Can't use offline node"
663 if lu.cfg.GetNodeInfo(node).offline:
664 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
667 def _CheckNodeNotDrained(lu, node):
668 """Ensure that a given node is not drained.
670 @param lu: the LU on behalf of which we make the check
671 @param node: the node to check
672 @raise errors.OpPrereqError: if the node is drained
675 if lu.cfg.GetNodeInfo(node).drained:
676 raise errors.OpPrereqError("Can't use drained node %s" % node,
680 def _CheckNodeVmCapable(lu, node):
681 """Ensure that a given node is vm capable.
683 @param lu: the LU on behalf of which we make the check
684 @param node: the node to check
685 @raise errors.OpPrereqError: if the node is not vm capable
688 if not lu.cfg.GetNodeInfo(node).vm_capable:
689 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
693 def _CheckNodeHasOS(lu, node, os_name, force_variant):
694 """Ensure that a node supports a given OS.
696 @param lu: the LU on behalf of which we make the check
697 @param node: the node to check
698 @param os_name: the OS to query about
699 @param force_variant: whether to ignore variant errors
700 @raise errors.OpPrereqError: if the node is not supporting the OS
703 result = lu.rpc.call_os_get(node, os_name)
704 result.Raise("OS '%s' not in supported OS list for node %s" %
705 (os_name, node),
706 prereq=True, ecode=errors.ECODE_INVAL)
707 if not force_variant:
708 _CheckOSVariant(result.payload, os_name)
711 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
712 """Ensure that a node has the given secondary ip.
714 @type lu: L{LogicalUnit}
715 @param lu: the LU on behalf of which we make the check
717 @param node: the node to check
718 @type secondary_ip: string
719 @param secondary_ip: the ip to check
720 @type prereq: boolean
721 @param prereq: whether to throw a prerequisite or an execute error
722 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
723 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
726 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
727 result.Raise("Failure checking secondary ip on node %s" % node,
728 prereq=prereq, ecode=errors.ECODE_ENVIRON)
729 if not result.payload:
730 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
731 " please fix and re-run this command" % secondary_ip)
732 if prereq:
733 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
734 else:
735 raise errors.OpExecError(msg)
738 def _GetClusterDomainSecret():
739 """Reads the cluster domain secret.
742 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
746 def _CheckInstanceDown(lu, instance, reason):
747 """Ensure that an instance is not running."""
748 if instance.admin_up:
749 raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
750 (instance.name, reason), errors.ECODE_STATE)
752 pnode = instance.primary_node
753 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
754 ins_l.Raise("Can't contact node %s for instance information" % pnode,
755 prereq=True, ecode=errors.ECODE_ENVIRON)
757 if instance.name in ins_l.payload:
758 raise errors.OpPrereqError("Instance %s is running, %s" %
759 (instance.name, reason), errors.ECODE_STATE)
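# Illustrative usage (assumed caller, not shown here): LUs that must operate
# on a stopped instance call e.g.
#   _CheckInstanceDown(self, instance, "cannot reinstall")
# so that the reason string ends up in the OpPrereqError messages above.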
762 def _ExpandItemName(fn, name, kind):
763 """Expand an item name.
765 @param fn: the function to use for expansion
766 @param name: requested item name
767 @param kind: text description ('Node' or 'Instance')
768 @return: the resolved (full) name
769 @raise errors.OpPrereqError: if the item is not found
773 if full_name is None:
774 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
779 def _ExpandNodeName(cfg, name):
780 """Wrapper over L{_ExpandItemName} for nodes."""
781 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
784 def _ExpandInstanceName(cfg, name):
785 """Wrapper over L{_ExpandItemName} for instance."""
786 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
789 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
790 memory, vcpus, nics, disk_template, disks,
791 bep, hvp, hypervisor_name):
792 """Builds instance related env variables for hooks
794 This builds the hook environment from individual variables.
797 @param name: the name of the instance
798 @type primary_node: string
799 @param primary_node: the name of the instance's primary node
800 @type secondary_nodes: list
801 @param secondary_nodes: list of secondary nodes as strings
802 @type os_type: string
803 @param os_type: the name of the instance's OS
804 @type status: boolean
805 @param status: the should_run status of the instance
807 @param memory: the memory size of the instance
809 @param vcpus: the count of VCPUs the instance has
811 @param nics: list of tuples (ip, mac, mode, link) representing
812 the NICs the instance has
813 @type disk_template: string
814 @param disk_template: the disk template of the instance
816 @param disks: the list of (size, mode) pairs
818 @param bep: the backend parameters for the instance
820 @param hvp: the hypervisor parameters for the instance
821 @type hypervisor_name: string
822 @param hypervisor_name: the hypervisor for the instance
824 @return: the hook environment for this instance
833 "INSTANCE_NAME": name,
834 "INSTANCE_PRIMARY": primary_node,
835 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
836 "INSTANCE_OS_TYPE": os_type,
837 "INSTANCE_STATUS": str_status,
838 "INSTANCE_MEMORY": memory,
839 "INSTANCE_VCPUS": vcpus,
840 "INSTANCE_DISK_TEMPLATE": disk_template,
841 "INSTANCE_HYPERVISOR": hypervisor_name,
845 nic_count = len(nics)
846 for idx, (ip, mac, mode, link) in enumerate(nics):
849 env["INSTANCE_NIC%d_IP" % idx] = ip
850 env["INSTANCE_NIC%d_MAC" % idx] = mac
851 env["INSTANCE_NIC%d_MODE" % idx] = mode
852 env["INSTANCE_NIC%d_LINK" % idx] = link
853 if mode == constants.NIC_MODE_BRIDGED:
854 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
858 env["INSTANCE_NIC_COUNT"] = nic_count
861 disk_count = len(disks)
862 for idx, (size, mode) in enumerate(disks):
863 env["INSTANCE_DISK%d_SIZE" % idx] = size
864 env["INSTANCE_DISK%d_MODE" % idx] = mode
868 env["INSTANCE_DISK_COUNT"] = disk_count
870 for source, kind in [(bep, "BE"), (hvp, "HV")]:
871 for key, value in source.items():
872 env["INSTANCE_%s_%s" % (kind, key)] = value
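# Example of the resulting keys (illustrative): an instance with one bridged
# NIC and one disk ends up with INSTANCE_NIC0_IP/_MAC/_MODE/_LINK/_BRIDGE,
# INSTANCE_NIC_COUNT, INSTANCE_DISK0_SIZE/_MODE, INSTANCE_DISK_COUNT, plus one
# INSTANCE_BE_* and INSTANCE_HV_* entry per backend/hypervisor parameter.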
877 def _NICListToTuple(lu, nics):
878 """Build a list of nic information tuples.
880 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
881 value in LUInstanceQueryData.
883 @type lu: L{LogicalUnit}
884 @param lu: the logical unit on whose behalf we execute
885 @type nics: list of L{objects.NIC}
886 @param nics: list of nics to convert to hooks tuples
890 cluster = lu.cfg.GetClusterInfo()
894 filled_params = cluster.SimpleFillNIC(nic.nicparams)
895 mode = filled_params[constants.NIC_MODE]
896 link = filled_params[constants.NIC_LINK]
897 hooks_nics.append((ip, mac, mode, link))
901 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
902 """Builds instance related env variables for hooks from an object.
904 @type lu: L{LogicalUnit}
905 @param lu: the logical unit on whose behalf we execute
906 @type instance: L{objects.Instance}
907 @param instance: the instance for which we should build the
910 @param override: dictionary with key/values that will override
913 @return: the hook environment dictionary
916 cluster = lu.cfg.GetClusterInfo()
917 bep = cluster.FillBE(instance)
918 hvp = cluster.FillHV(instance)
920 'name': instance.name,
921 'primary_node': instance.primary_node,
922 'secondary_nodes': instance.secondary_nodes,
923 'os_type': instance.os,
924 'status': instance.admin_up,
925 'memory': bep[constants.BE_MEMORY],
926 'vcpus': bep[constants.BE_VCPUS],
927 'nics': _NICListToTuple(lu, instance.nics),
928 'disk_template': instance.disk_template,
929 'disks': [(disk.size, disk.mode) for disk in instance.disks],
932 'hypervisor_name': instance.hypervisor,
935 args.update(override)
936 return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
939 def _AdjustCandidatePool(lu, exceptions):
940 """Adjust the candidate pool after node operations.
943 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
945 lu.LogInfo("Promoted nodes to master candidate role: %s",
946 utils.CommaJoin(node.name for node in mod_list))
947 for name in mod_list:
948 lu.context.ReaddNode(name)
949 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
951 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
955 def _DecideSelfPromotion(lu, exceptions=None):
956 """Decide whether I should promote myself as a master candidate.
959 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
960 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
961 # the new node will increase mc_max with one, so:
962 mc_should = min(mc_should + 1, cp_size)
963 return mc_now < mc_should
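# Worked example (illustrative): with candidate_pool_size=10, 3 current master
# candidates and 5 desired, the new node promotes itself because
# 3 < min(5 + 1, 10).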
966 def _CheckNicsBridgesExist(lu, target_nics, target_node):
967 """Check that the bridges needed by a list of nics exist.
970 cluster = lu.cfg.GetClusterInfo()
971 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
972 brlist = [params[constants.NIC_LINK] for params in paramslist
973 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
975 result = lu.rpc.call_bridges_exist(target_node, brlist)
976 result.Raise("Error checking bridges on destination node '%s'" %
977 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
980 def _CheckInstanceBridgesExist(lu, instance, node=None):
981 """Check that the bridges needed by an instance exist.
985 node = instance.primary_node
986 _CheckNicsBridgesExist(lu, instance.nics, node)
989 def _CheckOSVariant(os_obj, name):
990 """Check whether an OS name conforms to the os variants specification.
992 @type os_obj: L{objects.OS}
993 @param os_obj: OS object to check
995 @param name: OS name passed by the user, to check for validity
998 if not os_obj.supported_variants:
1000 variant = objects.OS.GetVariant(name)
1002 raise errors.OpPrereqError("OS name must include a variant",
1005 if variant not in os_obj.supported_variants:
1006 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
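# Illustrative example: for an OS that declares variants, a name such as
# "debootstrap+default" is accepted if "default" is among supported_variants,
# while a bare "debootstrap" triggers "OS name must include a variant".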
1009 def _GetNodeInstancesInner(cfg, fn):
1010 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1013 def _GetNodeInstances(cfg, node_name):
1014 """Returns a list of all primary and secondary instances on a node.
1018 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1021 def _GetNodePrimaryInstances(cfg, node_name):
1022 """Returns primary instances on a node.
1025 return _GetNodeInstancesInner(cfg,
1026 lambda inst: node_name == inst.primary_node)
1029 def _GetNodeSecondaryInstances(cfg, node_name):
1030 """Returns secondary instances on a node.
1033 return _GetNodeInstancesInner(cfg,
1034 lambda inst: node_name in inst.secondary_nodes)
1037 def _GetStorageTypeArgs(cfg, storage_type):
1038 """Returns the arguments for a storage type.
1041 # Special case for file storage
1042 if storage_type == constants.ST_FILE:
1043 # storage.FileStorage wants a list of storage directories
1044 return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1049 def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1052 for dev in instance.disks:
1053 cfg.SetDiskID(dev, node_name)
1055 result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1056 result.Raise("Failed to get disk status from node %s" % node_name,
1057 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1059 for idx, bdev_status in enumerate(result.payload):
1060 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1066 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1067 """Check the sanity of iallocator and node arguments and use the
1068 cluster-wide iallocator if appropriate.
1070 Check that at most one of (iallocator, node) is specified. If none is
1071 specified, then the LU's opcode's iallocator slot is filled with the
1072 cluster-wide default iallocator.
1074 @type iallocator_slot: string
1075 @param iallocator_slot: the name of the opcode iallocator slot
1076 @type node_slot: string
1077 @param node_slot: the name of the opcode target node slot
1080 node = getattr(lu.op, node_slot, None)
1081 iallocator = getattr(lu.op, iallocator_slot, None)
1083 if node is not None and iallocator is not None:
1084 raise errors.OpPrereqError("Do not specify both, iallocator and node.",
1086 elif node is None and iallocator is None:
1087 default_iallocator = lu.cfg.GetDefaultIAllocator()
1088 if default_iallocator:
1089 setattr(lu.op, iallocator_slot, default_iallocator)
1091 raise errors.OpPrereqError("No iallocator or node given and no"
1092 " cluster-wide default iallocator found."
1093 " Please specify either an iallocator or a"
1094 " node, or set a cluster-wide default"
1098 class LUClusterPostInit(LogicalUnit):
1099 """Logical unit for running hooks after cluster initialization.
1102 HPATH = "cluster-init"
1103 HTYPE = constants.HTYPE_CLUSTER
1105 def BuildHooksEnv(self):
1109 env = {"OP_TARGET": self.cfg.GetClusterName()}
1110 mn = self.cfg.GetMasterNode()
1111 return env, [], [mn]
1113 def Exec(self, feedback_fn):
1120 class LUClusterDestroy(LogicalUnit):
1121 """Logical unit for destroying the cluster.
1124 HPATH = "cluster-destroy"
1125 HTYPE = constants.HTYPE_CLUSTER
1127 def BuildHooksEnv(self):
1131 env = {"OP_TARGET": self.cfg.GetClusterName()}
1134 def CheckPrereq(self):
1135 """Check prerequisites.
1137 This checks whether the cluster is empty.
1139 Any errors are signaled by raising errors.OpPrereqError.
1142 master = self.cfg.GetMasterNode()
1144 nodelist = self.cfg.GetNodeList()
1145 if len(nodelist) != 1 or nodelist[0] != master:
1146 raise errors.OpPrereqError("There are still %d node(s) in"
1147 " this cluster." % (len(nodelist) - 1),
1149 instancelist = self.cfg.GetInstanceList()
1151 raise errors.OpPrereqError("There are still %d instance(s) in"
1152 " this cluster." % len(instancelist),
1155 def Exec(self, feedback_fn):
1156 """Destroys the cluster.
1159 master = self.cfg.GetMasterNode()
1161 # Run post hooks on master node before it's removed
1162 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1164 hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1166 # pylint: disable-msg=W0702
1167 self.LogWarning("Errors occurred running hooks on %s" % master)
1169 result = self.rpc.call_node_stop_master(master, False)
1170 result.Raise("Could not disable the master role")
1175 def _VerifyCertificate(filename):
1176 """Verifies a certificate for LUClusterVerify.
1178 @type filename: string
1179 @param filename: Path to PEM file
1183 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1184 utils.ReadFile(filename))
1185 except Exception, err: # pylint: disable-msg=W0703
1186 return (LUClusterVerify.ETYPE_ERROR,
1187 "Failed to load X509 certificate %s: %s" % (filename, err))
1189 (errcode, msg) = \
1190 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1191 constants.SSL_CERT_EXPIRATION_ERROR)
1194 fnamemsg = "While verifying %s: %s" % (filename, msg)
1199 return (None, fnamemsg)
1200 elif errcode == utils.CERT_WARNING:
1201 return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
1202 elif errcode == utils.CERT_ERROR:
1203 return (LUClusterVerify.ETYPE_ERROR, fnamemsg)
1205 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1208 class LUClusterVerify(LogicalUnit):
1209 """Verifies the cluster status.
1212 HPATH = "cluster-verify"
1213 HTYPE = constants.HTYPE_CLUSTER
1216 TCLUSTER = "cluster"
1217 TNODE = "node"
1218 TINSTANCE = "instance"
1220 ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1221 ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1222 EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1223 EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1224 EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1225 EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1226 EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1227 EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1228 EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1229 ENODEDRBD = (TNODE, "ENODEDRBD")
1230 ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1231 ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1232 ENODEHOOKS = (TNODE, "ENODEHOOKS")
1233 ENODEHV = (TNODE, "ENODEHV")
1234 ENODELVM = (TNODE, "ENODELVM")
1235 ENODEN1 = (TNODE, "ENODEN1")
1236 ENODENET = (TNODE, "ENODENET")
1237 ENODEOS = (TNODE, "ENODEOS")
1238 ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1239 ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1240 ENODERPC = (TNODE, "ENODERPC")
1241 ENODESSH = (TNODE, "ENODESSH")
1242 ENODEVERSION = (TNODE, "ENODEVERSION")
1243 ENODESETUP = (TNODE, "ENODESETUP")
1244 ENODETIME = (TNODE, "ENODETIME")
1245 ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1247 ETYPE_FIELD = "code"
1248 ETYPE_ERROR = "ERROR"
1249 ETYPE_WARNING = "WARNING"
1251 _HOOKS_INDENT_RE = re.compile("^", re.M)
1253 class NodeImage(object):
1254 """A class representing the logical and physical status of a node.
1257 @ivar name: the node name to which this object refers
1258 @ivar volumes: a structure as returned from
1259 L{ganeti.backend.GetVolumeList} (runtime)
1260 @ivar instances: a list of running instances (runtime)
1261 @ivar pinst: list of configured primary instances (config)
1262 @ivar sinst: list of configured secondary instances (config)
1263 @ivar sbp: dictionary of {primary-node: list of instances} for all
1264 instances for which this node is secondary (config)
1265 @ivar mfree: free memory, as reported by hypervisor (runtime)
1266 @ivar dfree: free disk, as reported by the node (runtime)
1267 @ivar offline: the offline status (config)
1268 @type rpc_fail: boolean
1269 @ivar rpc_fail: whether the RPC verify call was successful (overall,
1270 not whether the individual keys were correct) (runtime)
1271 @type lvm_fail: boolean
1272 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1273 @type hyp_fail: boolean
1274 @ivar hyp_fail: whether the RPC call didn't return the instance list
1275 @type ghost: boolean
1276 @ivar ghost: whether this is a known node or not (config)
1277 @type os_fail: boolean
1278 @ivar os_fail: whether the RPC call didn't return valid OS data
1280 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1281 @type vm_capable: boolean
1282 @ivar vm_capable: whether the node can host instances
1285 def __init__(self, offline=False, name=None, vm_capable=True):
1294 self.offline = offline
1295 self.vm_capable = vm_capable
1296 self.rpc_fail = False
1297 self.lvm_fail = False
1298 self.hyp_fail = False
1300 self.os_fail = False
1303 def ExpandNames(self):
1304 self.needed_locks = {
1305 locking.LEVEL_NODE: locking.ALL_SET,
1306 locking.LEVEL_INSTANCE: locking.ALL_SET,
1308 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1310 def _Error(self, ecode, item, msg, *args, **kwargs):
1311 """Format an error message.
1313 Based on the opcode's error_codes parameter, either format a
1314 parseable error code, or a simpler error string.
1316 This must be called only from Exec and functions called from Exec.
1319 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1321 # first complete the msg
1324 # then format the whole message
1325 if self.op.error_codes:
1326 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1332 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1333 # and finally report it via the feedback_fn
1334 self._feedback_fn(" - %s" % msg)
1336 def _ErrorIf(self, cond, *args, **kwargs):
1337 """Log an error message if the passed condition is True.
1340 cond = bool(cond) or self.op.debug_simulate_errors
1342 self._Error(*args, **kwargs)
1343 # do not mark the operation as failed for WARN cases only
1344 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1345 self.bad = self.bad or cond
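# Illustrative usage (mirroring the verification helpers below):
#   _ErrorIf(test, self.ENODERPC, node, "unable to verify node: %s", reason)
# records the message via _Error and marks the whole run as bad only for
# ETYPE_ERROR entries, not for ETYPE_WARNING ones.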
1347 def _VerifyNode(self, ninfo, nresult):
1348 """Perform some basic validation on data returned from a node.
1350 - check the result data structure is well formed and has all the mandatory fields
1352 - check ganeti version
1354 @type ninfo: L{objects.Node}
1355 @param ninfo: the node to check
1356 @param nresult: the results from the node
1358 @return: whether overall this call was successful (and we can expect
1359 reasonable values in the response)
1363 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1365 # main result, nresult should be a non-empty dict
1366 test = not nresult or not isinstance(nresult, dict)
1367 _ErrorIf(test, self.ENODERPC, node,
1368 "unable to verify node: no data returned")
1372 # compares ganeti version
1373 local_version = constants.PROTOCOL_VERSION
1374 remote_version = nresult.get("version", None)
1375 test = not (remote_version and
1376 isinstance(remote_version, (list, tuple)) and
1377 len(remote_version) == 2)
1378 _ErrorIf(test, self.ENODERPC, node,
1379 "connection to node returned invalid data")
1383 test = local_version != remote_version[0]
1384 _ErrorIf(test, self.ENODEVERSION, node,
1385 "incompatible protocol versions: master %s,"
1386 " node %s", local_version, remote_version[0])
1390 # node seems compatible, we can actually try to look into its results
1392 # full package version
1393 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1394 self.ENODEVERSION, node,
1395 "software version mismatch: master %s, node %s",
1396 constants.RELEASE_VERSION, remote_version[1],
1397 code=self.ETYPE_WARNING)
1399 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1400 if ninfo.vm_capable and isinstance(hyp_result, dict):
1401 for hv_name, hv_result in hyp_result.iteritems():
1402 test = hv_result is not None
1403 _ErrorIf(test, self.ENODEHV, node,
1404 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1406 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1407 if ninfo.vm_capable and isinstance(hvp_result, list):
1408 for item, hv_name, hv_result in hvp_result:
1409 _ErrorIf(True, self.ENODEHV, node,
1410 "hypervisor %s parameter verify failure (source %s): %s",
1411 hv_name, item, hv_result)
1413 test = nresult.get(constants.NV_NODESETUP,
1414 ["Missing NODESETUP results"])
1415 _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1420 def _VerifyNodeTime(self, ninfo, nresult,
1421 nvinfo_starttime, nvinfo_endtime):
1422 """Check the node time.
1424 @type ninfo: L{objects.Node}
1425 @param ninfo: the node to check
1426 @param nresult: the remote results for the node
1427 @param nvinfo_starttime: the start time of the RPC call
1428 @param nvinfo_endtime: the end time of the RPC call
1432 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1434 ntime = nresult.get(constants.NV_TIME, None)
1435 try:
1436 ntime_merged = utils.MergeTime(ntime)
1437 except (ValueError, TypeError):
1438 _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1441 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1442 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1443 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1444 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1445 else:
1446 ntime_diff = None
1448 _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1449 "Node time diverges by at least %s from master node time",
1452 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1453 """Check the node LVM data.
1455 @type ninfo: L{objects.Node}
1456 @param ninfo: the node to check
1457 @param nresult: the remote results for the node
1458 @param vg_name: the configured VG name
1465 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1467 # checks vg existence and size > 20G
1468 vglist = nresult.get(constants.NV_VGLIST, None)
1470 _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1472 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1473 constants.MIN_VG_SIZE)
1474 _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1477 pvlist = nresult.get(constants.NV_PVLIST, None)
1478 test = pvlist is None
1479 _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1481 # check that ':' is not present in PV names, since it's a
1482 # special character for lvcreate (denotes the range of PEs to
1484 for _, pvname, owner_vg in pvlist:
1485 test = ":" in pvname
1486 _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1487 " '%s' of VG '%s'", pvname, owner_vg)
1489 def _VerifyNodeNetwork(self, ninfo, nresult):
1490 """Check the node network connectivity.
1492 @type ninfo: L{objects.Node}
1493 @param ninfo: the node to check
1494 @param nresult: the remote results for the node
1498 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1500 test = constants.NV_NODELIST not in nresult
1501 _ErrorIf(test, self.ENODESSH, node,
1502 "node hasn't returned node ssh connectivity data")
1504 if nresult[constants.NV_NODELIST]:
1505 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1506 _ErrorIf(True, self.ENODESSH, node,
1507 "ssh communication with node '%s': %s", a_node, a_msg)
1509 test = constants.NV_NODENETTEST not in nresult
1510 _ErrorIf(test, self.ENODENET, node,
1511 "node hasn't returned node tcp connectivity data")
1513 if nresult[constants.NV_NODENETTEST]:
1514 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1515 for anode in nlist:
1516 _ErrorIf(True, self.ENODENET, node,
1517 "tcp communication with node '%s': %s",
1518 anode, nresult[constants.NV_NODENETTEST][anode])
1520 test = constants.NV_MASTERIP not in nresult
1521 _ErrorIf(test, self.ENODENET, node,
1522 "node hasn't returned node master IP reachability data")
1524 if not nresult[constants.NV_MASTERIP]:
1525 if node == self.master_node:
1526 msg = "the master node cannot reach the master IP (not configured?)"
1528 msg = "cannot reach the master IP"
1529 _ErrorIf(True, self.ENODENET, node, msg)
1531 def _VerifyInstance(self, instance, instanceconfig, node_image,
1533 """Verify an instance.
1535 This function checks to see if the required block devices are
1536 available on the instance's node.
1539 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1540 node_current = instanceconfig.primary_node
1542 node_vol_should = {}
1543 instanceconfig.MapLVsByNode(node_vol_should)
1545 for node in node_vol_should:
1546 n_img = node_image[node]
1547 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1548 # ignore missing volumes on offline or broken nodes
1550 for volume in node_vol_should[node]:
1551 test = volume not in n_img.volumes
1552 _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1553 "volume %s missing on node %s", volume, node)
1555 if instanceconfig.admin_up:
1556 pri_img = node_image[node_current]
1557 test = instance not in pri_img.instances and not pri_img.offline
1558 _ErrorIf(test, self.EINSTANCEDOWN, instance,
1559 "instance not running on its primary node %s",
1562 for node, n_img in node_image.items():
1563 if node != node_current:
1564 test = instance in n_img.instances
1565 _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1566 "instance should not run on node %s", node)
1568 diskdata = [(nname, success, status, idx)
1569 for (nname, disks) in diskstatus.items()
1570 for idx, (success, status) in enumerate(disks)]
1572 for nname, success, bdev_status, idx in diskdata:
1573 # the 'ghost node' construction in Exec() ensures that we have a
1575 snode = node_image[nname]
1576 bad_snode = snode.ghost or snode.offline
1577 _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
1578 self.EINSTANCEFAULTYDISK, instance,
1579 "couldn't retrieve status for disk/%s on %s: %s",
1580 idx, nname, bdev_status)
1581 _ErrorIf((instanceconfig.admin_up and success and
1582 bdev_status.ldisk_status == constants.LDS_FAULTY),
1583 self.EINSTANCEFAULTYDISK, instance,
1584 "disk/%s on %s is faulty", idx, nname)
1586 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1587 """Verify if there are any unknown volumes in the cluster.
1589 The .os, .swap and backup volumes are ignored. All other volumes are
1590 reported as unknown.
1592 @type reserved: L{ganeti.utils.FieldSet}
1593 @param reserved: a FieldSet of reserved volume names
1596 for node, n_img in node_image.items():
1597 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1598 # skip non-healthy nodes
1600 for volume in n_img.volumes:
1601 test = ((node not in node_vol_should or
1602 volume not in node_vol_should[node]) and
1603 not reserved.Matches(volume))
1604 self._ErrorIf(test, self.ENODEORPHANLV, node,
1605 "volume %s is unknown", volume)
1607 def _VerifyOrphanInstances(self, instancelist, node_image):
1608 """Verify the list of running instances.
1610 This checks what instances are running but unknown to the cluster.
1613 for node, n_img in node_image.items():
1614 for o_inst in n_img.instances:
1615 test = o_inst not in instancelist
1616 self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1617 "instance %s on node %s should not exist", o_inst, node)
1619 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1620 """Verify N+1 Memory Resilience.
1622 Check that if one single node dies we can still start all the
1623 instances it was primary for.
1626 cluster_info = self.cfg.GetClusterInfo()
1627 for node, n_img in node_image.items():
1628 # This code checks that every node which is now listed as
1629 # secondary has enough memory to host all instances it is
1630 # supposed to should a single other node in the cluster fail.
1631 # FIXME: not ready for failover to an arbitrary node
1632 # FIXME: does not support file-backed instances
1633 # WARNING: we currently take into account down instances as well
1634 # as up ones, considering that even if they're down someone
1635 # might want to start them even in the event of a node failure.
1637 # we're skipping offline nodes from the N+1 warning, since
1638 # most likely we don't have good memory information from them;
1639 # we already list instances living on such nodes, and that's
1642 for prinode, instances in n_img.sbp.items():
1644 for instance in instances:
1645 bep = cluster_info.FillBE(instance_cfg[instance])
1646 if bep[constants.BE_AUTO_BALANCE]:
1647 needed_mem += bep[constants.BE_MEMORY]
1648 test = n_img.mfree < needed_mem
1649 self._ErrorIf(test, self.ENODEN1, node,
1650 "not enough memory to accommodate instance failovers"
1651 " should node %s fail", prinode)
1653 def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1655 """Verifies and computes the node required file checksums.
1657 @type ninfo: L{objects.Node}
1658 @param ninfo: the node to check
1659 @param nresult: the remote results for the node
1660 @param file_list: required list of files
1661 @param local_cksum: dictionary of local files and their checksums
1662 @param master_files: list of files that only masters should have
1666 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1668 remote_cksum = nresult.get(constants.NV_FILELIST, None)
1669 test = not isinstance(remote_cksum, dict)
1670 _ErrorIf(test, self.ENODEFILECHECK, node,
1671 "node hasn't returned file checksum data")
1675 for file_name in file_list:
1676 node_is_mc = ninfo.master_candidate
1677 must_have = (file_name not in master_files) or node_is_mc
1679 test1 = file_name not in remote_cksum
1681 test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1683 test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1684 _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1685 "file '%s' missing", file_name)
1686 _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1687 "file '%s' has wrong checksum", file_name)
1688 # not candidate and this is not a must-have file
1689 _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1690 "file '%s' should not exist on non master"
1691 " candidates (and the file is outdated)", file_name)
1692 # all good, except non-master/non-must have combination
1693 _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1694 "file '%s' should not exist"
1695 " on non master candidates", file_name)
1697 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
1699 """Verifies the node DRBD status.
1701 @type ninfo: L{objects.Node}
1702 @param ninfo: the node to check
1703 @param nresult: the remote results for the node
1704 @param instanceinfo: the dict of instances
1705 @param drbd_helper: the configured DRBD usermode helper
1706 @param drbd_map: the DRBD map as returned by
1707 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1711 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1714 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1715 test = (helper_result is None)
1716 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1717 "no drbd usermode helper returned")
1718 if helper_result:
1719 status, payload = helper_result
1720 test = not status
1721 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1722 "drbd usermode helper check unsuccessful: %s", payload)
1723 test = status and (payload != drbd_helper)
1724 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1725 "wrong drbd usermode helper: %s", payload)
1727 # compute the DRBD minors
1728 node_drbd = {}
1729 for minor, instance in drbd_map[node].items():
1730 test = instance not in instanceinfo
1731 _ErrorIf(test, self.ECLUSTERCFG, None,
1732 "ghost instance '%s' in temporary DRBD map", instance)
1733 # ghost instance should not be running, but otherwise we
1734 # don't give double warnings (both ghost instance and
1735 # unallocated minor in use)
1736 if test:
1737 node_drbd[minor] = (instance, False)
1738 else:
1739 instance = instanceinfo[instance]
1740 node_drbd[minor] = (instance.name, instance.admin_up)
1742 # and now check them
1743 used_minors = nresult.get(constants.NV_DRBDLIST, [])
1744 test = not isinstance(used_minors, (tuple, list))
1745 _ErrorIf(test, self.ENODEDRBD, node,
1746 "cannot parse drbd status file: %s", str(used_minors))
1748 # we cannot check drbd status
1751 for minor, (iname, must_exist) in node_drbd.items():
1752 test = minor not in used_minors and must_exist
1753 _ErrorIf(test, self.ENODEDRBD, node,
1754 "drbd minor %d of instance %s is not active", minor, iname)
1755 for minor in used_minors:
1756 test = minor not in node_drbd
1757 _ErrorIf(test, self.ENODEDRBD, node,
1758 "unallocated drbd minor %d is in use", minor)
1760 def _UpdateNodeOS(self, ninfo, nresult, nimg):
1761 """Builds the node OS structures.
1763 @type ninfo: L{objects.Node}
1764 @param ninfo: the node to check
1765 @param nresult: the remote results for the node
1766 @param nimg: the node image object
1770 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1772 remote_os = nresult.get(constants.NV_OSLIST, None)
1773 test = (not isinstance(remote_os, list) or
1774 not compat.all(isinstance(v, list) and len(v) == 7
1775 for v in remote_os))
1777 _ErrorIf(test, self.ENODEOS, node,
1778 "node hasn't returned valid OS data")
1787 for (name, os_path, status, diagnose,
1788 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1790 if name not in os_dict:
1793 # parameters is a list of lists instead of list of tuples due to
1794 # JSON lacking a real tuple type, fix it:
1795 parameters = [tuple(v) for v in parameters]
1796 os_dict[name].append((os_path, status, diagnose,
1797 set(variants), set(parameters), set(api_ver)))
1799 nimg.oslist = os_dict
1801 def _VerifyNodeOS(self, ninfo, nimg, base):
1802 """Verifies the node OS list.
1804 @type ninfo: L{objects.Node}
1805 @param ninfo: the node to check
1806 @param nimg: the node image object
1807 @param base: the 'template' node we match against (e.g. from the master)
1811 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1813 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1815 for os_name, os_data in nimg.oslist.items():
1816 assert os_data, "Empty OS status for OS %s?!" % os_name
1817 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1818 _ErrorIf(not f_status, self.ENODEOS, node,
1819 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1820 _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1821 "OS '%s' has multiple entries (first one shadows the rest): %s",
1822 os_name, utils.CommaJoin([v[0] for v in os_data]))
1823 # this will be caught in the backend too
1824 _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1825 and not f_var, self.ENODEOS, node,
1826 "OS %s with API at least %d does not declare any variant",
1827 os_name, constants.OS_API_V15)
1828 # comparisons with the 'base' image
1829 test = os_name not in base.oslist
1830 _ErrorIf(test, self.ENODEOS, node,
1831 "Extra OS %s not present on reference node (%s)",
1835 assert base.oslist[os_name], "Base node has empty OS status?"
1836 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1838 # base OS is invalid, skipping
1840 for kind, a, b in [("API version", f_api, b_api),
1841 ("variants list", f_var, b_var),
1842 ("parameters", f_param, b_param)]:
1843 _ErrorIf(a != b, self.ENODEOS, node,
1844 "OS %s %s differs from reference node %s: %s vs. %s",
1845 kind, os_name, base.name,
1846 utils.CommaJoin(a), utils.CommaJoin(b))
1848 # check any missing OSes
1849 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
1850 _ErrorIf(missing, self.ENODEOS, node,
1851 "OSes present on reference node %s but missing on this node: %s",
1852 base.name, utils.CommaJoin(missing))
1854 def _VerifyOob(self, ninfo, nresult):
1855 """Verifies out of band functionality of a node.
1857 @type ninfo: L{objects.Node}
1858 @param ninfo: the node to check
1859 @param nresult: the remote results for the node
1863 # We just have to verify the paths on master and/or master candidates
1864 # as the oob helper is invoked on the master
1865 if ((ninfo.master_candidate or ninfo.master_capable) and
1866 constants.NV_OOB_PATHS in nresult):
1867 for path_result in nresult[constants.NV_OOB_PATHS]:
1868 self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
1870 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1871 """Verifies and updates the node volume data.
1873 This function will update a L{NodeImage}'s internal structures
1874 with data from the remote call.
1876 @type ninfo: L{objects.Node}
1877 @param ninfo: the node to check
1878 @param nresult: the remote results for the node
1879 @param nimg: the node image object
1880 @param vg_name: the configured VG name
1884 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1886 nimg.lvm_fail = True
1887 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1890 elif isinstance(lvdata, basestring):
1891 _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1892 utils.SafeEncode(lvdata))
1893 elif not isinstance(lvdata, dict):
1894 _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1896 nimg.volumes = lvdata
1897 nimg.lvm_fail = False
1899 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1900 """Verifies and updates the node instance list.
1902 If the listing was successful, then updates this node's instance
1903 list. Otherwise, it marks the RPC call as failed for the instance
1906 @type ninfo: L{objects.Node}
1907 @param ninfo: the node to check
1908 @param nresult: the remote results for the node
1909 @param nimg: the node image object
1912 idata = nresult.get(constants.NV_INSTANCELIST, None)
1913 test = not isinstance(idata, list)
1914 self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1915 " (instancelist): %s", utils.SafeEncode(str(idata)))
1917 nimg.hyp_fail = True
1919 nimg.instances = idata
1921 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1922 """Verifies and computes a node information map
1924 @type ninfo: L{objects.Node}
1925 @param ninfo: the node to check
1926 @param nresult: the remote results for the node
1927 @param nimg: the node image object
1928 @param vg_name: the configured VG name
1932 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1934 # try to read free memory (from the hypervisor)
1935 hv_info = nresult.get(constants.NV_HVINFO, None)
1936 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1937 _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1940 nimg.mfree = int(hv_info["memory_free"])
1941 except (ValueError, TypeError):
1942 _ErrorIf(True, self.ENODERPC, node,
1943 "node returned invalid nodeinfo, check hypervisor")
1945 # FIXME: devise a free space model for file based instances as well
1946 if vg_name is not None:
1947 test = (constants.NV_VGLIST not in nresult or
1948 vg_name not in nresult[constants.NV_VGLIST])
1949 _ErrorIf(test, self.ENODELVM, node,
1950 "node didn't return data for the volume group '%s'"
1951 " - it is either missing or broken", vg_name)
1954 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1955 except (ValueError, TypeError):
1956 _ErrorIf(True, self.ENODERPC, node,
1957 "node returned invalid LVM info, check LVM status")
1959 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
1960 """Gets per-disk status information for all instances.
1962 @type nodelist: list of strings
1963 @param nodelist: Node names
1964 @type node_image: dict of (name, L{objects.Node})
1965 @param node_image: Node objects
1966 @type instanceinfo: dict of (name, L{objects.Instance})
1967 @param instanceinfo: Instance objects
1968 @rtype: {instance: {node: [(success, payload)]}}
1969 @return: a dictionary of per-instance dictionaries with nodes as
1970 keys and disk information as values; the disk information is a
1971 list of tuples (success, payload)
1974 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1977 node_disks_devonly = {}
1978 diskless_instances = set()
1979 diskless = constants.DT_DISKLESS
1981 for nname in nodelist:
1982 node_instances = list(itertools.chain(node_image[nname].pinst,
1983 node_image[nname].sinst))
1984 diskless_instances.update(inst for inst in node_instances
1985 if instanceinfo[inst].disk_template == diskless)
1986 disks = [(inst, disk)
1987 for inst in node_instances
1988 for disk in instanceinfo[inst].disks]
1991 # No need to collect data
1994 node_disks[nname] = disks
1996 # Creating copies as SetDiskID below will modify the objects and that can
1997 # lead to incorrect data returned from nodes
1998 devonly = [dev.Copy() for (_, dev) in disks]
2001 self.cfg.SetDiskID(dev, nname)
2003 node_disks_devonly[nname] = devonly
2005 assert len(node_disks) == len(node_disks_devonly)
2007 # Collect data from all nodes with disks
2008 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2011 assert len(result) == len(node_disks)
2015 for (nname, nres) in result.items():
2016 disks = node_disks[nname]
2019 # No data from this node
2020 data = len(disks) * [(False, "node offline")]
2023 _ErrorIf(msg, self.ENODERPC, nname,
2024 "while getting disk information: %s", msg)
2026 # No data from this node
2027 data = len(disks) * [(False, msg)]
2030 for idx, i in enumerate(nres.payload):
2031 if isinstance(i, (tuple, list)) and len(i) == 2:
2034 logging.warning("Invalid result from node %s, entry %d: %s",
2036 data.append((False, "Invalid result from the remote node"))
2038 for ((inst, _), status) in zip(disks, data):
2039 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2041 # Add empty entries for diskless instances.
2042 for inst in diskless_instances:
2043 assert inst not in instdisk
2046 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2047 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2048 compat.all(isinstance(s, (tuple, list)) and
2049 len(s) == 2 for s in statuses)
2050 for inst, nnames in instdisk.items()
2051 for nname, statuses in nnames.items())
2052 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
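# Illustrative only (hypothetical names): the resulting instdisk mapping has
# one (success, payload) tuple per disk of the instance on each node, e.g.
#   {"inst1": {"node1": [(True, mirror_status), (False, "node offline")]}}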
2056 def _VerifyHVP(self, hvp_data):
2057 """Verifies locally the syntax of the hypervisor parameters.
2060 for item, hv_name, hv_params in hvp_data:
2061 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2064 hv_class = hypervisor.GetHypervisor(hv_name)
2065 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2066 hv_class.CheckParameterSyntax(hv_params)
2067 except errors.GenericError, err:
2068 self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
2071 def BuildHooksEnv(self):
2074 Cluster-Verify hooks are run only in the post phase; if they fail, their
2075 output is logged in the verify output and the verification fails.
2078 all_nodes = self.cfg.GetNodeList()
2080 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2082 for node in self.cfg.GetAllNodesInfo().values():
2083 env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2085 return env, [], all_nodes
2087 def Exec(self, feedback_fn):
2088 """Verify integrity of cluster, performing various tests on nodes.
2091 # This method has too many local variables. pylint: disable-msg=R0914
2093 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2094 verbose = self.op.verbose
2095 self._feedback_fn = feedback_fn
2096 feedback_fn("* Verifying global settings")
2097 for msg in self.cfg.VerifyConfig():
2098 _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2100 # Check the cluster certificates
2101 for cert_filename in constants.ALL_CERT_FILES:
2102 (errcode, msg) = _VerifyCertificate(cert_filename)
2103 _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2105 vg_name = self.cfg.GetVGName()
2106 drbd_helper = self.cfg.GetDRBDHelper()
2107 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2108 cluster = self.cfg.GetClusterInfo()
2109 nodelist = utils.NiceSort(self.cfg.GetNodeList())
2110 nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
2111 nodeinfo_byname = dict(zip(nodelist, nodeinfo))
2112 instancelist = utils.NiceSort(self.cfg.GetInstanceList())
2113 instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
2114 for iname in instancelist)
2115 groupinfo = self.cfg.GetAllNodeGroupsInfo()
2116 i_non_redundant = [] # Non redundant instances
2117 i_non_a_balanced = [] # Non auto-balanced instances
2118 n_offline = 0 # Count of offline nodes
2119 n_drained = 0 # Count of nodes being drained
2120 node_vol_should = {}
2122 # FIXME: verify OS list
2123 # do local checksums
2124 master_files = [constants.CLUSTER_CONF_FILE]
2125 master_node = self.master_node = self.cfg.GetMasterNode()
2126 master_ip = self.cfg.GetMasterIP()
2128 file_names = ssconf.SimpleStore().GetFileList()
2129 file_names.extend(constants.ALL_CERT_FILES)
2130 file_names.extend(master_files)
2131 if cluster.modify_etc_hosts:
2132 file_names.append(constants.ETC_HOSTS)
2134 local_checksums = utils.FingerprintFiles(file_names)
2136 # Compute the set of hypervisor parameters
2138 for hv_name in hypervisors:
2139 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
2140 for os_name, os_hvp in cluster.os_hvp.items():
2141 for hv_name, hv_params in os_hvp.items():
2144 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
2145 hvp_data.append(("os %s" % os_name, hv_name, full_params))
2146 # TODO: collapse identical parameter values in a single one
2147 for instance in instanceinfo.values():
2148 if not instance.hvparams:
2150 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
2151 cluster.FillHV(instance)))
2152 # and verify them locally
2153 self._VerifyHVP(hvp_data)
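# Illustrative only (hypothetical hypervisor and parameter): each hvp_data
# entry built above is a (source description, hypervisor name, params) tuple,
# e.g. ("cluster", "xen-pvm", {"kernel_path": "/boot/vmlinuz-xenU"}) or
# ("instance inst1", "xen-pvm", {...}).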
2155 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2156 node_verify_param = {
2157 constants.NV_FILELIST: file_names,
2158 constants.NV_NODELIST: [node.name for node in nodeinfo
2159 if not node.offline],
2160 constants.NV_HYPERVISOR: hypervisors,
2161 constants.NV_HVPARAMS: hvp_data,
2162 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2163 node.secondary_ip) for node in nodeinfo
2164 if not node.offline],
2165 constants.NV_INSTANCELIST: hypervisors,
2166 constants.NV_VERSION: None,
2167 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2168 constants.NV_NODESETUP: None,
2169 constants.NV_TIME: None,
2170 constants.NV_MASTERIP: (master_node, master_ip),
2171 constants.NV_OSLIST: None,
2172 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2175 if vg_name is not None:
2176 node_verify_param[constants.NV_VGLIST] = None
2177 node_verify_param[constants.NV_LVLIST] = vg_name
2178 node_verify_param[constants.NV_PVLIST] = [vg_name]
2179 node_verify_param[constants.NV_DRBDLIST] = None
2182 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2184 # Build our expected cluster state
2185 node_image = dict((node.name, self.NodeImage(offline=node.offline,
2187 vm_capable=node.vm_capable))
2188 for node in nodeinfo)
2192 for node in nodeinfo:
2193 path = _SupportsOob(self.cfg, node)
2194 if path and path not in oob_paths:
2195 oob_paths.append(path)
2198 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2200 for instance in instancelist:
2201 inst_config = instanceinfo[instance]
2203 for nname in inst_config.all_nodes:
2204 if nname not in node_image:
2206 gnode = self.NodeImage(name=nname)
2208 node_image[nname] = gnode
2210 inst_config.MapLVsByNode(node_vol_should)
2212 pnode = inst_config.primary_node
2213 node_image[pnode].pinst.append(instance)
2215 for snode in inst_config.secondary_nodes:
2216 nimg = node_image[snode]
2217 nimg.sinst.append(instance)
2218 if pnode not in nimg.sbp:
2219 nimg.sbp[pnode] = []
2220 nimg.sbp[pnode].append(instance)
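# Illustrative only (hypothetical names): for a mirrored instance "inst1"
# with primary "node1" and secondary "node2", the loop above produces
#   node_image["node1"].pinst == ["inst1"]
#   node_image["node2"].sinst == ["inst1"]
#   node_image["node2"].sbp  == {"node1": ["inst1"]}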
2222 # At this point, we have the in-memory data structures complete,
2223 # except for the runtime information, which we'll gather next
2225 # Due to the way our RPC system works, exact response times cannot be
2226 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2227 # time before and after executing the request, we can at least have a time
2229 nvinfo_starttime = time.time()
2230 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2231 self.cfg.GetClusterName())
2232 nvinfo_endtime = time.time()
2234 all_drbd_map = self.cfg.ComputeDRBDMap()
2236 feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
2237 instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
2239 feedback_fn("* Verifying node status")
2243 for node_i in nodeinfo:
2245 nimg = node_image[node]
2249 feedback_fn("* Skipping offline node %s" % (node,))
2253 if node == master_node:
2255 elif node_i.master_candidate:
2256 ntype = "master candidate"
2257 elif node_i.drained:
2263 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2265 msg = all_nvinfo[node].fail_msg
2266 _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2268 nimg.rpc_fail = True
2271 nresult = all_nvinfo[node].payload
2273 nimg.call_ok = self._VerifyNode(node_i, nresult)
2274 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2275 self._VerifyNodeNetwork(node_i, nresult)
2276 self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2279 self._VerifyOob(node_i, nresult)
2282 self._VerifyNodeLVM(node_i, nresult, vg_name)
2283 self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2286 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2287 self._UpdateNodeInstances(node_i, nresult, nimg)
2288 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2289 self._UpdateNodeOS(node_i, nresult, nimg)
2290 if not nimg.os_fail:
2291 if refos_img is None:
2293 self._VerifyNodeOS(node_i, nimg, refos_img)
2295 feedback_fn("* Verifying instance status")
2296 for instance in instancelist:
2298 feedback_fn("* Verifying instance %s" % instance)
2299 inst_config = instanceinfo[instance]
2300 self._VerifyInstance(instance, inst_config, node_image,
2302 inst_nodes_offline = []
2304 pnode = inst_config.primary_node
2305 pnode_img = node_image[pnode]
2306 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2307 self.ENODERPC, pnode, "instance %s, connection to"
2308 " primary node failed", instance)
2310 _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
2311 "instance lives on offline node %s", inst_config.primary_node)
2313 # If the instance is non-redundant we cannot survive losing its primary
2314 # node, so we are not N+1 compliant. On the other hand we have no disk
2315 # templates with more than one secondary so that situation is not well
2317 # FIXME: does not support file-backed instances
2318 if not inst_config.secondary_nodes:
2319 i_non_redundant.append(instance)
2321 _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2322 instance, "instance has multiple secondary nodes: %s",
2323 utils.CommaJoin(inst_config.secondary_nodes),
2324 code=self.ETYPE_WARNING)
2326 if inst_config.disk_template in constants.DTS_INT_MIRROR:
2327 pnode = inst_config.primary_node
2328 instance_nodes = utils.NiceSort(inst_config.all_nodes)
2329 instance_groups = {}
2331 for node in instance_nodes:
2332 instance_groups.setdefault(nodeinfo_byname[node].group,
2336 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2337 # Sort so that we always list the primary node first.
2338 for group, nodes in sorted(instance_groups.items(),
2339 key=lambda (_, nodes): pnode in nodes,
2342 self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2343 instance, "instance has primary and secondary nodes in"
2344 " different groups: %s", utils.CommaJoin(pretty_list),
2345 code=self.ETYPE_WARNING)
2347 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2348 i_non_a_balanced.append(instance)
2350 for snode in inst_config.secondary_nodes:
2351 s_img = node_image[snode]
2352 _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2353 "instance %s, connection to secondary node failed", instance)
2356 inst_nodes_offline.append(snode)
2358 # warn that the instance lives on offline nodes
2359 _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2360 "instance has offline secondary node(s) %s",
2361 utils.CommaJoin(inst_nodes_offline))
2362 # ... or ghost/non-vm_capable nodes
2363 for node in inst_config.all_nodes:
2364 _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2365 "instance lives on ghost node %s", node)
2366 _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2367 instance, "instance lives on non-vm_capable node %s", node)
2369 feedback_fn("* Verifying orphan volumes")
2370 reserved = utils.FieldSet(*cluster.reserved_lvs)
2371 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2373 feedback_fn("* Verifying orphan instances")
2374 self._VerifyOrphanInstances(instancelist, node_image)
2376 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2377 feedback_fn("* Verifying N+1 Memory redundancy")
2378 self._VerifyNPlusOneMemory(node_image, instanceinfo)
2380 feedback_fn("* Other Notes")
2382 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
2383 % len(i_non_redundant))
2385 if i_non_a_balanced:
2386 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
2387 % len(i_non_a_balanced))
2390 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
2393 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
2397 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2398 """Analyze the post-hooks' result
2400 This method analyses the hook result, handles it, and sends some
2401 nicely-formatted feedback back to the user.
2403 @param phase: one of L{constants.HOOKS_PHASE_POST} or
2404 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2405 @param hooks_results: the results of the multi-node hooks rpc call
2406 @param feedback_fn: function used to send feedback back to the caller
2407 @param lu_result: previous Exec result
2408 @return: the new Exec result, based on the previous result
2412 # We only really run POST phase hooks, and are only interested in
2414 if phase == constants.HOOKS_PHASE_POST:
2415 # Used to change hooks' output to proper indentation
2416 feedback_fn("* Hooks Results")
2417 assert hooks_results, "invalid result from hooks"
2419 for node_name in hooks_results:
2420 res = hooks_results[node_name]
2422 test = msg and not res.offline
2423 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2424 "Communication failure in hooks execution: %s", msg)
2425 if res.offline or msg:
2426 # No need to investigate payload if node is offline or gave an error.
2427 # manually override lu_result here as _ErrorIf only
2428 # overrides self.bad
2431 for script, hkr, output in res.payload:
2432 test = hkr == constants.HKR_FAIL
2433 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2434 "Script %s failed, output:", script)
2436 output = self._HOOKS_INDENT_RE.sub(' ', output)
2437 feedback_fn("%s" % output)
2443 class LUClusterVerifyDisks(NoHooksLU):
2444 """Verifies the cluster disks' status.
2449 def ExpandNames(self):
2450 self.needed_locks = {
2451 locking.LEVEL_NODE: locking.ALL_SET,
2452 locking.LEVEL_INSTANCE: locking.ALL_SET,
2454 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2456 def Exec(self, feedback_fn):
2457 """Verify integrity of cluster disks.
2459 @rtype: tuple of three items
2460 @return: a tuple of (dict of node-to-node_error, list of instances
2461 which need activate-disks, dict of instance: (node, volume) for
2465 result = res_nodes, res_instances, res_missing = {}, [], {}
2467 nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
2468 instances = self.cfg.GetAllInstancesInfo().values()
2471 for inst in instances:
2473 if not inst.admin_up:
2475 inst.MapLVsByNode(inst_lvs)
2476 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2477 for node, vol_list in inst_lvs.iteritems():
2478 for vol in vol_list:
2479 nv_dict[(node, vol)] = inst
2484 node_lvs = self.rpc.call_lv_list(nodes, [])
2485 for node, node_res in node_lvs.items():
2486 if node_res.offline:
2488 msg = node_res.fail_msg
2490 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2491 res_nodes[node] = msg
2494 lvs = node_res.payload
2495 for lv_name, (_, _, lv_online) in lvs.items():
2496 inst = nv_dict.pop((node, lv_name), None)
2497 if (not lv_online and inst is not None
2498 and inst.name not in res_instances):
2499 res_instances.append(inst.name)
2501 # any leftover items in nv_dict are missing LVs, let's arrange the
2503 for key, inst in nv_dict.iteritems():
2504 if inst.name not in res_missing:
2505 res_missing[inst.name] = []
2506 res_missing[inst.name].append(key)
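# Illustrative only (hypothetical names): the resulting tuple could look like
#   ({"node3": "rpc failure"}, ["inst2"], {"inst5": [("node1", "xenvg/lv1")]})
# i.e. per-node errors, instances needing activate-disks, and missing LVs.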
2511 class LUClusterRepairDiskSizes(NoHooksLU):
2512 """Verifies the cluster disk sizes.
2517 def ExpandNames(self):
2518 if self.op.instances:
2519 self.wanted_names = []
2520 for name in self.op.instances:
2521 full_name = _ExpandInstanceName(self.cfg, name)
2522 self.wanted_names.append(full_name)
2523 self.needed_locks = {
2524 locking.LEVEL_NODE: [],
2525 locking.LEVEL_INSTANCE: self.wanted_names,
2527 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2529 self.wanted_names = None
2530 self.needed_locks = {
2531 locking.LEVEL_NODE: locking.ALL_SET,
2532 locking.LEVEL_INSTANCE: locking.ALL_SET,
2534 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2536 def DeclareLocks(self, level):
2537 if level == locking.LEVEL_NODE and self.wanted_names is not None:
2538 self._LockInstancesNodes(primary_only=True)
2540 def CheckPrereq(self):
2541 """Check prerequisites.
2543 This only checks the optional instance list against the existing names.
2546 if self.wanted_names is None:
2547 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2549 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2550 in self.wanted_names]
2552 def _EnsureChildSizes(self, disk):
2553 """Ensure children of the disk have the needed disk size.
2555 This is valid mainly for DRBD8 and fixes an issue where the
2556 children have a smaller disk size than the parent.
2558 @param disk: an L{ganeti.objects.Disk} object
2561 if disk.dev_type == constants.LD_DRBD8:
2562 assert disk.children, "Empty children for DRBD8?"
2563 fchild = disk.children[0]
2564 mismatch = fchild.size < disk.size
2566 self.LogInfo("Child disk has size %d, parent %d, fixing",
2567 fchild.size, disk.size)
2568 fchild.size = disk.size
2570 # and we recurse on this child only, not on the metadev
2571 return self._EnsureChildSizes(fchild) or mismatch
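# Illustrative only: for a DRBD8 disk of size 10240 whose data child reports
# 10000, the method above grows the child to 10240 and returns True, so the
# caller in Exec knows the configuration has to be written back.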
2575 def Exec(self, feedback_fn):
2576 """Verify the size of cluster disks.
2579 # TODO: check child disks too
2580 # TODO: check differences in size between primary/secondary nodes
2582 for instance in self.wanted_instances:
2583 pnode = instance.primary_node
2584 if pnode not in per_node_disks:
2585 per_node_disks[pnode] = []
2586 for idx, disk in enumerate(instance.disks):
2587 per_node_disks[pnode].append((instance, idx, disk))
2590 for node, dskl in per_node_disks.items():
2591 newl = [v[2].Copy() for v in dskl]
2593 self.cfg.SetDiskID(dsk, node)
2594 result = self.rpc.call_blockdev_getsize(node, newl)
2596 self.LogWarning("Failure in blockdev_getsize call to node"
2597 " %s, ignoring", node)
2599 if len(result.payload) != len(dskl):
2600 logging.warning("Invalid result from node %s: len(dskl)=%d,"
2601 " result.payload=%s", node, len(dskl), result.payload)
2602 self.LogWarning("Invalid result from node %s, ignoring node results",
2605 for ((instance, idx, disk), size) in zip(dskl, result.payload):
2607 self.LogWarning("Disk %d of instance %s did not return size"
2608 " information, ignoring", idx, instance.name)
2610 if not isinstance(size, (int, long)):
2611 self.LogWarning("Disk %d of instance %s did not return valid"
2612 " size information, ignoring", idx, instance.name)
2615 if size != disk.size:
2616 self.LogInfo("Disk %d of instance %s has mismatched size,"
2617 " correcting: recorded %d, actual %d", idx,
2618 instance.name, disk.size, size)
2620 self.cfg.Update(instance, feedback_fn)
2621 changed.append((instance.name, idx, size))
2622 if self._EnsureChildSizes(disk):
2623 self.cfg.Update(instance, feedback_fn)
2624 changed.append((instance.name, idx, disk.size))
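# Illustrative only (hypothetical values): every entry appended to 'changed'
# is an (instance name, disk index, new size) tuple, e.g. ("inst1", 0, 10240).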
2628 class LUClusterRename(LogicalUnit):
2629 """Rename the cluster.
2632 HPATH = "cluster-rename"
2633 HTYPE = constants.HTYPE_CLUSTER
2635 def BuildHooksEnv(self):
2640 "OP_TARGET": self.cfg.GetClusterName(),
2641 "NEW_NAME": self.op.name,
2643 mn = self.cfg.GetMasterNode()
2644 all_nodes = self.cfg.GetNodeList()
2645 return env, [mn], all_nodes
2647 def CheckPrereq(self):
2648 """Verify that the passed name is a valid one.
2651 hostname = netutils.GetHostname(name=self.op.name,
2652 family=self.cfg.GetPrimaryIPFamily())
2654 new_name = hostname.name
2655 self.ip = new_ip = hostname.ip
2656 old_name = self.cfg.GetClusterName()
2657 old_ip = self.cfg.GetMasterIP()
2658 if new_name == old_name and new_ip == old_ip:
2659 raise errors.OpPrereqError("Neither the name nor the IP address of the"
2660 " cluster has changed",
2662 if new_ip != old_ip:
2663 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2664 raise errors.OpPrereqError("The given cluster IP address (%s) is"
2665 " reachable on the network" %
2666 new_ip, errors.ECODE_NOTUNIQUE)
2668 self.op.name = new_name
2670 def Exec(self, feedback_fn):
2671 """Rename the cluster.
2674 clustername = self.op.name
2677 # shutdown the master IP
2678 master = self.cfg.GetMasterNode()
2679 result = self.rpc.call_node_stop_master(master, False)
2680 result.Raise("Could not disable the master role")
2683 cluster = self.cfg.GetClusterInfo()
2684 cluster.cluster_name = clustername
2685 cluster.master_ip = ip
2686 self.cfg.Update(cluster, feedback_fn)
2688 # update the known hosts file
2689 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2690 node_list = self.cfg.GetOnlineNodeList()
2692 node_list.remove(master)
2695 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
2697 result = self.rpc.call_node_start_master(master, False, False)
2698 msg = result.fail_msg
2700 self.LogWarning("Could not re-enable the master role on"
2701 " the master, please restart manually: %s", msg)
2706 class LUClusterSetParams(LogicalUnit):
2707 """Change the parameters of the cluster.
2710 HPATH = "cluster-modify"
2711 HTYPE = constants.HTYPE_CLUSTER
2714 def CheckArguments(self):
2718 if self.op.uid_pool:
2719 uidpool.CheckUidPool(self.op.uid_pool)
2721 if self.op.add_uids:
2722 uidpool.CheckUidPool(self.op.add_uids)
2724 if self.op.remove_uids:
2725 uidpool.CheckUidPool(self.op.remove_uids)
2727 def ExpandNames(self):
2728 # FIXME: in the future maybe other cluster params won't require checking on
2729 # all nodes to be modified.
2730 self.needed_locks = {
2731 locking.LEVEL_NODE: locking.ALL_SET,
2733 self.share_locks[locking.LEVEL_NODE] = 1
2735 def BuildHooksEnv(self):
2740 "OP_TARGET": self.cfg.GetClusterName(),
2741 "NEW_VG_NAME": self.op.vg_name,
2743 mn = self.cfg.GetMasterNode()
2744 return env, [mn], [mn]
2746 def CheckPrereq(self):
2747 """Check prerequisites.
2749 This checks that the given parameters don't conflict and
2750 that the given volume group is valid.
2753 if self.op.vg_name is not None and not self.op.vg_name:
2754 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2755 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2756 " instances exist", errors.ECODE_INVAL)
2758 if self.op.drbd_helper is not None and not self.op.drbd_helper:
2759 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2760 raise errors.OpPrereqError("Cannot disable drbd helper while"
2761 " drbd-based instances exist",
2764 node_list = self.acquired_locks[locking.LEVEL_NODE]
2766 # if vg_name not None, checks given volume group on all nodes
2768 vglist = self.rpc.call_vg_list(node_list)
2769 for node in node_list:
2770 msg = vglist[node].fail_msg
2772 # ignoring down node
2773 self.LogWarning("Error while gathering data on node %s"
2774 " (ignoring node): %s", node, msg)
2776 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2778 constants.MIN_VG_SIZE)
2780 raise errors.OpPrereqError("Error on node '%s': %s" %
2781 (node, vgstatus), errors.ECODE_ENVIRON)
2783 if self.op.drbd_helper:
2784 # checks given drbd helper on all nodes
2785 helpers = self.rpc.call_drbd_helper(node_list)
2786 for node in node_list:
2787 ninfo = self.cfg.GetNodeInfo(node)
2789 self.LogInfo("Not checking drbd helper on offline node %s", node)
2791 msg = helpers[node].fail_msg
2793 raise errors.OpPrereqError("Error checking drbd helper on node"
2794 " '%s': %s" % (node, msg),
2795 errors.ECODE_ENVIRON)
2796 node_helper = helpers[node].payload
2797 if node_helper != self.op.drbd_helper:
2798 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2799 (node, node_helper), errors.ECODE_ENVIRON)
2801 self.cluster = cluster = self.cfg.GetClusterInfo()
2802 # validate params changes
2803 if self.op.beparams:
2804 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2805 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2807 if self.op.ndparams:
2808 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2809 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2811 if self.op.nicparams:
2812 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2813 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2814 objects.NIC.CheckParameterSyntax(self.new_nicparams)
2817 # check all instances for consistency
2818 for instance in self.cfg.GetAllInstancesInfo().values():
2819 for nic_idx, nic in enumerate(instance.nics):
2820 params_copy = copy.deepcopy(nic.nicparams)
2821 params_filled = objects.FillDict(self.new_nicparams, params_copy)
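# Illustrative only (hypothetical values): FillDict overlays the per-NIC
# values on the new cluster defaults, e.g. defaults {"mode": "bridged",
# "link": "xen-br0"} plus a NIC override {"link": "br1"} give
# {"mode": "bridged", "link": "br1"}.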
2823 # check parameter syntax
2825 objects.NIC.CheckParameterSyntax(params_filled)
2826 except errors.ConfigurationError, err:
2827 nic_errors.append("Instance %s, nic/%d: %s" %
2828 (instance.name, nic_idx, err))
2830 # if we're moving instances to routed, check that they have an ip
2831 target_mode = params_filled[constants.NIC_MODE]
2832 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2833 nic_errors.append("Instance %s, nic/%d: routed NIC with no IP" %
2834 (instance.name, nic_idx))
2836 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2837 "\n".join(nic_errors))
2839 # hypervisor list/parameters
2840 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2841 if self.op.hvparams:
2842 for hv_name, hv_dict in self.op.hvparams.items():
2843 if hv_name not in self.new_hvparams:
2844 self.new_hvparams[hv_name] = hv_dict
2846 self.new_hvparams[hv_name].update(hv_dict)
2848 # os hypervisor parameters
2849 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2851 for os_name, hvs in self.op.os_hvp.items():
2852 if os_name not in self.new_os_hvp:
2853 self.new_os_hvp[os_name] = hvs
2855 for hv_name, hv_dict in hvs.items():
2856 if hv_name not in self.new_os_hvp[os_name]:
2857 self.new_os_hvp[os_name][hv_name] = hv_dict
2859 self.new_os_hvp[os_name][hv_name].update(hv_dict)
2862 self.new_osp = objects.FillDict(cluster.osparams, {})
2863 if self.op.osparams:
2864 for os_name, osp in self.op.osparams.items():
2865 if os_name not in self.new_osp:
2866 self.new_osp[os_name] = {}
2868 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2871 if not self.new_osp[os_name]:
2872 # we removed all parameters
2873 del self.new_osp[os_name]
2875 # check the parameter validity (remote check)
2876 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2877 os_name, self.new_osp[os_name])
2879 # changes to the hypervisor list
2880 if self.op.enabled_hypervisors is not None:
2881 self.hv_list = self.op.enabled_hypervisors
2882 for hv in self.hv_list:
2883 # if the hypervisor doesn't already exist in the cluster
2884 # hvparams, we initialize it to empty, and then (in both
2885 # cases) we make sure to fill the defaults, as we might not
2886 # have a complete defaults list if the hypervisor wasn't
2888 if hv not in new_hvp:
2890 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2891 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2893 self.hv_list = cluster.enabled_hypervisors
2895 if self.op.hvparams or self.op.enabled_hypervisors is not None:
2896 # either the enabled list has changed, or the parameters have, validate
2897 for hv_name, hv_params in self.new_hvparams.items():
2898 if ((self.op.hvparams and hv_name in self.op.hvparams) or
2899 (self.op.enabled_hypervisors and
2900 hv_name in self.op.enabled_hypervisors)):
2901 # either this is a new hypervisor, or its parameters have changed
2902 hv_class = hypervisor.GetHypervisor(hv_name)
2903 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2904 hv_class.CheckParameterSyntax(hv_params)
2905 _CheckHVParams(self, node_list, hv_name, hv_params)
2908 # no need to check any newly-enabled hypervisors, since the
2909 # defaults have already been checked in the above code-block
2910 for os_name, os_hvp in self.new_os_hvp.items():
2911 for hv_name, hv_params in os_hvp.items():
2912 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2913 # we need to fill in the new os_hvp on top of the actual hv_p
2914 cluster_defaults = self.new_hvparams.get(hv_name, {})
2915 new_osp = objects.FillDict(cluster_defaults, hv_params)
2916 hv_class = hypervisor.GetHypervisor(hv_name)
2917 hv_class.CheckParameterSyntax(new_osp)
2918 _CheckHVParams(self, node_list, hv_name, new_osp)
2920 if self.op.default_iallocator:
2921 alloc_script = utils.FindFile(self.op.default_iallocator,
2922 constants.IALLOCATOR_SEARCH_PATH,
2924 if alloc_script is None:
2925 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2926 " specified" % self.op.default_iallocator,
2929 def Exec(self, feedback_fn):
2930 """Change the parameters of the cluster.
2933 if self.op.vg_name is not None:
2934 new_volume = self.op.vg_name
2937 if new_volume != self.cfg.GetVGName():
2938 self.cfg.SetVGName(new_volume)
2940 feedback_fn("Cluster LVM configuration already in desired"
2941 " state, not changing")
2942 if self.op.drbd_helper is not None:
2943 new_helper = self.op.drbd_helper
2946 if new_helper != self.cfg.GetDRBDHelper():
2947 self.cfg.SetDRBDHelper(new_helper)
2949 feedback_fn("Cluster DRBD helper already in desired state,"
2951 if self.op.hvparams:
2952 self.cluster.hvparams = self.new_hvparams
2954 self.cluster.os_hvp = self.new_os_hvp
2955 if self.op.enabled_hypervisors is not None:
2956 self.cluster.hvparams = self.new_hvparams
2957 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2958 if self.op.beparams:
2959 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2960 if self.op.nicparams:
2961 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2962 if self.op.osparams:
2963 self.cluster.osparams = self.new_osp
2964 if self.op.ndparams:
2965 self.cluster.ndparams = self.new_ndparams
2967 if self.op.candidate_pool_size is not None:
2968 self.cluster.candidate_pool_size = self.op.candidate_pool_size
2969 # we need to update the pool size here, otherwise the save will fail
2970 _AdjustCandidatePool(self, [])
2972 if self.op.maintain_node_health is not None:
2973 self.cluster.maintain_node_health = self.op.maintain_node_health
2975 if self.op.prealloc_wipe_disks is not None:
2976 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
2978 if self.op.add_uids is not None:
2979 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2981 if self.op.remove_uids is not None:
2982 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2984 if self.op.uid_pool is not None:
2985 self.cluster.uid_pool = self.op.uid_pool
2987 if self.op.default_iallocator is not None:
2988 self.cluster.default_iallocator = self.op.default_iallocator
2990 if self.op.reserved_lvs is not None:
2991 self.cluster.reserved_lvs = self.op.reserved_lvs
2993 def helper_os(aname, mods, desc):
2995 lst = getattr(self.cluster, aname)
2996 for key, val in mods:
2997 if key == constants.DDM_ADD:
2999 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
3002 elif key == constants.DDM_REMOVE:
3006 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
3008 raise errors.ProgrammerError("Invalid modification '%s'" % key)
3010 if self.op.hidden_os:
3011 helper_os("hidden_os", self.op.hidden_os, "hidden")
3013 if self.op.blacklisted_os:
3014 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
3016 if self.op.master_netdev:
3017 master = self.cfg.GetMasterNode()
3018 feedback_fn("Shutting down master ip on the current netdev (%s)" %
3019 self.cluster.master_netdev)
3020 result = self.rpc.call_node_stop_master(master, False)
3021 result.Raise("Could not disable the master ip")
3022 feedback_fn("Changing master_netdev from %s to %s" %
3023 (self.cluster.master_netdev, self.op.master_netdev))
3024 self.cluster.master_netdev = self.op.master_netdev
3026 self.cfg.Update(self.cluster, feedback_fn)
3028 if self.op.master_netdev:
3029 feedback_fn("Starting the master ip on the new master netdev (%s)" %
3030 self.op.master_netdev)
3031 result = self.rpc.call_node_start_master(master, False, False)
3033 self.LogWarning("Could not re-enable the master ip on"
3034 " the master, please restart manually: %s",
3038 def _UploadHelper(lu, nodes, fname):
3039 """Helper for uploading a file and showing warnings.
3042 if os.path.exists(fname):
3043 result = lu.rpc.call_upload_file(nodes, fname)
3044 for to_node, to_result in result.items():
3045 msg = to_result.fail_msg
3047 msg = ("Copy of file %s to node %s failed: %s" %
3048 (fname, to_node, msg))
3049 lu.proc.LogWarning(msg)
3052 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
3053 """Distribute additional files which are part of the cluster configuration.
3055 ConfigWriter takes care of distributing the config and ssconf files, but
3056 there are more files which should be distributed to all nodes. This function
3057 makes sure those are copied.
3059 @param lu: calling logical unit
3060 @param additional_nodes: list of nodes not in the config to distribute to
3061 @type additional_vm: boolean
3062 @param additional_vm: whether the additional nodes are vm-capable or not
3065 # 1. Gather target nodes
3066 myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
3067 dist_nodes = lu.cfg.GetOnlineNodeList()
3068 nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
3069 vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
3070 if additional_nodes is not None:
3071 dist_nodes.extend(additional_nodes)
3073 vm_nodes.extend(additional_nodes)
3074 if myself.name in dist_nodes:
3075 dist_nodes.remove(myself.name)
3076 if myself.name in vm_nodes:
3077 vm_nodes.remove(myself.name)
3079 # 2. Gather files to distribute
3080 dist_files = set([constants.ETC_HOSTS,
3081 constants.SSH_KNOWN_HOSTS_FILE,
3082 constants.RAPI_CERT_FILE,
3083 constants.RAPI_USERS_FILE,
3084 constants.CONFD_HMAC_KEY,
3085 constants.CLUSTER_DOMAIN_SECRET_FILE,
3089 enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
3090 for hv_name in enabled_hypervisors:
3091 hv_class = hypervisor.GetHypervisor(hv_name)
3092 vm_files.update(hv_class.GetAncillaryFiles())
3094 # 3. Perform the files upload
3095 for fname in dist_files:
3096 _UploadHelper(lu, dist_nodes, fname)
3097 for fname in vm_files:
3098 _UploadHelper(lu, vm_nodes, fname)
3101 class LUClusterRedistConf(NoHooksLU):
3102 """Force the redistribution of cluster configuration.
3104 This is a very simple LU.
3109 def ExpandNames(self):
3110 self.needed_locks = {
3111 locking.LEVEL_NODE: locking.ALL_SET,
3113 self.share_locks[locking.LEVEL_NODE] = 1
3115 def Exec(self, feedback_fn):
3116 """Redistribute the configuration.
3119 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
3120 _RedistributeAncillaryFiles(self)
3123 def _WaitForSync(lu, instance, disks=None, oneshot=False):
3124 """Sleep and poll for an instance's disks to sync.
3127 if not instance.disks or disks is not None and not disks:
3130 disks = _ExpandCheckDisks(instance, disks)
3133 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3135 node = instance.primary_node
3138 lu.cfg.SetDiskID(dev, node)
3140 # TODO: Convert to utils.Retry
3143 degr_retries = 10 # in seconds, as we sleep 1 second each time
3147 cumul_degraded = False
3148 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3149 msg = rstats.fail_msg
3151 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3154 raise errors.RemoteError("Can't contact node %s for mirror data,"
3155 " aborting." % node)
3158 rstats = rstats.payload
3160 for i, mstat in enumerate(rstats):
3162 lu.LogWarning("Can't compute data for node %s/%s",
3163 node, disks[i].iv_name)
3166 cumul_degraded = (cumul_degraded or
3167 (mstat.is_degraded and mstat.sync_percent is None))
3168 if mstat.sync_percent is not None:
3170 if mstat.estimated_time is not None:
3171 rem_time = ("%s remaining (estimated)" %
3172 utils.FormatSeconds(mstat.estimated_time))
3173 max_time = mstat.estimated_time
3175 rem_time = "no time estimate"
3176 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3177 (disks[i].iv_name, mstat.sync_percent, rem_time))
3179 # if we're done but degraded, let's do a few small retries, to
3180 # make sure we see a stable and not transient situation; therefore
3181 # we force restart of the loop
3182 if (done or oneshot) and cumul_degraded and degr_retries > 0:
3183 logging.info("Degraded disks found, %d retries left", degr_retries)
3191 time.sleep(min(60, max_time))
3194 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3195 return not cumul_degraded
3198 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3199 """Check that mirrors are not degraded.
3201 The ldisk parameter, if True, will change the test from the
3202 is_degraded attribute (which represents overall non-ok status for
3203 the device(s)) to the ldisk (representing the local storage status).
3206 lu.cfg.SetDiskID(dev, node)
3210 if on_primary or dev.AssembleOnSecondary():
3211 rstats = lu.rpc.call_blockdev_find(node, dev)
3212 msg = rstats.fail_msg
3214 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3216 elif not rstats.payload:
3217 lu.LogWarning("Can't find disk on node %s", node)
3221 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3223 result = result and not rstats.payload.is_degraded
3226 for child in dev.children:
3227 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3232 class LUOobCommand(NoHooksLU):
3233 """Logical unit for OOB handling.
3237 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
3239 def CheckPrereq(self):
3240 """Check prerequisites.
3243 - the node exists in the configuration
3246 Any errors are signaled by raising errors.OpPrereqError.
3250 self.master_node = self.cfg.GetMasterNode()
3252 assert self.op.power_delay >= 0.0
3254 if self.op.node_names:
3255 if self.op.command in self._SKIP_MASTER:
3256 if self.master_node in self.op.node_names:
3257 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
3258 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
3260 if master_oob_handler:
3261 additional_text = ("Run '%s %s %s' if you want to operate on the"
3262 " master regardless") % (master_oob_handler,
3266 additional_text = "The master node does not support out-of-band"
3268 raise errors.OpPrereqError(("Operating on the master node %s is not"
3269 " allowed for %s\n%s") %
3270 (self.master_node, self.op.command,
3271 additional_text), errors.ECODE_INVAL)
3273 self.op.node_names = self.cfg.GetNodeList()
3274 if self.op.command in self._SKIP_MASTER:
3275 self.op.node_names.remove(self.master_node)
3277 if self.op.command in self._SKIP_MASTER:
3278 assert self.master_node not in self.op.node_names
3280 for node_name in self.op.node_names:
3281 node = self.cfg.GetNodeInfo(node_name)
3284 raise errors.OpPrereqError("Node %s not found" % node_name,
3287 self.nodes.append(node)
3289 if (not self.op.ignore_status and
3290 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
3291 raise errors.OpPrereqError(("Cannot power off node %s because it is"
3292 " not marked offline") % node_name,
3295 def ExpandNames(self):
3296 """Gather locks we need.
3299 if self.op.node_names:
3300 self.op.node_names = [_ExpandNodeName(self.cfg, name)
3301 for name in self.op.node_names]
3302 lock_names = self.op.node_names
3304 lock_names = locking.ALL_SET
3306 self.needed_locks = {
3307 locking.LEVEL_NODE: lock_names,
3310 def Exec(self, feedback_fn):
3311 """Execute OOB and return result if we expect any.
3314 master_node = self.master_node
3317 for idx, node in enumerate(self.nodes):
3318 node_entry = [(constants.RS_NORMAL, node.name)]
3319 ret.append(node_entry)
3321 oob_program = _SupportsOob(self.cfg, node)
3324 node_entry.append((constants.RS_UNAVAIL, None))
3327 logging.info("Executing out-of-band command '%s' using '%s' on %s",
3328 self.op.command, oob_program, node.name)
3329 result = self.rpc.call_run_oob(master_node, oob_program,
3330 self.op.command, node.name,
3334 self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
3335 node.name, result.fail_msg)
3336 node_entry.append((constants.RS_NODATA, None))
3339 self._CheckPayload(result)
3340 except errors.OpExecError, err:
3341 self.LogWarning("The payload returned by '%s' is not valid: %s",
3343 node_entry.append((constants.RS_NODATA, None))
3345 if self.op.command == constants.OOB_HEALTH:
3346 # For health we should log important events
3347 for item, status in result.payload:
3348 if status in [constants.OOB_STATUS_WARNING,
3349 constants.OOB_STATUS_CRITICAL]:
3350 self.LogWarning("On node '%s' item '%s' has status '%s'",
3351 node.name, item, status)
3353 if self.op.command == constants.OOB_POWER_ON:
3355 elif self.op.command == constants.OOB_POWER_OFF:
3356 node.powered = False
3357 elif self.op.command == constants.OOB_POWER_STATUS:
3358 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
3359 if powered != node.powered:
3360 logging.warning(("Recorded power state (%s) of node '%s' does not"
3361 " match actual power state (%s)"), node.powered,
3364 # For configuration changing commands we should update the node
3365 if self.op.command in (constants.OOB_POWER_ON,
3366 constants.OOB_POWER_OFF):
3367 self.cfg.Update(node, feedback_fn)
3369 node_entry.append((constants.RS_NORMAL, result.payload))
3371 if (self.op.command == constants.OOB_POWER_ON and
3372 idx < len(self.nodes) - 1):
3373 time.sleep(self.op.power_delay)
3377 def _CheckPayload(self, result):
3378 """Checks if the payload is valid.
3380 @param result: RPC result
3381 @raises errors.OpExecError: If payload is not valid
3385 if self.op.command == constants.OOB_HEALTH:
3386 if not isinstance(result.payload, list):
3387 errs.append("command 'health' is expected to return a list but got %s" %
3388 type(result.payload))
3390 for item, status in result.payload:
3391 if status not in constants.OOB_STATUSES:
3392 errs.append("health item '%s' has invalid status '%s'" %
3395 if self.op.command == constants.OOB_POWER_STATUS:
3396 if not isinstance(result.payload, dict):
3397 errs.append("power-status is expected to return a dict but got %s" %
3398 type(result.payload))
3400 if self.op.command in [
3401 constants.OOB_POWER_ON,
3402 constants.OOB_POWER_OFF,
3403 constants.OOB_POWER_CYCLE,
3405 if result.payload is not None:
3406 errs.append("%s is expected to not return payload but got '%s'" %
3407 (self.op.command, result.payload))
3410 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
3411 utils.CommaJoin(errs))
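# Illustrative only: _CheckPayload accepts e.g. a list such as
# [("fan1", constants.OOB_STATUS_WARNING)] for OOB_HEALTH, a dict keyed by
# constants.OOB_POWER_STATUS_POWERED for OOB_POWER_STATUS, and no payload at
# all for the power-on/off/cycle commands.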
3413 class _OsQuery(_QueryBase):
3414 FIELDS = query.OS_FIELDS
3416 def ExpandNames(self, lu):
3417 # Lock all nodes in shared mode
3418 # Temporary removal of locks, should be reverted later
3419 # TODO: reintroduce locks when they are lighter-weight
3420 lu.needed_locks = {}
3421 #self.share_locks[locking.LEVEL_NODE] = 1
3422 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3424 # The following variables interact with _QueryBase._GetNames
3426 self.wanted = self.names
3428 self.wanted = locking.ALL_SET
3430 self.do_locking = self.use_locking
3432 def DeclareLocks(self, lu, level):
3436 def _DiagnoseByOS(rlist):
3437 """Remaps a per-node return list into a per-OS, per-node dictionary
3439 @param rlist: a map with node names as keys and OS objects as values
3442 @return: a dictionary with osnames as keys and as value another
3443 map, with nodes as keys and tuples of (path, status, diagnose,
3444 variants, parameters, api_versions) as values, eg::
3446 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], [], []),
3447 (/srv/..., False, "invalid api", [], [], [])],
3448 "node2": [(/srv/..., True, "", [], [], [])]}
3453 # we build here the list of nodes that didn't fail the RPC (at RPC
3454 # level), so that nodes with a non-responding node daemon don't
3455 # make all OSes invalid
3456 good_nodes = [node_name for node_name in rlist
3457 if not rlist[node_name].fail_msg]
3458 for node_name, nr in rlist.items():
3459 if nr.fail_msg or not nr.payload:
3461 for (name, path, status, diagnose, variants,
3462 params, api_versions) in nr.payload:
3463 if name not in all_os:
3464 # build a list of nodes for this os containing empty lists
3465 # for each node in node_list
3467 for nname in good_nodes:
3468 all_os[name][nname] = []
3469 # convert params from [name, help] to (name, help)
3470 params = [tuple(v) for v in params]
3471 all_os[name][node_name].append((path, status, diagnose,
3472 variants, params, api_versions))
3475 def _GetQueryData(self, lu):
3476 """Computes the list of OSes and their attributes.
3479 # Locking is not used
3480 assert not (lu.acquired_locks or self.do_locking or self.use_locking)
3483 assert "valid" in self.FIELDS
3484 assert "hidden" in self.FIELDS
3485 assert "blacklisted" in self.FIELDS
3487 valid_nodes = [node.name
3488 for node in lu.cfg.GetAllNodesInfo().values()
3489 if not node.offline and node.vm_capable]
3490 pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
3491 cluster = lu.cfg.GetClusterInfo()
3493 # Build list of used field names
3494 fields = [fdef.name for fdef in self.query.GetFields()]
3498 for (os_name, os_data) in pol.items():
3499 info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
3500 hidden=(os_name in cluster.hidden_os),
3501 blacklisted=(os_name in cluster.blacklisted_os))
3505 api_versions = set()
3507 for idx, osl in enumerate(os_data.values()):
3508 info.valid = bool(info.valid and osl and osl[0][1])
3512 (node_variants, node_params, node_api) = osl[0][3:6]
3515 variants.update(node_variants)
3516 parameters.update(node_params)
3517 api_versions.update(node_api)
3519 # Filter out inconsistent values
3520 variants.intersection_update(node_variants)
3521 parameters.intersection_update(node_params)
3522 api_versions.intersection_update(node_api)
3524 info.variants = list(variants)
3525 info.parameters = list(parameters)
3526 info.api_versions = list(api_versions)
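# Illustrative only (hypothetical variants): if node1 reports variants
# set(["lenny", "squeeze"]) and node2 only set(["squeeze"]), the
# intersection above leaves info.variants == ["squeeze"].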
3528 # TODO: Move this to filters provided by the client
3529 if (("hidden" not in fields and info.hidden) or
3530 ("blacklisted" not in fields and info.blacklisted) or
3531 ("valid" not in fields and not info.valid)):
3534 data[os_name] = info
3536 # Prepare data in requested order
3537 return [data[name] for name in self._GetNames(lu, pol.keys(), None)
3541 class LUOsDiagnose(NoHooksLU):
3542 """Logical unit for OS diagnose/query.
3547 def CheckArguments(self):
3548 self.oq = _OsQuery(qlang.MakeSimpleFilter("name", self.op.names),
3549 self.op.output_fields, False)
3551 def ExpandNames(self):
3552 self.oq.ExpandNames(self)
3554 def Exec(self, feedback_fn):
3555 return self.oq.OldStyleQuery(self)
3558 class LUNodeRemove(LogicalUnit):
3559 """Logical unit for removing a node.
3562 HPATH = "node-remove"
3563 HTYPE = constants.HTYPE_NODE
3565 def BuildHooksEnv(self):
3568 This doesn't run on the target node in the pre phase as a failed
3569 node would then be impossible to remove.
3573 "OP_TARGET": self.op.node_name,
3574 "NODE_NAME": self.op.node_name,
3576 all_nodes = self.cfg.GetNodeList()
3578 all_nodes.remove(self.op.node_name)
3580 logging.warning("Node %s which is about to be removed not found"
3581 " in the all nodes list", self.op.node_name)
3582 return env, all_nodes, all_nodes
3584 def CheckPrereq(self):
3585 """Check prerequisites.
3588 - the node exists in the configuration
3589 - it does not have primary or secondary instances
3590 - it's not the master
3592 Any errors are signaled by raising errors.OpPrereqError.
3595 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3596 node = self.cfg.GetNodeInfo(self.op.node_name)
3597 assert node is not None
3599 instance_list = self.cfg.GetInstanceList()
3601 masternode = self.cfg.GetMasterNode()
3602 if node.name == masternode:
3603 raise errors.OpPrereqError("Node is the master node,"
3604 " you need to failover first.",
3607 for instance_name in instance_list:
3608 instance = self.cfg.GetInstanceInfo(instance_name)
3609 if node.name in instance.all_nodes:
3610 raise errors.OpPrereqError("Instance %s is still running on the node,"
3611 " please remove first." % instance_name,
3613 self.op.node_name = node.name
3616 def Exec(self, feedback_fn):
3617 """Removes the node from the cluster.
3621 logging.info("Stopping the node daemon and removing configs from node %s",
3624 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3626 # Promote nodes to master candidate as needed
3627 _AdjustCandidatePool(self, exceptions=[node.name])
3628 self.context.RemoveNode(node.name)
3630 # Run post hooks on the node before it's removed
3631 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3633 hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3635 # pylint: disable-msg=W0702
3636 self.LogWarning("Errors occurred running hooks on %s" % node.name)
3638 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3639 msg = result.fail_msg
3641 self.LogWarning("Errors encountered on the remote node while leaving"
3642 " the cluster: %s", msg)
3644 # Remove node from our /etc/hosts
3645 if self.cfg.GetClusterInfo().modify_etc_hosts:
3646 master_node = self.cfg.GetMasterNode()
3647 result = self.rpc.call_etc_hosts_modify(master_node,
3648 constants.ETC_HOSTS_REMOVE,
3650 result.Raise("Can't update hosts file with new host data")
3651 _RedistributeAncillaryFiles(self)
3654 class _NodeQuery(_QueryBase):
3655 FIELDS = query.NODE_FIELDS
3657 def ExpandNames(self, lu):
3658 lu.needed_locks = {}
3659 lu.share_locks[locking.LEVEL_NODE] = 1
3662 self.wanted = _GetWantedNodes(lu, self.names)
3664 self.wanted = locking.ALL_SET
3666 self.do_locking = (self.use_locking and
3667 query.NQ_LIVE in self.requested_data)
3670 # if we don't request only static fields, we need to lock the nodes
3671 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
3673 def DeclareLocks(self, lu, level):
3676 def _GetQueryData(self, lu):
3677 """Computes the list of nodes and their attributes.
3680 all_info = lu.cfg.GetAllNodesInfo()
3682 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
3684 # Gather data as requested
3685 if query.NQ_LIVE in self.requested_data:
3686 # filter out non-vm_capable nodes
3687 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
3689 node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
3690 lu.cfg.GetHypervisorType())
3691 live_data = dict((name, nresult.payload)
3692 for (name, nresult) in node_data.items()
3693 if not nresult.fail_msg and nresult.payload)
3697 if query.NQ_INST in self.requested_data:
3698 node_to_primary = dict([(name, set()) for name in nodenames])
3699 node_to_secondary = dict([(name, set()) for name in nodenames])
3701 inst_data = lu.cfg.GetAllInstancesInfo()
3703 for inst in inst_data.values():
3704 if inst.primary_node in node_to_primary:
3705 node_to_primary[inst.primary_node].add(inst.name)
3706 for secnode in inst.secondary_nodes:
3707 if secnode in node_to_secondary:
3708 node_to_secondary[secnode].add(inst.name)
3710 node_to_primary = None
3711 node_to_secondary = None
3713 if query.NQ_OOB in self.requested_data:
3714 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
3715 for name, node in all_info.iteritems())
3719 if query.NQ_GROUP in self.requested_data:
3720 groups = lu.cfg.GetAllNodeGroupsInfo()
3724 return query.NodeQueryData([all_info[name] for name in nodenames],
3725 live_data, lu.cfg.GetMasterNode(),
3726 node_to_primary, node_to_secondary, groups,
3727 oob_support, lu.cfg.GetClusterInfo())
3730 class LUNodeQuery(NoHooksLU):
3731 """Logical unit for querying nodes.
3734 # pylint: disable-msg=W0142
3737 def CheckArguments(self):
3738 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
3739 self.op.output_fields, self.op.use_locking)
3741 def ExpandNames(self):
3742 self.nq.ExpandNames(self)
3744 def Exec(self, feedback_fn):
3745 return self.nq.OldStyleQuery(self)
3748 class LUNodeQueryvols(NoHooksLU):
3749 """Logical unit for getting volumes on node(s).
3753 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3754 _FIELDS_STATIC = utils.FieldSet("node")
3756 def CheckArguments(self):
3757 _CheckOutputFields(static=self._FIELDS_STATIC,
3758 dynamic=self._FIELDS_DYNAMIC,
3759 selected=self.op.output_fields)
3761 def ExpandNames(self):
3762 self.needed_locks = {}
3763 self.share_locks[locking.LEVEL_NODE] = 1
3764 if not self.op.nodes:
3765 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
3770 def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
3774 nodenames = self.acquired_locks[locking.LEVEL_NODE]
3775 volumes = self.rpc.call_node_volumes(nodenames)
3777 ilist = [self.cfg.GetInstanceInfo(iname) for iname
3778 in self.cfg.GetInstanceList()]
3780 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])
      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                break
            else:
              inst = None
            val = inst and inst.name or '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
3826 class LUNodeQueryStorage(NoHooksLU):
3827 """Logical unit for getting information on storage units on node(s).
3830 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3833 def CheckArguments(self):
3834 _CheckOutputFields(static=self._FIELDS_STATIC,
3835 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3836 selected=self.op.output_fields)
3838 def ExpandNames(self):
3839 self.needed_locks = {}
3840 self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3848 def Exec(self, feedback_fn):
3849 """Computes the list of nodes and their attributes.
3852 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3854 # Always get name to sort by
3855 if constants.SF_NAME in self.op.output_fields:
3856 fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields
3860 # Never ask for node or type as it's only known to the LU
3861 for extra in [constants.SF_NODE, constants.SF_TYPE]:
3862 while extra in fields:
3863 fields.remove(extra)
3865 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3866 name_idx = field_idx[constants.SF_NAME]
3868 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3869 data = self.rpc.call_storage_list(self.nodes,
3870 self.op.storage_type, st_args,
3871 self.op.name, fields)
    result = []
    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]
        out = []
        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)
          out.append(val)
        result.append(out)

    return result
3909 class _InstanceQuery(_QueryBase):
3910 FIELDS = query.INSTANCE_FIELDS
3912 def ExpandNames(self, lu):
3913 lu.needed_locks = {}
3914 lu.share_locks[locking.LEVEL_INSTANCE] = 1
3915 lu.share_locks[locking.LEVEL_NODE] = 1
    if self.names:
      self.wanted = _GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3929 def DeclareLocks(self, lu, level):
3930 if level == locking.LEVEL_NODE and self.do_locking:
3931 lu._LockInstancesNodes() # pylint: disable-msg=W0212
3933 def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
3937 cluster = lu.cfg.GetClusterInfo()
3938 all_info = lu.cfg.GetAllInstancesInfo()
3940 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
3942 instance_list = [all_info[name] for name in instance_names]
3943 nodes = frozenset(itertools.chain(*(inst.all_nodes
3944 for inst in instance_list)))
3945 hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()
3950 # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if all_info[inst].primary_node == name:
              live_data.update(result.payload)
            else:
              wrongnode_inst.add(inst)
        # else no instance is alive
    else:
      live_data = {}
    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{"size": disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None
    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None
3993 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
3994 disk_usage, offline_nodes, bad_nodes,
3995 live_data, wrongnode_inst, consinfo)
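# Note (summary added for clarity): in _InstanceQuery._GetQueryData above, the
# requested_data flags gate what is gathered -- IQ_LIVE/IQ_CONSOLE drive the
# call_all_instances_info sweep that fills live_data, offline_nodes, bad_nodes
# and wrongnode_inst; IQ_DISKUSAGE computes disk_usage; IQ_CONSOLE additionally
# builds consinfo. Everything else comes straight from the configuration.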
3998 class LUQuery(NoHooksLU):
3999 """Query for resources/items of a certain kind.
4002 # pylint: disable-msg=W0142
4005 def CheckArguments(self):
4006 qcls = _GetQueryImplementation(self.op.what)
4008 self.impl = qcls(self.op.filter, self.op.fields, False)
4010 def ExpandNames(self):
4011 self.impl.ExpandNames(self)
4013 def DeclareLocks(self, level):
4014 self.impl.DeclareLocks(self, level)
4016 def Exec(self, feedback_fn):
4017 return self.impl.NewStyleQuery(self)
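# Note (added for clarity): LUQuery is the generic, new-style query entry point
# (NewStyleQuery over a filter), while the older per-resource LUs such as
# LUNodeQuery and LUInstanceQuery wrap the same _QueryBase implementations
# through OldStyleQuery for backwards-compatible output.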
4020 class LUQueryFields(NoHooksLU):
4021 """Query for resources/items of a certain kind.
4024 # pylint: disable-msg=W0142
4027 def CheckArguments(self):
4028 self.qcls = _GetQueryImplementation(self.op.what)
4030 def ExpandNames(self):
4031 self.needed_locks = {}
4033 def Exec(self, feedback_fn):
4034 return self.qcls.FieldsQuery(self.op.fields)
4037 class LUNodeModifyStorage(NoHooksLU):
4038 """Logical unit for modifying a storage volume on a node.
4043 def CheckArguments(self):
4044 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)
4062 def ExpandNames(self):
4063 self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }
4067 def Exec(self, feedback_fn):
4068 """Computes the list of nodes and their attributes.
4071 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4072 result = self.rpc.call_storage_modify(self.op.node_name,
4073 self.op.storage_type, st_args,
4074 self.op.name, self.op.changes)
4075 result.Raise("Failed to modify storage unit '%s' on %s" %
4076 (self.op.name, self.op.node_name))
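# Illustrative sketch (hypothetical values): the opcode for this LU carries a
# "changes" dict restricted to constants.MODIFIABLE_STORAGE_FIELDS for the
# given storage type, e.g. something like
#   changes = {constants.SF_ALLOCATABLE: True}
# which CheckArguments validates before call_storage_modify is issued.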
4079 class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
4084 HTYPE = constants.HTYPE_NODE
4085 _NFLAGS = ["master_capable", "vm_capable"]
4087 def CheckArguments(self):
4088 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
4089 # validate/normalize the node name
4090 self.hostname = netutils.GetHostname(name=self.op.node_name,
4091 family=self.primary_ip_family)
4092 self.op.node_name = self.hostname.name
4093 if self.op.readd and self.op.group:
4094 raise errors.OpPrereqError("Cannot pass a node group when a node is"
4095 " being readded", errors.ECODE_INVAL)
4097 def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
4104 "OP_TARGET": self.op.node_name,
4105 "NODE_NAME": self.op.node_name,
4106 "NODE_PIP": self.op.primary_ip,
4107 "NODE_SIP": self.op.secondary_ip,
4108 "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
4111 nodes_0 = self.cfg.GetNodeList()
4112 nodes_1 = nodes_0 + [self.op.node_name, ]
4113 return env, nodes_0, nodes_1
4115 def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
4128 node = hostname.name
4129 primary_ip = self.op.primary_ip = hostname.ip
4130 if self.op.secondary_ip is None:
4131 if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using an IPv6 primary address, a"
                                   " valid IPv4 address must be given as"
                                   " secondary", errors.ECODE_INVAL)
4135 self.op.secondary_ip = primary_ip
4137 secondary_ip = self.op.secondary_ip
4138 if not netutils.IP4Address.IsValid(secondary_ip):
4139 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4140 " address" % secondary_ip, errors.ECODE_INVAL)
4142 node_list = cfg.GetNodeList()
4143 if not self.op.readd and node in node_list:
4144 raise errors.OpPrereqError("Node %s is already in the configuration" %
4145 node, errors.ECODE_EXISTS)
4146 elif self.op.readd and node not in node_list:
4147 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
4150 self.changed_primary_ip = False
4152 for existing_node_name in node_list:
4153 existing_node = cfg.GetNodeInfo(existing_node_name)
4155 if self.op.readd and node == existing_node_name:
4156 if existing_node.secondary_ip != secondary_ip:
4157 raise errors.OpPrereqError("Readded node doesn't have the same IP"
4158 " address configuration as before",
4160 if existing_node.primary_ip != primary_ip:
4161 self.changed_primary_ip = True
4165 if (existing_node.primary_ip == primary_ip or
4166 existing_node.secondary_ip == primary_ip or
4167 existing_node.primary_ip == secondary_ip or
4168 existing_node.secondary_ip == secondary_ip):
4169 raise errors.OpPrereqError("New node ip address(es) conflict with"
4170 " existing node %s" % existing_node.name,
4171 errors.ECODE_NOTUNIQUE)
4173 # After this 'if' block, None is no longer a valid value for the
4174 # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)
4186 if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)
4194 # check that the type of the node (single versus dual homed) is the
4195 # same as for the master
4196 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
4197 master_singlehomed = myself.secondary_ip == myself.primary_ip
4198 newbie_singlehomed = secondary_ip == primary_ip
4199 if master_singlehomed != newbie_singlehomed:
4200 if master_singlehomed:
4201 raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)
4209 # checks reachability
4210 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
4211 raise errors.OpPrereqError("Node not reachable by ping",
4212 errors.ECODE_ENVIRON)
4214 if not newbie_singlehomed:
4215 # check reachability from my secondary ip to newbie's secondary ip
4216 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
4217 source=myself.secondary_ip):
4218 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4219 " based ping to node daemon port",
4220 errors.ECODE_ENVIRON)
    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False
    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)
4243 if self.op.ndparams:
4244 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
4246 def Exec(self, feedback_fn):
4247 """Adds the new node to the cluster.
4250 new_node = self.new_node
4251 node = new_node.name
    # We are adding a new node, so we assume it is powered
4254 new_node.powered = True
4256 # for re-adds, reset the offline/drained/master-candidate flags;
4257 # we need to reset here, otherwise offline would prevent RPC calls
4258 # later in the procedure; this also means that if the re-add
4259 # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
4262 self.LogInfo("Readding a node, the offline/drained flags were reset")
4263 # if we demote the node, we do cleanup later in the procedure
4264 new_node.master_candidate = self.master_candidate
4265 if self.changed_primary_ip:
4266 new_node.primary_ip = self.op.primary_ip
4268 # copy the master/vm_capable flags
4269 for attr in self._NFLAGS:
4270 setattr(new_node, attr, getattr(self.op, attr))
4272 # notify the user about any possible mc promotion
4273 if new_node.master_candidate:
4274 self.LogInfo("Node will be a master candidate")
4276 if self.op.ndparams:
4277 new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}
4281 # check connectivity
4282 result = self.rpc.call_version([node])[node]
4283 result.Raise("Can't get version information from node %s" % node)
4284 if constants.PROTOCOL_VERSION == result.payload:
4285 logging.info("Communication to node %s fine, sw version %s match",
4286 node, result.payload)
4288 raise errors.OpExecError("Version mismatch master version %s,"
4289 " node version %s" %
4290 (constants.PROTOCOL_VERSION, result.payload))
4292 # Add node to our /etc/hosts, and add key to known_hosts
4293 if self.cfg.GetClusterInfo().modify_etc_hosts:
4294 master_node = self.cfg.GetMasterNode()
4295 result = self.rpc.call_etc_hosts_modify(master_node,
4296 constants.ETC_HOSTS_ADD,
4299 result.Raise("Can't update hosts file with new host data")
4301 if new_node.secondary_ip != new_node.primary_ip:
4302 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
4305 node_verify_list = [self.cfg.GetMasterNode()]
4306 node_verify_param = {
4307 constants.NV_NODELIST: [node],
4308 # TODO: do a node-net-test as well?
4311 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
4312 self.cfg.GetClusterName())
4313 for verifier in node_verify_list:
4314 result[verifier].Raise("Cannot communicate with node %s" % verifier)
4315 nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
4318 feedback_fn("ssh/hostname verification failed"
4319 " (checking from %s): %s" %
4320 (verifier, nl_payload[failed]))
4321 raise errors.OpExecError("ssh/hostname verification failed.")
    if self.op.readd:
      _RedistributeAncillaryFiles(self)
4325 self.context.ReaddNode(new_node)
4326 # make sure we redistribute the config
4327 self.cfg.Update(new_node, feedback_fn)
4328 # and make sure the new node will not have old files around
4329 if not new_node.master_candidate:
4330 result = self.rpc.call_node_demote_from_mc(new_node.name)
4331 msg = result.fail_msg
4333 self.LogWarning("Node failed to demote itself from master"
4334 " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
4337 additional_vm=self.op.vm_capable)
4338 self.context.AddNode(new_node, self.proc.GetECId())
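# Note (summary added for clarity): Exec above performs the node addition in
# roughly this order: mark the node as powered, reset the readd flags, check
# the node daemon version, update /etc/hosts, verify the secondary IP if dual
# homed, run an ssh/hostname verification pass from the master, and finally
# either readd the node (redistributing files and updating the config) or add
# it as a brand new node and redistribute the ancillary files to it.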
4341 class LUNodeSetParams(LogicalUnit):
4342 """Modifies the parameters of a node.
4344 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
4345 to the node role (as _ROLE_*)
4346 @cvar _R2F: a dictionary from node role to tuples of flags
  @cvar _FLAGS: a list of attribute names corresponding to the flags

  """
4350 HPATH = "node-modify"
4351 HTYPE = constants.HTYPE_NODE
4353 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
  _F2R = {
    (True, False, False): _ROLE_CANDIDATE,
    (False, True, False): _ROLE_DRAINED,
    (False, False, True): _ROLE_OFFLINE,
    (False, False, False): _ROLE_REGULAR,
    }
4360 _R2F = dict((v, k) for k, v in _F2R.items())
4361 _FLAGS = ["master_candidate", "drained", "offline"]
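  # Illustration (added for clarity, values taken from the tables above):
  #   _F2R[(True, False, False)]  == _ROLE_CANDIDATE
  #   _R2F[_ROLE_OFFLINE]         == (False, False, True)
  # i.e. a node's (master_candidate, drained, offline) flag tuple maps to
  # exactly one role, and _R2F is simply the inverse mapping.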
4363 def CheckArguments(self):
4364 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4365 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
4366 self.op.master_capable, self.op.vm_capable,
4367 self.op.secondary_ip, self.op.ndparams]
4368 if all_mods.count(None) == len(all_mods):
4369 raise errors.OpPrereqError("Please pass at least one modification",
4371 if all_mods.count(True) > 1:
4372 raise errors.OpPrereqError("Can't set the node into more than one"
4373 " state at the same time",
4376 # Boolean value that tells us whether we might be demoting from MC
4377 self.might_demote = (self.op.master_candidate == False or
4378 self.op.offline == True or
4379 self.op.drained == True or
4380 self.op.master_capable == False)
4382 if self.op.secondary_ip:
4383 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
4384 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4385 " address" % self.op.secondary_ip,
4388 self.lock_all = self.op.auto_promote and self.might_demote
4389 self.lock_instances = self.op.secondary_ip is not None
4391 def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
4397 if self.lock_instances:
4398 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4400 def DeclareLocks(self, level):
4401 # If we have locked all instances, before waiting to lock nodes, release
4402 # all the ones living on nodes unrelated to the current operation.
4403 if level == locking.LEVEL_NODE and self.lock_instances:
      instances_release = []
      instances_keep = []
4406 self.affected_instances = []
4407 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4408 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
4409 instance = self.context.cfg.GetInstanceInfo(instance_name)
4410 i_mirrored = instance.disk_template in constants.DTS_INT_MIRROR
4411 if i_mirrored and self.op.node_name in instance.all_nodes:
4412 instances_keep.append(instance_name)
4413 self.affected_instances.append(instance)
          else:
            instances_release.append(instance_name)
4416 if instances_release:
4417 self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
4418 self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep
4420 def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
4427 "OP_TARGET": self.op.node_name,
4428 "MASTER_CANDIDATE": str(self.op.master_candidate),
4429 "OFFLINE": str(self.op.offline),
4430 "DRAINED": str(self.op.drained),
4431 "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl
4438 def CheckPrereq(self):
4439 """Check prerequisites.
4441 This only checks the instance list against the existing names.
4444 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
4446 if (self.op.master_candidate is not None or
4447 self.op.drained is not None or
4448 self.op.offline is not None):
4449 # we can't change the master's node flags
4450 if self.op.node_name == self.cfg.GetMasterNode():
4451 raise errors.OpPrereqError("The master role can be changed"
4452 " only via master-failover",
4455 if self.op.master_candidate and not node.master_capable:
4456 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
4457 " it a master candidate" % node.name,
4460 if self.op.vm_capable == False:
4461 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)
4467 if node.master_candidate and self.might_demote and not self.lock_all:
4468 assert not self.op.auto_promote, "auto_promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
4471 (mc_remaining, mc_should, _) = \
4472 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
4473 if mc_remaining < mc_should:
4474 raise errors.OpPrereqError("Not enough master candidates, please"
4475 " pass auto promote option to allow"
4476 " promotion", errors.ECODE_STATE)
4478 self.old_flags = old_flags = (node.master_candidate,
4479 node.drained, node.offline)
4480 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
4481 self.old_role = old_role = self._F2R[old_flags]
4483 # Check for ineffective changes
4484 for attr in self._FLAGS:
4485 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
4486 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
4487 setattr(self.op, attr, None)
4489 # Past this point, any flag change to False means a transition
4490 # away from the respective state, as only real changes are kept
4492 # TODO: We might query the real power state if it supports OOB
4493 if _SupportsOob(self.cfg, node):
4494 if self.op.offline is False and not (node.powered or
4495 self.op.powered == True):
4496 raise errors.OpPrereqError(("Please power on node %s first before you"
4497 " can reset offline state") %
4499 elif self.op.powered is not None:
4500 raise errors.OpPrereqError(("Unable to change powered state for node %s"
4501 " which does not support out-of-band"
4502 " handling") % self.op.node_name)
4504 # If we're being deofflined/drained, we'll MC ourself if needed
4505 if (self.op.drained == False or self.op.offline == False or
4506 (self.op.master_capable and not node.master_capable)):
4507 if _DecideSelfPromotion(self):
4508 self.op.master_candidate = True
4509 self.LogInfo("Auto-promoting node to master candidate")
4511 # If we're no longer master capable, we'll demote ourselves from MC
4512 if self.op.master_capable == False and node.master_candidate:
4513 self.LogInfo("Demoting from master candidate")
4514 self.op.master_candidate = False
4517 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
4518 if self.op.master_candidate:
4519 new_role = self._ROLE_CANDIDATE
4520 elif self.op.drained:
4521 new_role = self._ROLE_DRAINED
4522 elif self.op.offline:
4523 new_role = self._ROLE_OFFLINE
4524 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role
4531 self.new_role = new_role
4533 if old_role == self._ROLE_OFFLINE and new_role != old_role:
4534 # Trying to transition out of offline status
4535 result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_ENVIRON)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")
4546 if self.op.secondary_ip:
4547 # Ok even without locking, because this can't be changed by any LU
4548 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
4549 master_singlehomed = master.secondary_ip == master.primary_ip
4550 if master_singlehomed and self.op.secondary_ip:
4551 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
4552 " homed cluster", errors.ECODE_INVAL)
      if node.offline:
        if self.affected_instances:
4556 raise errors.OpPrereqError("Cannot change secondary ip: offline"
4557 " node has instances (%s) configured"
4558 " to use it" % self.affected_instances)
      else:
        # On online nodes, check that no instances are running, and that
4561 # the node has the new ip and we can reach it.
4562 for instance in self.affected_instances:
4563 _CheckInstanceDown(self, instance, "cannot change secondary ip")
4565 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
4566 if master.name != node.name:
4567 # check reachability from master secondary ip to new secondary ip
4568 if not netutils.TcpPing(self.op.secondary_ip,
4569 constants.DEFAULT_NODED_PORT,
4570 source=master.secondary_ip):
4571 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4572 " based ping to node daemon port",
4573 errors.ECODE_ENVIRON)
4575 if self.op.ndparams:
4576 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
4577 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
4578 self.new_ndparams = new_ndparams
4580 def Exec(self, feedback_fn):
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

    result = []
4590 if self.op.ndparams:
4591 node.ndparams = self.new_ndparams
4593 if self.op.powered is not None:
4594 node.powered = self.op.powered
4596 for attr in ["master_capable", "vm_capable"]:
4597 val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
4600 result.append((attr, str(val)))
4602 if new_role != old_role:
4603 # Tell the node to demote itself, if no longer MC and not offline
4604 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
4605 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)
4609 new_flags = self._R2F[new_role]
4610 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
4612 result.append((desc, str(nf)))
4613 (node.master_candidate, node.drained, node.offline) = new_flags
      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        _AdjustCandidatePool(self, [node.name])
4619 if self.op.secondary_ip:
4620 node.secondary_ip = self.op.secondary_ip
4621 result.append(("secondary_ip", self.op.secondary_ip))
4623 # this will trigger configuration file update, if needed
4624 self.cfg.Update(node, feedback_fn)
    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result
4634 class LUNodePowercycle(NoHooksLU):
4635 """Powercycles a node.
4640 def CheckArguments(self):
4641 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4642 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4643 raise errors.OpPrereqError("The node is the master and the force"
4644 " parameter was not set",
4647 def ExpandNames(self):
4648 """Locking for PowercycleNode.
4650 This is a last-resort option and shouldn't block on other
4651 jobs. Therefore, we grab no locks.
4654 self.needed_locks = {}
4656 def Exec(self, feedback_fn):
4660 result = self.rpc.call_node_powercycle(self.op.node_name,
4661 self.cfg.GetHypervisorType())
4662 result.Raise("Failed to schedule the reboot")
4663 return result.payload
4666 class LUClusterQuery(NoHooksLU):
4667 """Query cluster configuration.
4672 def ExpandNames(self):
4673 self.needed_locks = {}
4675 def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}
4682 # Filter just for enabled hypervisors
4683 for os_name, hv_dict in cluster.os_hvp.items():
4684 os_hvp[os_name] = {}
4685 for hv_name, hv_params in hv_dict.items():
4686 if hv_name in cluster.enabled_hypervisors:
4687 os_hvp[os_name][hv_name] = hv_params
4689 # Convert ip_family to ip_version
4690 primary_ip_version = constants.IP4_VERSION
4691 if cluster.primary_ip_family == netutils.IP6Address.family:
4692 primary_ip_version = constants.IP6_VERSION
    result = {
      "software_version": constants.RELEASE_VERSION,
4696 "protocol_version": constants.PROTOCOL_VERSION,
4697 "config_version": constants.CONFIG_VERSION,
4698 "os_api_version": max(constants.OS_API_VERSIONS),
4699 "export_version": constants.EXPORT_VERSION,
4700 "architecture": (platform.architecture()[0], platform.machine()),
4701 "name": cluster.cluster_name,
4702 "master": cluster.master_node,
4703 "default_hypervisor": cluster.enabled_hypervisors[0],
4704 "enabled_hypervisors": cluster.enabled_hypervisors,
4705 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4706 for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
4709 "osparams": cluster.osparams,
4710 "nicparams": cluster.nicparams,
4711 "ndparams": cluster.ndparams,
4712 "candidate_pool_size": cluster.candidate_pool_size,
4713 "master_netdev": cluster.master_netdev,
4714 "volume_group_name": cluster.volume_group_name,
4715 "drbd_usermode_helper": cluster.drbd_usermode_helper,
4716 "file_storage_dir": cluster.file_storage_dir,
4717 "shared_file_storage_dir": cluster.shared_file_storage_dir,
4718 "maintain_node_health": cluster.maintain_node_health,
4719 "ctime": cluster.ctime,
4720 "mtime": cluster.mtime,
4721 "uuid": cluster.uuid,
4722 "tags": list(cluster.GetTags()),
4723 "uid_pool": cluster.uid_pool,
4724 "default_iallocator": cluster.default_iallocator,
4725 "reserved_lvs": cluster.reserved_lvs,
4726 "primary_ip_version": primary_ip_version,
4727 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
4728 "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      }

    return result
4735 class LUClusterConfigQuery(NoHooksLU):
4736 """Return configuration values.
4740 _FIELDS_DYNAMIC = utils.FieldSet()
4741 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4742 "watcher_pause", "volume_group_name")
4744 def CheckArguments(self):
4745 _CheckOutputFields(static=self._FIELDS_STATIC,
4746 dynamic=self._FIELDS_DYNAMIC,
4747 selected=self.op.output_fields)
4749 def ExpandNames(self):
4750 self.needed_locks = {}
4752 def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
4757 for field in self.op.output_fields:
4758 if field == "cluster_name":
4759 entry = self.cfg.GetClusterName()
4760 elif field == "master_node":
4761 entry = self.cfg.GetMasterNode()
4762 elif field == "drain_flag":
4763 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4764 elif field == "watcher_pause":
4765 entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4766 elif field == "volume_group_name":
4767 entry = self.cfg.GetVGName()
4769 raise errors.ParameterError(field)
      values.append(entry)

    return values
4774 class LUInstanceActivateDisks(NoHooksLU):
4775 """Bring up an instance's disks.
4780 def ExpandNames(self):
4781 self._ExpandAndLockInstance()
4782 self.needed_locks[locking.LEVEL_NODE] = []
4783 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4785 def DeclareLocks(self, level):
4786 if level == locking.LEVEL_NODE:
4787 self._LockInstancesNodes()
4789 def CheckPrereq(self):
4790 """Check prerequisites.
4792 This checks that the instance is in the cluster.
4795 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4796 assert self.instance is not None, \
4797 "Cannot retrieve locked instance %s" % self.op.instance_name
4798 _CheckNodeOnline(self, self.instance.primary_node)
4800 def Exec(self, feedback_fn):
    """Activate the disks.

    """
4804 disks_ok, disks_info = \
4805 _AssembleInstanceDisks(self, self.instance,
4806 ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
4815 """Prepare the block devices for an instance.
4817 This sets up the block devices on all nodes.
4819 @type lu: L{LogicalUnit}
4820 @param lu: the logical unit on whose behalf we execute
4821 @type instance: L{objects.Instance}
4822 @param instance: the instance for whose disks we assemble
4823 @type disks: list of L{objects.Disk} or None
4824 @param disks: which disks to assemble (or all, if None)
4825 @type ignore_secondaries: boolean
4826 @param ignore_secondaries: if true, errors on secondary nodes
4827 won't result in an error return from the function
4828 @type ignore_size: boolean
4829 @param ignore_size: if true, the current known size of the disk
4830 will not be used during the disk activation, useful for cases
4831 when the size is wrong
4832 @return: False if the operation failed, otherwise a list of
4833 (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
4839 iname = instance.name
4840 disks = _ExpandCheckDisks(instance, disks)
4842 # With the two passes mechanism we try to reduce the window of
4843 # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it
4846 # The proper fix would be to wait (with some limits) until the
4847 # connection has been made and drbd transitions from WFConnection
4848 # into any other network-connected state (Connected, SyncTarget,
4851 # 1st pass, assemble on all nodes in secondary mode
4852 for idx, inst_disk in enumerate(disks):
4853 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
4856 node_disk.UnsetSize()
4857 lu.cfg.SetDiskID(node_disk, node)
4858 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
4859 msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4862 " (is_primary=False, pass=1): %s",
4863 inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False
4867 # FIXME: race condition on drbd migration to primary
4869 # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
4878 node_disk.UnsetSize()
4879 lu.cfg.SetDiskID(node_disk, node)
4880 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
4881 msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4884 " (is_primary=True, pass=2): %s",
4885 inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload
4890 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4892 # leave the disks configured for the primary node
4893 # this is a workaround that would be fixed better by
4894 # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)
4898 return disks_ok, device_info
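# Usage sketch (illustrative; mirrors _StartInstanceDisks below): callers are
# expected to check the boolean result and roll back themselves, e.g.
#   disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     _ShutdownInstanceDisks(lu, instance)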
4901 def _StartInstanceDisks(lu, instance, force):
4902 """Start the disks of an instance.
4905 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4906 ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
4909 if force is not None and not force:
4910 lu.proc.LogWarning("", hint="If the message above refers to a"
4912 " you can retry the operation using '--force'.")
4913 raise errors.OpExecError("Disk consistency error")
4916 class LUInstanceDeactivateDisks(NoHooksLU):
4917 """Shutdown an instance's disks.
4922 def ExpandNames(self):
4923 self._ExpandAndLockInstance()
4924 self.needed_locks[locking.LEVEL_NODE] = []
4925 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4927 def DeclareLocks(self, level):
4928 if level == locking.LEVEL_NODE:
4929 self._LockInstancesNodes()
4931 def CheckPrereq(self):
4932 """Check prerequisites.
4934 This checks that the instance is in the cluster.
4937 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4938 assert self.instance is not None, \
4939 "Cannot retrieve locked instance %s" % self.op.instance_name
4941 def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    if self.op.force:
      _ShutdownInstanceDisks(self, instance)
    else:
      _SafeShutdownInstanceDisks(self, instance)
4952 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4953 """Shutdown block devices of an instance.
4955 This function checks if an instance is running, before calling
4956 _ShutdownInstanceDisks.
4959 _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4960 _ShutdownInstanceDisks(lu, instance, disks=disks)
4963 def _ExpandCheckDisks(instance, disks):
4964 """Return the instance disks selected by the disks list
4966 @type disks: list of L{objects.Disk} or None
4967 @param disks: selected disks
4968 @rtype: list of L{objects.Disk}
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks
4981 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4982 """Shutdown block devices of an instance.
4984 This does the shutdown on all nodes of the instance.
  If ignore_primary is false, errors on the primary node are taken into
  account and cause the function to return False.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4995 lu.cfg.SetDiskID(top_disk, node)
4996 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4997 msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
5000 disk.iv_name, node, msg)
5001 if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False

  return all_result
5007 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
5008 """Checks if a node has enough free memory.
  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
5015 @type lu: C{LogicalUnit}
5016 @param lu: a logical unit from which we get configuration data
5018 @param node: the node to check
5019 @type reason: C{str}
5020 @param reason: string to use in the error message
5021 @type requested: C{int}
5022 @param requested: the amount of memory in MiB to check for
5023 @type hypervisor_name: C{str}
5024 @param hypervisor_name: the hypervisor to ask for memory stats
5025 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
5026 we cannot check the node
5029 nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
5030 nodeinfo[node].Raise("Can't get data from node %s" % node,
5031 prereq=True, ecode=errors.ECODE_ENVIRON)
5032 free_mem = nodeinfo[node].payload.get('memory_free', None)
5033 if not isinstance(free_mem, int):
5034 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
5035 " was '%s'" % (node, free_mem),
5036 errors.ECODE_ENVIRON)
5037 if requested > free_mem:
5038 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
5039 " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)
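# Usage sketch (illustrative; mirrors the call in LUInstanceStartup below):
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
# The function returns nothing; it raises OpPrereqError when the check fails.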
5044 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
5045 """Checks if nodes have enough free disk space in the all VGs.
  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
5052 @type lu: C{LogicalUnit}
5053 @param lu: a logical unit from which we get configuration data
5054 @type nodenames: C{list}
5055 @param nodenames: the list of node names to check
5056 @type req_sizes: C{dict}
5057 @param req_sizes: the hash of vg and corresponding amount of disk in
5059 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5060 or we cannot check the node
5063 for vg, req_size in req_sizes.items():
5064 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
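# Shape sketch (hypothetical values): req_sizes maps volume group names to the
# required space in MiB, e.g.
#   _CheckNodesFreeDiskPerVG(lu, nodenames, {"xenvg": 10240})
# which simply fans out to one _CheckNodesFreeDiskOnVG call per volume group.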
5067 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5068 """Checks if nodes have enough free disk space in the specified VG.
  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
5075 @type lu: C{LogicalUnit}
5076 @param lu: a logical unit from which we get configuration data
5077 @type nodenames: C{list}
5078 @param nodenames: the list of node names to check
5080 @param vg: the volume group to check
5081 @type requested: C{int}
5082 @param requested: the amount of disk in MiB to check for
5083 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5084 or we cannot check the node
5087 nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5088 for node in nodenames:
5089 info = nodeinfo[node]
5090 info.Raise("Cannot get current information from node %s" % node,
5091 prereq=True, ecode=errors.ECODE_ENVIRON)
5092 vg_free = info.payload.get("vg_free", None)
5093 if not isinstance(vg_free, int):
5094 raise errors.OpPrereqError("Can't compute free disk space on node"
5095 " %s for vg %s, result was '%s'" %
5096 (node, vg, vg_free), errors.ECODE_ENVIRON)
5097 if requested > vg_free:
5098 raise errors.OpPrereqError("Not enough disk space on target node %s"
5099 " vg %s: required %d MiB, available %d MiB" %
                                 (node, vg, requested, vg_free),
                                 errors.ECODE_NORES)
5104 class LUInstanceStartup(LogicalUnit):
5105 """Starts an instance.
5108 HPATH = "instance-start"
5109 HTYPE = constants.HTYPE_INSTANCE
5112 def CheckArguments(self):
5114 if self.op.beparams:
5115 # fill the beparams dict
5116 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5118 def ExpandNames(self):
5119 self._ExpandAndLockInstance()
5121 def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
5130 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)

    return env, nl, nl
5134 def CheckPrereq(self):
5135 """Check prerequisites.
5137 This checks that the instance is in the cluster.
5140 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5141 assert self.instance is not None, \
5142 "Cannot retrieve locked instance %s" % self.op.instance_name
5145 if self.op.hvparams:
5146 # check hypervisor parameter syntax (locally)
5147 cluster = self.cfg.GetClusterInfo()
5148 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5149 filled_hvp = cluster.FillHV(instance)
5150 filled_hvp.update(self.op.hvparams)
5151 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
5152 hv_type.CheckParameterSyntax(filled_hvp)
5153 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
5155 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
5157 if self.primary_offline and self.op.ignore_offline_nodes:
5158 self.proc.LogWarning("Ignoring offline primary node")
5160 if self.op.hvparams or self.op.beparams:
5161 self.proc.LogWarning("Overridden parameters are ignored")
    else:
      _CheckNodeOnline(self, instance.primary_node)
5165 bep = self.cfg.GetClusterInfo().FillBE(instance)
5167 # check bridges existence
5168 _CheckInstanceBridgesExist(self, instance)
5170 remote_info = self.rpc.call_instance_info(instance.primary_node,
5172 instance.hypervisor)
5173 remote_info.Raise("Error checking node %s" % instance.primary_node,
5174 prereq=True, ecode=errors.ECODE_ENVIRON)
5175 if not remote_info.payload: # not running already
5176 _CheckNodeFreeMemory(self, instance.primary_node,
5177 "starting instance %s" % instance.name,
5178 bep[constants.BE_MEMORY], instance.hypervisor)
5180 def Exec(self, feedback_fn):
5181 """Start the instance.
5184 instance = self.instance
5185 force = self.op.force
5187 self.cfg.MarkInstanceUp(instance.name)
5189 if self.primary_offline:
5190 assert self.op.ignore_offline_nodes
5191 self.proc.LogInfo("Primary node offline, marked instance as started")
    else:
      node_current = instance.primary_node
5195 _StartInstanceDisks(self, instance, force)
5197 result = self.rpc.call_instance_start(node_current, instance,
5198 self.op.hvparams, self.op.beparams)
5199 msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
5202 raise errors.OpExecError("Could not start instance: %s" % msg)
5205 class LUInstanceReboot(LogicalUnit):
5206 """Reboot an instance.
5209 HPATH = "instance-reboot"
5210 HTYPE = constants.HTYPE_INSTANCE
5213 def ExpandNames(self):
5214 self._ExpandAndLockInstance()
5216 def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
5223 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
5224 "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
5227 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)

    return env, nl, nl
5231 def CheckPrereq(self):
5232 """Check prerequisites.
5234 This checks that the instance is in the cluster.
5237 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5238 assert self.instance is not None, \
5239 "Cannot retrieve locked instance %s" % self.op.instance_name
5241 _CheckNodeOnline(self, instance.primary_node)
5243 # check bridges existence
5244 _CheckInstanceBridgesExist(self, instance)
5246 def Exec(self, feedback_fn):
5247 """Reboot the instance.
5250 instance = self.instance
5251 ignore_secondaries = self.op.ignore_secondaries
5252 reboot_type = self.op.reboot_type
5254 remote_info = self.rpc.call_instance_info(instance.primary_node,
5256 instance.hypervisor)
5257 remote_info.Raise("Error checking node %s" % instance.primary_node)
5258 instance_running = bool(remote_info.payload)
5260 node_current = instance.primary_node
5262 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
5263 constants.INSTANCE_REBOOT_HARD]:
5264 for disk in instance.disks:
5265 self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.op.shutdown_timeout)
5269 result.Raise("Could not reboot instance")
    else:
      if instance_running:
5272 result = self.rpc.call_instance_shutdown(node_current, instance,
5273 self.op.shutdown_timeout)
5274 result.Raise("Could not shutdown instance for full reboot")
5275 _ShutdownInstanceDisks(self, instance)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     instance.name)
5279 _StartInstanceDisks(self, instance, ignore_secondaries)
5280 result = self.rpc.call_instance_start(node_current, instance, None, None)
5281 msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
5284 raise errors.OpExecError("Could not start instance for"
5285 " full reboot: %s" % msg)
5287 self.cfg.MarkInstanceUp(instance.name)
5290 class LUInstanceShutdown(LogicalUnit):
5291 """Shutdown an instance.
5294 HPATH = "instance-stop"
5295 HTYPE = constants.HTYPE_INSTANCE
5298 def ExpandNames(self):
5299 self._ExpandAndLockInstance()
5301 def BuildHooksEnv(self):
5304 This runs on master, primary and secondary nodes of the instance.
5307 env = _BuildInstanceHookEnvByObject(self, self.instance)
5308 env["TIMEOUT"] = self.op.timeout
5309 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5312 def CheckPrereq(self):
5313 """Check prerequisites.
5315 This checks that the instance is in the cluster.
5318 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5319 assert self.instance is not None, \
5320 "Cannot retrieve locked instance %s" % self.op.instance_name
5322 self.primary_offline = \
5323 self.cfg.GetNodeInfo(self.instance.primary_node).offline
5325 if self.primary_offline and self.op.ignore_offline_nodes:
5326 self.proc.LogWarning("Ignoring offline primary node")
5328 _CheckNodeOnline(self, self.instance.primary_node)
5330 def Exec(self, feedback_fn):
5331 """Shutdown the instance.
5334 instance = self.instance
5335 node_current = instance.primary_node
5336 timeout = self.op.timeout
5338 self.cfg.MarkInstanceDown(instance.name)
5340 if self.primary_offline:
5341 assert self.op.ignore_offline_nodes
5342 self.proc.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5345 msg = result.fail_msg
      if msg:
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5349 _ShutdownInstanceDisks(self, instance)
5352 class LUInstanceReinstall(LogicalUnit):
5353 """Reinstall an instance.
5356 HPATH = "instance-reinstall"
5357 HTYPE = constants.HTYPE_INSTANCE
5360 def ExpandNames(self):
5361 self._ExpandAndLockInstance()
5363 def BuildHooksEnv(self):
5366 This runs on master, primary and secondary nodes of the instance.
5369 env = _BuildInstanceHookEnvByObject(self, self.instance)
5370 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5373 def CheckPrereq(self):
5374 """Check prerequisites.
5376 This checks that the instance is in the cluster and is not running.
5379 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5380 assert instance is not None, \
5381 "Cannot retrieve locked instance %s" % self.op.instance_name
5382 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
5383 " offline, cannot reinstall")
5384 for node in instance.secondary_nodes:
5385 _CheckNodeOnline(self, node, "Instance secondary node offline,"
5386 " cannot reinstall")
5388 if instance.disk_template == constants.DT_DISKLESS:
5389 raise errors.OpPrereqError("Instance '%s' has no disks" %
5390 self.op.instance_name,
5392 _CheckInstanceDown(self, instance, "cannot reinstall")
5394 if self.op.os_type is not None:
5396 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
5397 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
5398 instance_os = self.op.os_type
    else:
      instance_os = instance.os
5402 nodelist = list(instance.all_nodes)
5404 if self.op.osparams:
5405 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
5406 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}
5411 self.instance = instance
5413 def Exec(self, feedback_fn):
5414 """Reinstall the instance.
5417 inst = self.instance
5419 if self.op.os_type is not None:
5420 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
5421 inst.os = self.op.os_type
5422 # Write to configuration
5423 self.cfg.Update(inst, feedback_fn)
5425 _StartInstanceDisks(self, inst, None)
5427 feedback_fn("Running the instance OS create scripts...")
5428 # FIXME: pass debug option from opcode to backend
5429 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
5430 self.op.debug_level,
5431 osparams=self.os_inst)
5432 result.Raise("Could not install OS for instance %s on node %s" %
5433 (inst.name, inst.primary_node))
5435 _ShutdownInstanceDisks(self, inst)
5438 class LUInstanceRecreateDisks(LogicalUnit):
5439 """Recreate an instance's missing disks.
5442 HPATH = "instance-recreate-disks"
5443 HTYPE = constants.HTYPE_INSTANCE
5446 def ExpandNames(self):
5447 self._ExpandAndLockInstance()
5449 def BuildHooksEnv(self):
5452 This runs on master, primary and secondary nodes of the instance.
5455 env = _BuildInstanceHookEnvByObject(self, self.instance)
5456 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5459 def CheckPrereq(self):
5460 """Check prerequisites.
5462 This checks that the instance is in the cluster and is not running.
5465 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5466 assert instance is not None, \
5467 "Cannot retrieve locked instance %s" % self.op.instance_name
5468 _CheckNodeOnline(self, instance.primary_node)
5470 if instance.disk_template == constants.DT_DISKLESS:
5471 raise errors.OpPrereqError("Instance '%s' has no disks" %
5472 self.op.instance_name, errors.ECODE_INVAL)
5473 _CheckInstanceDown(self, instance, "cannot recreate disks")
5475 if not self.op.disks:
5476 self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
5479 if idx >= len(instance.disks):
5480 raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
5483 self.instance = instance
5485 def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    to_skip = []
5490 for idx, _ in enumerate(self.instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
5495 _CreateDisks(self, self.instance, to_skip=to_skip)
5498 class LUInstanceRename(LogicalUnit):
5499 """Rename an instance.
5502 HPATH = "instance-rename"
5503 HTYPE = constants.HTYPE_INSTANCE
5505 def CheckArguments(self):
5509 if self.op.ip_check and not self.op.name_check:
5510 # TODO: make the ip check more flexible and not depend on the name check
5511 raise errors.OpPrereqError("Cannot do ip check without a name check",
5514 def BuildHooksEnv(self):
5517 This runs on master, primary and secondary nodes of the instance.
5520 env = _BuildInstanceHookEnvByObject(self, self.instance)
5521 env["INSTANCE_NEW_NAME"] = self.op.new_name
5522 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5525 def CheckPrereq(self):
5526 """Check prerequisites.
5528 This checks that the instance is in the cluster and is not running.
5531 self.op.instance_name = _ExpandInstanceName(self.cfg,
5532 self.op.instance_name)
5533 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5534 assert instance is not None
5535 _CheckNodeOnline(self, instance.primary_node)
5536 _CheckInstanceDown(self, instance, "cannot rename")
5537 self.instance = instance
5539 new_name = self.op.new_name
5540 if self.op.name_check:
5541 hostname = netutils.GetHostname(name=new_name)
5542 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
5544 if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
5545 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
5546 " same as given hostname '%s'") %
5547 (hostname.name, self.op.new_name),
5549 new_name = self.op.new_name = hostname.name
5550 if (self.op.ip_check and
5551 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5552 raise errors.OpPrereqError("IP %s of instance %s already in use" %
5553 (hostname.ip, new_name),
5554 errors.ECODE_NOTUNIQUE)
5556 instance_list = self.cfg.GetInstanceList()
5557 if new_name in instance_list and new_name != instance.name:
5558 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5559 new_name, errors.ECODE_EXISTS)
5561 def Exec(self, feedback_fn):
5562 """Rename the instance.
5565 inst = self.instance
5566 old_name = inst.name
5568 rename_file_storage = False
5569 if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
5570 self.op.new_name != inst.name):
5571 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5572 rename_file_storage = True
5574 self.cfg.RenameInstance(inst.name, self.op.new_name)
5575 # Change the instance lock. This is definitely safe while we hold the BGL
5576 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
5577 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5579 # re-read the instance from the configuration after rename
5580 inst = self.cfg.GetInstanceInfo(self.op.new_name)
5582 if rename_file_storage:
5583 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5584 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5585 old_file_storage_dir,
5586 new_file_storage_dir)
5587 result.Raise("Could not rename on node %s directory '%s' to '%s'"
5588 " (but the instance has been renamed in Ganeti)" %
5589 (inst.primary_node, old_file_storage_dir,
5590 new_file_storage_dir))
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
5595 old_name, self.op.debug_level)
5596 msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
5599 " (but the instance has been renamed in Ganeti): %s" %
5600 (inst.name, inst.primary_node, msg))
5601 self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)

    return inst.name
5608 class LUInstanceRemove(LogicalUnit):
5609 """Remove an instance.
5612 HPATH = "instance-remove"
5613 HTYPE = constants.HTYPE_INSTANCE
5616 def ExpandNames(self):
5617 self._ExpandAndLockInstance()
5618 self.needed_locks[locking.LEVEL_NODE] = []
5619 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5621 def DeclareLocks(self, level):
5622 if level == locking.LEVEL_NODE:
5623 self._LockInstancesNodes()
5625 def BuildHooksEnv(self):
5628 This runs on master, primary and secondary nodes of the instance.
5631 env = _BuildInstanceHookEnvByObject(self, self.instance)
5632 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
5633 nl = [self.cfg.GetMasterNode()]
5634 nl_post = list(self.instance.all_nodes) + nl
5635 return env, nl, nl_post
5637 def CheckPrereq(self):
5638 """Check prerequisites.
5640 This checks that the instance is in the cluster.
5643 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5644 assert self.instance is not None, \
5645 "Cannot retrieve locked instance %s" % self.op.instance_name
5647 def Exec(self, feedback_fn):
5648 """Remove the instance.
5651 instance = self.instance
5652 logging.info("Shutting down instance %s on node %s",
5653 instance.name, instance.primary_node)
5655 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5656 self.op.shutdown_timeout)
5657 msg = result.fail_msg
5658 if msg:
5659 if self.op.ignore_failures:
5660 feedback_fn("Warning: can't shutdown instance: %s" % msg)
5661 else:
5662 raise errors.OpExecError("Could not shutdown instance %s on"
5663 " node %s: %s" %
5664 (instance.name, instance.primary_node, msg))
5666 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5669 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5670 """Utility function to remove an instance.
5673 logging.info("Removing block devices for instance %s", instance.name)
5675 if not _RemoveDisks(lu, instance):
5676 if not ignore_failures:
5677 raise errors.OpExecError("Can't remove instance's disks")
5678 feedback_fn("Warning: can't remove instance's disks")
5680 logging.info("Removing instance %s out of cluster config", instance.name)
5682 lu.cfg.RemoveInstance(instance.name)
5684 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5685 "Instance lock removal conflict"
5687 # Remove lock for the instance
5688 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5691 class LUInstanceQuery(NoHooksLU):
5692 """Logical unit for querying instances.
5695 # pylint: disable-msg=W0142
5698 def CheckArguments(self):
5699 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
5700 self.op.output_fields, self.op.use_locking)
5702 def ExpandNames(self):
5703 self.iq.ExpandNames(self)
5705 def DeclareLocks(self, level):
5706 self.iq.DeclareLocks(self, level)
5708 def Exec(self, feedback_fn):
5709 return self.iq.OldStyleQuery(self)
5712 class LUInstanceFailover(LogicalUnit):
5713 """Failover an instance.
5716 HPATH = "instance-failover"
5717 HTYPE = constants.HTYPE_INSTANCE
5720 def CheckArguments(self):
5721 """Check the arguments.
5724 self.iallocator = getattr(self.op, "iallocator", None)
5725 self.target_node = getattr(self.op, "target_node", None)
5727 def ExpandNames(self):
5728 self._ExpandAndLockInstance()
5730 if self.op.target_node is not None:
5731 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5733 self.needed_locks[locking.LEVEL_NODE] = []
5734 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5736 def DeclareLocks(self, level):
5737 if level == locking.LEVEL_NODE:
5738 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
5739 if instance.disk_template in constants.DTS_EXT_MIRROR:
5740 if self.op.target_node is None:
5741 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5742 else:
5743 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
5744 self.op.target_node]
5745 del self.recalculate_locks[locking.LEVEL_NODE]
5746 else:
5747 self._LockInstancesNodes()
5749 def BuildHooksEnv(self):
5750 """Build hooks env.
5752 This runs on master, primary and secondary nodes of the instance.
5754 """
5755 instance = self.instance
5756 source_node = instance.primary_node
5758 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5759 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5760 "OLD_PRIMARY": source_node,
5761 "NEW_PRIMARY": self.op.target_node,
5764 if instance.disk_template in constants.DTS_INT_MIRROR:
5765 env["OLD_SECONDARY"] = instance.secondary_nodes[0]
5766 env["NEW_SECONDARY"] = source_node
5767 else:
5768 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
5770 env.update(_BuildInstanceHookEnvByObject(self, instance))
5771 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5772 nl_post = list(nl)
5773 nl_post.append(source_node)
5774 return env, nl, nl_post
5776 def CheckPrereq(self):
5777 """Check prerequisites.
5779 This checks that the instance is in the cluster.
5782 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5783 assert self.instance is not None, \
5784 "Cannot retrieve locked instance %s" % self.op.instance_name
5786 bep = self.cfg.GetClusterInfo().FillBE(instance)
5787 if instance.disk_template not in constants.DTS_MIRRORED:
5788 raise errors.OpPrereqError("Instance's disk layout is not"
5789 " mirrored, cannot failover.",
5792 if instance.disk_template in constants.DTS_EXT_MIRROR:
5793 _CheckIAllocatorOrNode(self, "iallocator", "target_node")
5794 if self.op.iallocator:
5795 self._RunAllocator()
5796 # Release all unnecessary node locks
5797 nodes_keep = [instance.primary_node, self.op.target_node]
5798 nodes_rel = [node for node in self.acquired_locks[locking.LEVEL_NODE]
5799 if node not in nodes_keep]
5800 self.context.glm.release(locking.LEVEL_NODE, nodes_rel)
5801 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
5803 # self.op.target_node is already populated, either directly or by the
5804 # iallocator run
5805 target_node = self.op.target_node
5807 else:
5808 secondary_nodes = instance.secondary_nodes
5809 if not secondary_nodes:
5810 raise errors.ConfigurationError("No secondary node but using"
5811 " %s disk template" %
5812 instance.disk_template)
5813 target_node = secondary_nodes[0]
5815 if self.op.iallocator or (self.op.target_node and
5816 self.op.target_node != target_node):
5817 raise errors.OpPrereqError("Instances with disk template %s cannot"
5818 " be failed over to arbitrary nodes"
5819 " (neither an iallocator nor a target"
5820 " node can be passed)" %
5821 instance.disk_template, errors.ECODE_INVAL)
5822 _CheckNodeOnline(self, target_node)
5823 _CheckNodeNotDrained(self, target_node)
5825 # Save target_node so that we can use it in BuildHooksEnv
5826 self.op.target_node = target_node
5828 if instance.admin_up:
5829 # check memory requirements on the secondary node
5830 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5831 instance.name, bep[constants.BE_MEMORY],
5832 instance.hypervisor)
5833 else:
5834 self.LogInfo("Not checking memory on the secondary node as"
5835 " instance will not be started")
5837 # check bridge existence
5838 _CheckInstanceBridgesExist(self, instance, node=target_node)
5840 def Exec(self, feedback_fn):
5841 """Failover an instance.
5843 The failover is done by shutting it down on its present node and
5844 starting it on the secondary.
5847 instance = self.instance
5848 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
5850 source_node = instance.primary_node
5851 target_node = self.op.target_node
5853 if instance.admin_up:
5854 feedback_fn("* checking disk consistency between source and target")
5855 for dev in instance.disks:
5856 # for drbd, these are drbd over lvm
5857 if not _CheckDiskConsistency(self, dev, target_node, False):
5858 if not self.op.ignore_consistency:
5859 raise errors.OpExecError("Disk %s is degraded on target node,"
5860 " aborting failover." % dev.iv_name)
5862 feedback_fn("* not checking disk consistency as instance is not running")
5864 feedback_fn("* shutting down instance on source node")
5865 logging.info("Shutting down instance %s on node %s",
5866 instance.name, source_node)
5868 result = self.rpc.call_instance_shutdown(source_node, instance,
5869 self.op.shutdown_timeout)
5870 msg = result.fail_msg
5871 if msg:
5872 if self.op.ignore_consistency or primary_node.offline:
5873 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5874 " Proceeding anyway. Please make sure node"
5875 " %s is down. Error details: %s",
5876 instance.name, source_node, source_node, msg)
5877 else:
5878 raise errors.OpExecError("Could not shutdown instance %s on"
5879 " node %s: %s" %
5880 (instance.name, source_node, msg))
5882 feedback_fn("* deactivating the instance's disks on source node")
5883 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5884 raise errors.OpExecError("Can't shut down the instance's disks.")
5886 instance.primary_node = target_node
5887 # distribute new instance config to the other nodes
5888 self.cfg.Update(instance, feedback_fn)
5890 # Only start the instance if it's marked as up
5891 if instance.admin_up:
5892 feedback_fn("* activating the instance's disks on target node")
5893 logging.info("Starting instance %s on node %s",
5894 instance.name, target_node)
5896 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5897 ignore_secondaries=True)
5898 if not disks_ok:
5899 _ShutdownInstanceDisks(self, instance)
5900 raise errors.OpExecError("Can't activate the instance's disks")
5902 feedback_fn("* starting the instance on the target node")
5903 result = self.rpc.call_instance_start(target_node, instance, None, None)
5904 msg = result.fail_msg
5905 if msg:
5906 _ShutdownInstanceDisks(self, instance)
5907 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5908 (instance.name, target_node, msg))
5910 def _RunAllocator(self):
5911 """Run the allocator based on input opcode.
5914 ial = IAllocator(self.cfg, self.rpc,
5915 mode=constants.IALLOCATOR_MODE_RELOC,
5916 name=self.instance.name,
5917 # TODO See why hail breaks with a single node below
5918 relocate_from=[self.instance.primary_node,
5919 self.instance.primary_node],
5922 ial.Run(self.op.iallocator)
5924 if not ial.success:
5925 raise errors.OpPrereqError("Can't compute nodes using"
5926 " iallocator '%s': %s" %
5927 (self.op.iallocator, ial.info),
5929 if len(ial.result) != ial.required_nodes:
5930 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5931 " of nodes (%s), required %s" %
5932 (self.op.iallocator, len(ial.result),
5933 ial.required_nodes), errors.ECODE_FAULT)
5934 self.op.target_node = ial.result[0]
5935 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
5936 self.instance.name, self.op.iallocator,
5937 utils.CommaJoin(ial.result))
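# Illustrative note (not part of the original module): in relocation mode the
# allocator's answer is simply a list of node names, and self.op.target_node
# is set to ial.result[0]. For example, if the configured iallocator answered
# ["node2.example.com"] (a hypothetical node name), the failover would then
# proceed exactly as if the user had requested that node explicitly.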
5940 class LUInstanceMigrate(LogicalUnit):
5941 """Migrate an instance.
5943 This is migration without shutting down, compared to the failover,
5944 which is done with shutdown.
5947 HPATH = "instance-migrate"
5948 HTYPE = constants.HTYPE_INSTANCE
5951 def ExpandNames(self):
5952 self._ExpandAndLockInstance()
5954 if self.op.target_node is not None:
5955 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5957 self.needed_locks[locking.LEVEL_NODE] = []
5958 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5960 self._migrater = TLMigrateInstance(self, self.op.instance_name,
5961 self.op.cleanup, self.op.iallocator,
5962 self.op.target_node)
5963 self.tasklets = [self._migrater]
5965 def DeclareLocks(self, level):
5966 if level == locking.LEVEL_NODE:
5967 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
5968 if instance.disk_template in constants.DTS_EXT_MIRROR:
5969 if self.op.target_node is None:
5970 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5971 else:
5972 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
5973 self.op.target_node]
5974 del self.recalculate_locks[locking.LEVEL_NODE]
5975 else:
5976 self._LockInstancesNodes()
5978 def BuildHooksEnv(self):
5979 """Build hooks env.
5981 This runs on master, primary and secondary nodes of the instance.
5983 """
5984 instance = self._migrater.instance
5985 source_node = instance.primary_node
5986 target_node = self._migrater.target_node
5987 env = _BuildInstanceHookEnvByObject(self, instance)
5988 env["MIGRATE_LIVE"] = self._migrater.live
5989 env["MIGRATE_CLEANUP"] = self.op.cleanup
5991 "OLD_PRIMARY": source_node,
5992 "NEW_PRIMARY": target_node,
5995 if instance.disk_template in constants.DTS_INT_MIRROR:
5996 env["OLD_SECONDARY"] = target_node
5997 env["NEW_SECONDARY"] = source_node
5998 else:
5999 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
6001 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
6002 nl_post = list(nl)
6003 nl_post.append(source_node)
6004 return env, nl, nl_post
6007 class LUInstanceMove(LogicalUnit):
6008 """Move an instance by data-copying.
6011 HPATH = "instance-move"
6012 HTYPE = constants.HTYPE_INSTANCE
6015 def ExpandNames(self):
6016 self._ExpandAndLockInstance()
6017 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6018 self.op.target_node = target_node
6019 self.needed_locks[locking.LEVEL_NODE] = [target_node]
6020 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6022 def DeclareLocks(self, level):
6023 if level == locking.LEVEL_NODE:
6024 self._LockInstancesNodes(primary_only=True)
6026 def BuildHooksEnv(self):
6027 """Build hooks env.
6029 This runs on master, primary and secondary nodes of the instance.
6031 """
6032 env = {
6033 "TARGET_NODE": self.op.target_node,
6034 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6035 }
6036 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6037 nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
6038 self.op.target_node]
6041 def CheckPrereq(self):
6042 """Check prerequisites.
6044 This checks that the instance is in the cluster.
6047 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6048 assert self.instance is not None, \
6049 "Cannot retrieve locked instance %s" % self.op.instance_name
6051 node = self.cfg.GetNodeInfo(self.op.target_node)
6052 assert node is not None, \
6053 "Cannot retrieve locked node %s" % self.op.target_node
6055 self.target_node = target_node = node.name
6057 if target_node == instance.primary_node:
6058 raise errors.OpPrereqError("Instance %s is already on the node %s" %
6059 (instance.name, target_node),
6062 bep = self.cfg.GetClusterInfo().FillBE(instance)
6064 for idx, dsk in enumerate(instance.disks):
6065 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
6066 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
6067 " cannot copy" % idx, errors.ECODE_STATE)
6069 _CheckNodeOnline(self, target_node)
6070 _CheckNodeNotDrained(self, target_node)
6071 _CheckNodeVmCapable(self, target_node)
6073 if instance.admin_up:
6074 # check memory requirements on the secondary node
6075 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
6076 instance.name, bep[constants.BE_MEMORY],
6077 instance.hypervisor)
6078 else:
6079 self.LogInfo("Not checking memory on the secondary node as"
6080 " instance will not be started")
6082 # check bridge existence
6083 _CheckInstanceBridgesExist(self, instance, node=target_node)
6085 def Exec(self, feedback_fn):
6086 """Move an instance.
6088 The move is done by shutting it down on its present node, copying
6089 the data over (slow) and starting it on the new node.
6092 instance = self.instance
6094 source_node = instance.primary_node
6095 target_node = self.target_node
6097 self.LogInfo("Shutting down instance %s on source node %s",
6098 instance.name, source_node)
6100 result = self.rpc.call_instance_shutdown(source_node, instance,
6101 self.op.shutdown_timeout)
6102 msg = result.fail_msg
6103 if msg:
6104 if self.op.ignore_consistency:
6105 self.proc.LogWarning("Could not shutdown instance %s on node %s."
6106 " Proceeding anyway. Please make sure node"
6107 " %s is down. Error details: %s",
6108 instance.name, source_node, source_node, msg)
6109 else:
6110 raise errors.OpExecError("Could not shutdown instance %s on"
6111 " node %s: %s" %
6112 (instance.name, source_node, msg))
6114 # create the target disks
6116 _CreateDisks(self, instance, target_node=target_node)
6117 except errors.OpExecError:
6118 self.LogWarning("Device creation failed, reverting...")
6120 _RemoveDisks(self, instance, target_node=target_node)
6122 self.cfg.ReleaseDRBDMinors(instance.name)
6125 cluster_name = self.cfg.GetClusterInfo().cluster_name
6127 errs = []
6128 # activate, get path, copy the data over
6129 for idx, disk in enumerate(instance.disks):
6130 self.LogInfo("Copying data for disk %d", idx)
6131 result = self.rpc.call_blockdev_assemble(target_node, disk,
6132 instance.name, True, idx)
6133 if result.fail_msg:
6134 self.LogWarning("Can't assemble newly created disk %d: %s",
6135 idx, result.fail_msg)
6136 errs.append(result.fail_msg)
6137 break
6138 dev_path = result.payload
6139 result = self.rpc.call_blockdev_export(source_node, disk,
6140 target_node, dev_path,
6141 cluster_name)
6142 if result.fail_msg:
6143 self.LogWarning("Can't copy data over for disk %d: %s",
6144 idx, result.fail_msg)
6145 errs.append(result.fail_msg)
6146 break
6148 if errs:
6149 self.LogWarning("Some disks failed to copy, aborting")
6150 try:
6151 _RemoveDisks(self, instance, target_node=target_node)
6152 finally:
6153 self.cfg.ReleaseDRBDMinors(instance.name)
6154 raise errors.OpExecError("Errors during disk copy: %s" %
6155 (",".join(errs),))
6157 instance.primary_node = target_node
6158 self.cfg.Update(instance, feedback_fn)
6160 self.LogInfo("Removing the disks on the original node")
6161 _RemoveDisks(self, instance, target_node=source_node)
6163 # Only start the instance if it's marked as up
6164 if instance.admin_up:
6165 self.LogInfo("Starting instance %s on node %s",
6166 instance.name, target_node)
6168 disks_ok, _ = _AssembleInstanceDisks(self, instance,
6169 ignore_secondaries=True)
6170 if not disks_ok:
6171 _ShutdownInstanceDisks(self, instance)
6172 raise errors.OpExecError("Can't activate the instance's disks")
6174 result = self.rpc.call_instance_start(target_node, instance, None, None)
6175 msg = result.fail_msg
6176 if msg:
6177 _ShutdownInstanceDisks(self, instance)
6178 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6179 (instance.name, target_node, msg))
6182 class LUNodeMigrate(LogicalUnit):
6183 """Migrate all instances from a node.
6186 HPATH = "node-migrate"
6187 HTYPE = constants.HTYPE_NODE
6190 def CheckArguments(self):
6191 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
6193 def ExpandNames(self):
6194 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6196 self.needed_locks = {}
6198 # Create tasklets for migrating instances for all instances on this node
6202 self.lock_all_nodes = False
6204 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
6205 logging.debug("Migrating instance %s", inst.name)
6206 names.append(inst.name)
6208 tasklets.append(TLMigrateInstance(self, inst.name, False,
6209 self.op.iallocator, None))
6211 if inst.disk_template in constants.DTS_EXT_MIRROR:
6212 # We need to lock all nodes, as the iallocator will choose the
6213 # destination nodes afterwards
6214 self.lock_all_nodes = True
6216 self.tasklets = tasklets
6218 # Declare node locks
6219 if self.lock_all_nodes:
6220 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6222 self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
6223 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6225 # Declare instance locks
6226 self.needed_locks[locking.LEVEL_INSTANCE] = names
6228 def DeclareLocks(self, level):
6229 if level == locking.LEVEL_NODE and not self.lock_all_nodes:
6230 self._LockInstancesNodes()
6232 def BuildHooksEnv(self):
6233 """Build hooks env.
6235 This runs on the master, the primary and all the secondaries.
6237 """
6238 env = {
6239 "NODE_NAME": self.op.node_name,
6240 }
6242 nl = [self.cfg.GetMasterNode()]
6244 return (env, nl, nl)
6247 class TLMigrateInstance(Tasklet):
6248 """Tasklet class for instance migration.
6251 @ivar live: whether the migration will be done live or non-live;
6252 this variable is initialized only after CheckPrereq has run
6255 def __init__(self, lu, instance_name, cleanup,
6256 iallocator=None, target_node=None):
6257 """Initializes this class.
6260 Tasklet.__init__(self, lu)
6263 self.instance_name = instance_name
6264 self.cleanup = cleanup
6265 self.live = False # will be overridden later
6266 self.iallocator = iallocator
6267 self.target_node = target_node
6269 def CheckPrereq(self):
6270 """Check prerequisites.
6272 This checks that the instance is in the cluster.
6275 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
6276 instance = self.cfg.GetInstanceInfo(instance_name)
6277 assert instance is not None
6278 self.instance = instance
6280 if instance.disk_template not in constants.DTS_MIRRORED:
6281 raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
6282 " migrations" % instance.disk_template,
6285 if instance.disk_template in constants.DTS_EXT_MIRROR:
6286 _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
6289 self._RunAllocator()
6291 # self.target_node is already populated, either directly or by the
6293 target_node = self.target_node
6295 if len(self.lu.tasklets) == 1:
6296 # It is safe to remove locks only when we're the only tasklet in the LU
6297 nodes_keep = [instance.primary_node, self.target_node]
6298 nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE]
6299 if node not in nodes_keep]
6300 self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel)
6301 self.lu.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6304 secondary_nodes = instance.secondary_nodes
6305 if not secondary_nodes:
6306 raise errors.ConfigurationError("No secondary node but using"
6307 " %s disk template" %
6308 instance.disk_template)
6309 target_node = secondary_nodes[0]
6310 if self.lu.op.iallocator or (self.lu.op.target_node and
6311 self.lu.op.target_node != target_node):
6312 raise errors.OpPrereqError("Instances with disk template %s cannot"
6313 " be migrated over to arbitrary nodes"
6314 " (neither an iallocator nor a target"
6315 " node can be passed)" %
6316 instance.disk_template, errors.ECODE_INVAL)
6318 i_be = self.cfg.GetClusterInfo().FillBE(instance)
6320 # check memory requirements on the secondary node
6321 _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
6322 instance.name, i_be[constants.BE_MEMORY],
6323 instance.hypervisor)
6325 # check bridge existance
6326 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
6328 if not self.cleanup:
6329 _CheckNodeNotDrained(self.lu, target_node)
6330 result = self.rpc.call_instance_migratable(instance.primary_node,
6332 result.Raise("Can't migrate, please use failover",
6333 prereq=True, ecode=errors.ECODE_STATE)
6336 def _RunAllocator(self):
6337 """Run the allocator based on input opcode.
6340 ial = IAllocator(self.cfg, self.rpc,
6341 mode=constants.IALLOCATOR_MODE_RELOC,
6342 name=self.instance_name,
6343 # TODO See why hail breaks with a single node below
6344 relocate_from=[self.instance.primary_node,
6345 self.instance.primary_node],
6348 ial.Run(self.iallocator)
6350 if not ial.success:
6351 raise errors.OpPrereqError("Can't compute nodes using"
6352 " iallocator '%s': %s" %
6353 (self.iallocator, ial.info),
6355 if len(ial.result) != ial.required_nodes:
6356 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6357 " of nodes (%s), required %s" %
6358 (self.iallocator, len(ial.result),
6359 ial.required_nodes), errors.ECODE_FAULT)
6360 self.target_node = ial.result[0]
6361 self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6362 self.instance_name, self.iallocator,
6363 utils.CommaJoin(ial.result))
6365 if self.lu.op.live is not None and self.lu.op.mode is not None:
6366 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
6367 " parameters are accepted",
6369 if self.lu.op.live is not None:
6371 self.lu.op.mode = constants.HT_MIGRATION_LIVE
6373 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
6374 # reset the 'live' parameter to None so that repeated
6375 # invocations of CheckPrereq do not raise an exception
6376 self.lu.op.live = None
6377 elif self.lu.op.mode is None:
6378 # read the default value from the hypervisor
6379 i_hv = self.cfg.GetClusterInfo().FillHV(self.instance, skip_globals=False)
6380 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
6382 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
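# Summary of the resolution above (illustrative, values taken from the code):
#   op.live=True,  op.mode=None -> mode becomes constants.HT_MIGRATION_LIVE
#   op.live=False, op.mode=None -> mode becomes constants.HT_MIGRATION_NONLIVE
#   op.live=None,  op.mode=None -> mode read from the hypervisor's
#                                  HV_MIGRATION_MODE default
#   both live and mode given    -> rejected with OpPrereqError
# self.live is then simply "mode == HT_MIGRATION_LIVE".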
6384 def _WaitUntilSync(self):
6385 """Poll with custom rpc for disk sync.
6387 This uses our own step-based rpc call.
6390 self.feedback_fn("* wait until resync is done")
6391 all_done = False
6392 while not all_done:
6393 all_done = True
6394 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
6395 self.nodes_ip,
6396 self.instance.disks)
6397 min_percent = 100
6398 for node, nres in result.items():
6399 nres.Raise("Cannot resync disks on node %s" % node)
6400 node_done, node_percent = nres.payload
6401 all_done = all_done and node_done
6402 if node_percent is not None:
6403 min_percent = min(min_percent, node_percent)
6404 if not all_done:
6405 if min_percent < 100:
6406 self.feedback_fn(" - progress: %.1f%%" % min_percent)
6407 time.sleep(2)
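# Worked example for the polling loop above (illustrative figures): if the
# wait_sync call reports 80% done on the primary and 95% on the secondary,
# min_percent ends up at 80 and " - progress: 80.0%" is printed; the loop
# only terminates once every node reports node_done.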
6409 def _EnsureSecondary(self, node):
6410 """Demote a node to secondary.
6413 self.feedback_fn("* switching node %s to secondary mode" % node)
6415 for dev in self.instance.disks:
6416 self.cfg.SetDiskID(dev, node)
6418 result = self.rpc.call_blockdev_close(node, self.instance.name,
6419 self.instance.disks)
6420 result.Raise("Cannot change disk to secondary on node %s" % node)
6422 def _GoStandalone(self):
6423 """Disconnect from the network.
6426 self.feedback_fn("* changing into standalone mode")
6427 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
6428 self.instance.disks)
6429 for node, nres in result.items():
6430 nres.Raise("Cannot disconnect disks node %s" % node)
6432 def _GoReconnect(self, multimaster):
6433 """Reconnect to the network.
6439 msg = "single-master"
6440 self.feedback_fn("* changing disks into %s mode" % msg)
6441 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
6442 self.instance.disks,
6443 self.instance.name, multimaster)
6444 for node, nres in result.items():
6445 nres.Raise("Cannot change disks config on node %s" % node)
6447 def _ExecCleanup(self):
6448 """Try to cleanup after a failed migration.
6450 The cleanup is done by:
6451 - check that the instance is running only on one node
6452 (and update the config if needed)
6453 - change disks on its secondary node to secondary
6454 - wait until disks are fully synchronized
6455 - disconnect from the network
6456 - change disks into single-master mode
6457 - wait again until disks are fully synchronized
6460 instance = self.instance
6461 target_node = self.target_node
6462 source_node = self.source_node
6464 # check running on only one node
6465 self.feedback_fn("* checking where the instance actually runs"
6466 " (if this hangs, the hypervisor might be in"
6468 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
6469 for node, result in ins_l.items():
6470 result.Raise("Can't contact node %s" % node)
6472 runningon_source = instance.name in ins_l[source_node].payload
6473 runningon_target = instance.name in ins_l[target_node].payload
6475 if runningon_source and runningon_target:
6476 raise errors.OpExecError("Instance seems to be running on two nodes,"
6477 " or the hypervisor is confused. You will have"
6478 " to ensure manually that it runs only on one"
6479 " and restart this operation.")
6481 if not (runningon_source or runningon_target):
6482 raise errors.OpExecError("Instance does not seem to be running at all."
6483 " In this case, it's safer to repair by"
6484 " running 'gnt-instance stop' to ensure disk"
6485 " shutdown, and then restarting it.")
6487 if runningon_target:
6488 # the migration has actually succeeded, we need to update the config
6489 self.feedback_fn("* instance running on secondary node (%s),"
6490 " updating config" % target_node)
6491 instance.primary_node = target_node
6492 self.cfg.Update(instance, self.feedback_fn)
6493 demoted_node = source_node
6495 self.feedback_fn("* instance confirmed to be running on its"
6496 " primary node (%s)" % source_node)
6497 demoted_node = target_node
6499 if instance.disk_template in constants.DTS_INT_MIRROR:
6500 self._EnsureSecondary(demoted_node)
6501 try:
6502 self._WaitUntilSync()
6503 except errors.OpExecError:
6504 # we ignore errors here, since if the device is standalone, it
6505 # won't be able to sync
6506 pass
6507 self._GoStandalone()
6508 self._GoReconnect(False)
6509 self._WaitUntilSync()
6511 self.feedback_fn("* done")
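# Behaviour summary of the cleanup above: if the instance is found on the
# target node the configuration is updated and the old primary is demoted;
# if it is still on the source node the target is demoted instead. For
# internally mirrored (DRBD) templates the disks are then forced back through
# standalone into single-master mode before the cleanup is reported as done.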
6513 def _RevertDiskStatus(self):
6514 """Try to revert the disk status after a failed migration.
6517 target_node = self.target_node
6518 if self.instance.disk_template in constants.DTS_EXT_MIRROR:
6522 self._EnsureSecondary(target_node)
6523 self._GoStandalone()
6524 self._GoReconnect(False)
6525 self._WaitUntilSync()
6526 except errors.OpExecError, err:
6527 self.lu.LogWarning("Migration failed and I can't reconnect the"
6528 " drives: error '%s'\n"
6529 "Please look and recover the instance status" %
6532 def _AbortMigration(self):
6533 """Call the hypervisor code to abort a started migration.
6536 instance = self.instance
6537 target_node = self.target_node
6538 migration_info = self.migration_info
6540 abort_result = self.rpc.call_finalize_migration(target_node,
6541 instance,
6542 migration_info,
6543 False)
6544 abort_msg = abort_result.fail_msg
6545 if abort_msg:
6546 logging.error("Aborting migration failed on target node %s: %s",
6547 target_node, abort_msg)
6548 # Don't raise an exception here, as we still have to try to revert the
6549 # disk status, even if this step failed.
6551 def _ExecMigration(self):
6552 """Migrate an instance.
6554 The migrate is done by:
6555 - change the disks into dual-master mode
6556 - wait until disks are fully synchronized again
6557 - migrate the instance
6558 - change disks on the new secondary node (the old primary) to secondary
6559 - wait until disks are fully synchronized
6560 - change disks into single-master mode
6563 instance = self.instance
6564 target_node = self.target_node
6565 source_node = self.source_node
6567 self.feedback_fn("* checking disk consistency between source and target")
6568 for dev in instance.disks:
6569 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6570 raise errors.OpExecError("Disk %s is degraded or not fully"
6571 " synchronized on target node,"
6572 " aborting migrate." % dev.iv_name)
6574 # First get the migration information from the remote node
6575 result = self.rpc.call_migration_info(source_node, instance)
6576 msg = result.fail_msg
6577 if msg:
6578 log_err = ("Failed fetching source migration information from %s: %s" %
6579 (source_node, msg))
6580 logging.error(log_err)
6581 raise errors.OpExecError(log_err)
6583 self.migration_info = migration_info = result.payload
6585 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
6586 # Then switch the disks to master/master mode
6587 self._EnsureSecondary(target_node)
6588 self._GoStandalone()
6589 self._GoReconnect(True)
6590 self._WaitUntilSync()
6592 self.feedback_fn("* preparing %s to accept the instance" % target_node)
6593 result = self.rpc.call_accept_instance(target_node,
6594 instance,
6595 migration_info,
6596 self.nodes_ip[target_node])
6598 msg = result.fail_msg
6599 if msg:
6600 logging.error("Instance pre-migration failed, trying to revert"
6601 " disk status: %s", msg)
6602 self.feedback_fn("Pre-migration failed, aborting")
6603 self._AbortMigration()
6604 self._RevertDiskStatus()
6605 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6606 (instance.name, msg))
6608 self.feedback_fn("* migrating instance to %s" % target_node)
6610 result = self.rpc.call_instance_migrate(source_node, instance,
6611 self.nodes_ip[target_node],
6613 msg = result.fail_msg
6614 if msg:
6615 logging.error("Instance migration failed, trying to revert"
6616 " disk status: %s", msg)
6617 self.feedback_fn("Migration failed, aborting")
6618 self._AbortMigration()
6619 self._RevertDiskStatus()
6620 raise errors.OpExecError("Could not migrate instance %s: %s" %
6621 (instance.name, msg))
6624 instance.primary_node = target_node
6625 # distribute new instance config to the other nodes
6626 self.cfg.Update(instance, self.feedback_fn)
6628 result = self.rpc.call_finalize_migration(target_node,
6629 instance,
6630 migration_info,
6631 True)
6632 msg = result.fail_msg
6633 if msg:
6634 logging.error("Instance migration succeeded, but finalization failed:"
6635 " %s", msg)
6636 raise errors.OpExecError("Could not finalize instance migration: %s" %
6637 msg)
6639 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
6640 self._EnsureSecondary(source_node)
6641 self._WaitUntilSync()
6642 self._GoStandalone()
6643 self._GoReconnect(False)
6644 self._WaitUntilSync()
6646 self.feedback_fn("* done")
6648 def Exec(self, feedback_fn):
6649 """Perform the migration.
6652 feedback_fn("Migrating instance %s" % self.instance.name)
6654 self.feedback_fn = feedback_fn
6656 self.source_node = self.instance.primary_node
6658 # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
6659 if self.instance.disk_template in constants.DTS_INT_MIRROR:
6660 self.target_node = self.instance.secondary_nodes[0]
6661 # Otherwise self.target_node has been populated either
6662 # directly, or through an iallocator.
6664 self.all_nodes = [self.source_node, self.target_node]
6665 self.nodes_ip = {
6666 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
6667 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
6668 }
6670 if self.cleanup:
6671 return self._ExecCleanup()
6672 else:
6673 return self._ExecMigration()
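# Step order of a DRBD live migration as implemented by _ExecMigration above:
# check disk consistency, fetch migration_info from the source, switch the
# disks to dual-master, call instance_migrate, finalize on the target, update
# the configuration, then demote the old primary and return the disks to
# single-master mode; externally mirrored templates skip the DRBD
# reconfiguration steps.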
6676 def _CreateBlockDev(lu, node, instance, device, force_create,
6677 info, force_open):
6678 """Create a tree of block devices on a given node.
6680 If this device type has to be created on secondaries, create it and
6683 If not, just recurse to children keeping the same 'force' value.
6685 @param lu: the lu on whose behalf we execute
6686 @param node: the node on which to create the device
6687 @type instance: L{objects.Instance}
6688 @param instance: the instance which owns the device
6689 @type device: L{objects.Disk}
6690 @param device: the device to create
6691 @type force_create: boolean
6692 @param force_create: whether to force creation of this device; this
6693 will be changed to True whenever we find a device which has
6694 CreateOnSecondary() attribute
6695 @param info: the extra 'metadata' we should attach to the device
6696 (this will be represented as a LVM tag)
6697 @type force_open: boolean
6698 @param force_open: this parameter will be passed to the
6699 L{backend.BlockdevCreate} function where it specifies
6700 whether we run on primary or not, and it affects both
6701 the child assembly and the device own Open() execution
6704 if device.CreateOnSecondary():
6705 force_create = True
6707 if device.children:
6708 for child in device.children:
6709 _CreateBlockDev(lu, node, instance, child, force_create,
6710 info, force_open)
6712 if not force_create:
6713 return
6715 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
6718 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
6719 """Create a single block device on a given node.
6721 This will not recurse over children of the device, so they must be
6724 @param lu: the lu on whose behalf we execute
6725 @param node: the node on which to create the device
6726 @type instance: L{objects.Instance}
6727 @param instance: the instance which owns the device
6728 @type device: L{objects.Disk}
6729 @param device: the device to create
6730 @param info: the extra 'metadata' we should attach to the device
6731 (this will be represented as a LVM tag)
6732 @type force_open: boolean
6733 @param force_open: this parameter will be passed to the
6734 L{backend.BlockdevCreate} function where it specifies
6735 whether we run on primary or not, and it affects both
6736 the child assembly and the device own Open() execution
6739 lu.cfg.SetDiskID(device, node)
6740 result = lu.rpc.call_blockdev_create(node, device, device.size,
6741 instance.name, force_open, info)
6742 result.Raise("Can't create block device %s on"
6743 " node %s for instance %s" % (device, node, instance.name))
6744 if device.physical_id is None:
6745 device.physical_id = result.payload
6748 def _GenerateUniqueNames(lu, exts):
6749 """Generate a suitable LV name.
6751 This will generate a logical volume name for the given instance.
6753 """
6754 results = []
6755 for val in exts:
6756 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
6757 results.append("%s%s" % (new_id, val))
6758 return results
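# Illustrative example (ids shortened): a call such as
# _GenerateUniqueNames(lu, [".disk0", ".disk1"]) returns one cluster-unique
# id per extension, e.g. ["d1f9c2ab-....disk0", "7a40e3b1-....disk1"]; these
# strings later serve as the LV names of the instance's disks.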
6761 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
6763 """Generate a drbd8 device complete with its children.
6766 port = lu.cfg.AllocatePort()
6767 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
6768 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
6769 logical_id=(vgname, names[0]))
6770 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6771 logical_id=(vgname, names[1]))
6772 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
6773 logical_id=(primary, secondary, port,
6776 children=[dev_data, dev_meta],
6781 def _GenerateDiskTemplate(lu, template_name,
6782 instance_name, primary_node,
6783 secondary_nodes, disk_info,
6784 file_storage_dir, file_driver,
6785 base_index, feedback_fn):
6786 """Generate the entire disk layout for a given template type.
6789 #TODO: compute space requirements
6791 vgname = lu.cfg.GetVGName()
6792 disk_count = len(disk_info)
6794 if template_name == constants.DT_DISKLESS:
6796 elif template_name == constants.DT_PLAIN:
6797 if len(secondary_nodes) != 0:
6798 raise errors.ProgrammerError("Wrong template configuration")
6800 names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6801 for i in range(disk_count)])
6802 for idx, disk in enumerate(disk_info):
6803 disk_index = idx + base_index
6804 vg = disk.get("vg", vgname)
6805 feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
6806 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
6807 logical_id=(vg, names[idx]),
6808 iv_name="disk/%d" % disk_index,
6810 disks.append(disk_dev)
6811 elif template_name == constants.DT_DRBD8:
6812 if len(secondary_nodes) != 1:
6813 raise errors.ProgrammerError("Wrong template configuration")
6814 remote_node = secondary_nodes[0]
6815 minors = lu.cfg.AllocateDRBDMinor(
6816 [primary_node, remote_node] * len(disk_info), instance_name)
6819 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6820 for i in range(disk_count)]):
6821 names.append(lv_prefix + "_data")
6822 names.append(lv_prefix + "_meta")
6823 for idx, disk in enumerate(disk_info):
6824 disk_index = idx + base_index
6825 vg = disk.get("vg", vgname)
6826 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
6827 disk["size"], vg, names[idx*2:idx*2+2],
6828 "disk/%d" % disk_index,
6829 minors[idx*2], minors[idx*2+1])
6830 disk_dev.mode = disk["mode"]
6831 disks.append(disk_dev)
6832 elif template_name == constants.DT_FILE:
6833 if len(secondary_nodes) != 0:
6834 raise errors.ProgrammerError("Wrong template configuration")
6836 opcodes.RequireFileStorage()
6838 for idx, disk in enumerate(disk_info):
6839 disk_index = idx + base_index
6840 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6841 iv_name="disk/%d" % disk_index,
6842 logical_id=(file_driver,
6843 "%s/disk%d" % (file_storage_dir,
6846 disks.append(disk_dev)
6847 elif template_name == constants.DT_SHARED_FILE:
6848 if len(secondary_nodes) != 0:
6849 raise errors.ProgrammerError("Wrong template configuration")
6851 opcodes.RequireSharedFileStorage()
6853 for idx, disk in enumerate(disk_info):
6854 disk_index = idx + base_index
6855 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6856 iv_name="disk/%d" % disk_index,
6857 logical_id=(file_driver,
6858 "%s/disk%d" % (file_storage_dir,
6861 disks.append(disk_dev)
6862 elif template_name == constants.DT_BLOCK:
6863 if len(secondary_nodes) != 0:
6864 raise errors.ProgrammerError("Wrong template configuration")
6866 for idx, disk in enumerate(disk_info):
6867 disk_index = idx + base_index
6868 disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"],
6869 logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
6871 iv_name="disk/%d" % disk_index,
6873 disks.append(disk_dev)
6875 else:
6876 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
6878 return disks
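# Illustrative example: for template_name=constants.DT_DRBD8 with two disks
# and base_index 0, the names list holds the LV pairs "<id>.disk0_data",
# "<id>.disk0_meta", "<id>.disk1_data", "<id>.disk1_meta", and each DRBD
# branch receives its own port, shared secret and the minor pair
# minors[idx*2:idx*2+2] allocated above.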
6880 def _GetInstanceInfoText(instance):
6881 """Compute that text that should be added to the disk's metadata.
6884 return "originstname+%s" % instance.name
6887 def _CalcEta(time_taken, written, total_size):
6888 """Calculates the ETA based on size written and total size.
6890 @param time_taken: The time taken so far
6891 @param written: amount written so far
6892 @param total_size: The total size of data to be written
6893 @return: The remaining time in seconds
6896 avg_time = time_taken / float(written)
6897 return (total_size - written) * avg_time
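# Worked example: the ETA is a plain linear extrapolation, so
# _CalcEta(60.0, 2048, 8192) == (8192 - 2048) * (60.0 / 2048) == 180.0,
# i.e. three more minutes after writing 2 GiB of an 8 GiB disk in one minute.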
6900 def _WipeDisks(lu, instance):
6901 """Wipes instance disks.
6903 @type lu: L{LogicalUnit}
6904 @param lu: the logical unit on whose behalf we execute
6905 @type instance: L{objects.Instance}
6906 @param instance: the instance whose disks we should create
6907 @return: the success of the wipe
6910 node = instance.primary_node
6912 for device in instance.disks:
6913 lu.cfg.SetDiskID(device, node)
6915 logging.info("Pause sync of instance %s disks", instance.name)
6916 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
6918 for idx, success in enumerate(result.payload):
6919 if not success:
6920 logging.warn("pause-sync of instance %s for disks %d failed",
6921 instance.name, idx)
6924 for idx, device in enumerate(instance.disks):
6925 lu.LogInfo("* Wiping disk %d", idx)
6926 logging.info("Wiping disk %d for instance %s, node %s",
6927 idx, instance.name, node)
6929 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
6930 # MAX_WIPE_CHUNK at max
6931 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
6932 constants.MIN_WIPE_CHUNK_PERCENT)
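# Worked example (assuming MAX_WIPE_CHUNK = 1024 MiB and
# MIN_WIPE_CHUNK_PERCENT = 10 for the constants): a 4096 MiB disk is wiped in
# chunks of min(1024, 4096 / 100.0 * 10) = 409.6 MiB, while disks larger than
# 10240 MiB are capped at 1024 MiB per blockdev_wipe call.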
6937 start_time = time.time()
6939 while offset < size:
6940 wipe_size = min(wipe_chunk_size, size - offset)
6941 result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
6942 result.Raise("Could not wipe disk %d at offset %d for size %d" %
6943 (idx, offset, wipe_size))
6944 now = time.time()
6945 offset += wipe_size
6946 if now - last_output >= 60:
6947 eta = _CalcEta(now - start_time, offset, size)
6948 lu.LogInfo(" - done: %.1f%% ETA: %s" %
6949 (offset / float(size) * 100, utils.FormatSeconds(eta)))
6952 logging.info("Resume sync of instance %s disks", instance.name)
6954 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
6956 for idx, success in enumerate(result.payload):
6957 if not success:
6958 lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
6959 " look at the status and troubleshoot the issue.", idx)
6960 logging.warn("resume-sync of instance %s for disks %d failed",
6961 instance.name, idx)
6964 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
6965 """Create all disks for an instance.
6967 This abstracts away some work from AddInstance.
6969 @type lu: L{LogicalUnit}
6970 @param lu: the logical unit on whose behalf we execute
6971 @type instance: L{objects.Instance}
6972 @param instance: the instance whose disks we should create
6974 @param to_skip: list of indices to skip
6975 @type target_node: string
6976 @param target_node: if passed, overrides the target node for creation
6978 @return: the success of the creation
6981 info = _GetInstanceInfoText(instance)
6982 if target_node is None:
6983 pnode = instance.primary_node
6984 all_nodes = instance.all_nodes
6986 else:
6987 pnode = target_node
6988 all_nodes = [pnode]
6989 if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
6990 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6991 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
6993 result.Raise("Failed to create directory '%s' on"
6994 " node %s" % (file_storage_dir, pnode))
6996 # Note: this needs to be kept in sync with adding of disks in
6997 # LUInstanceSetParams
6998 for idx, device in enumerate(instance.disks):
6999 if to_skip and idx in to_skip:
7000 continue
7001 logging.info("Creating volume %s for instance %s",
7002 device.iv_name, instance.name)
7004 for node in all_nodes:
7005 f_create = node == pnode
7006 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
7009 def _RemoveDisks(lu, instance, target_node=None):
7010 """Remove all disks for an instance.
7012 This abstracts away some work from `AddInstance()` and
7013 `RemoveInstance()`. Note that in case some of the devices couldn't
7014 be removed, the removal will continue with the other ones (compare
7015 with `_CreateDisks()`).
7017 @type lu: L{LogicalUnit}
7018 @param lu: the logical unit on whose behalf we execute
7019 @type instance: L{objects.Instance}
7020 @param instance: the instance whose disks we should remove
7021 @type target_node: string
7022 @param target_node: used to override the node on which to remove the disks
7024 @return: the success of the removal
7027 logging.info("Removing block devices for instance %s", instance.name)
7030 for device in instance.disks:
7031 if target_node:
7032 edata = [(target_node, device)]
7033 else:
7034 edata = device.ComputeNodeTree(instance.primary_node)
7035 for node, disk in edata:
7036 lu.cfg.SetDiskID(disk, node)
7037 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
7039 lu.LogWarning("Could not remove block device %s on node %s,"
7040 " continuing anyway: %s", device.iv_name, node, msg)
7043 if instance.disk_template == constants.DT_FILE:
7044 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7045 if target_node:
7046 tgt = target_node
7047 else:
7048 tgt = instance.primary_node
7049 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
7050 if result.fail_msg:
7051 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
7052 file_storage_dir, instance.primary_node, result.fail_msg)
7058 def _ComputeDiskSizePerVG(disk_template, disks):
7059 """Compute disk size requirements in the volume group
7062 def _compute(disks, payload):
7063 """Universal algorithm
7068 vgs[disk["vg"]] = vgs.get("vg", 0) + disk["size"] + payload
7072 # Required free disk space as a function of disk and swap space
7073 req_size_dict = {
7074 constants.DT_DISKLESS: {},
7075 constants.DT_PLAIN: _compute(disks, 0),
7076 # 128 MB are added for drbd metadata for each disk
7077 constants.DT_DRBD8: _compute(disks, 128),
7078 constants.DT_FILE: {},
7079 constants.DT_SHARED_FILE: {},
7080 }
7082 if disk_template not in req_size_dict:
7083 raise errors.ProgrammerError("Disk template '%s' size requirement"
7084 " is unknown" % disk_template)
7086 return req_size_dict[disk_template]
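# Illustrative example: for disk_template=constants.DT_DRBD8 and
# disks=[{"vg": "xenvg", "size": 1024}, {"vg": "xenvg", "size": 2048}], the
# helper sums per volume group and budgets 128 MiB of DRBD metadata per disk,
# giving {"xenvg": 1024 + 128 + 2048 + 128} == {"xenvg": 3328}.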
7089 def _ComputeDiskSize(disk_template, disks):
7090 """Compute disk size requirements in the volume group
7093 # Required free disk space as a function of disk and swap space
7095 constants.DT_DISKLESS: None,
7096 constants.DT_PLAIN: sum(d["size"] for d in disks),
7097 # 128 MB are added for drbd metadata for each disk
7098 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
7099 constants.DT_FILE: None,
7100 constants.DT_SHARED_FILE: 0,
7101 constants.DT_BLOCK: 0,
7104 if disk_template not in req_size_dict:
7105 raise errors.ProgrammerError("Disk template '%s' size requirement"
7106 " is unknown" % disk_template)
7108 return req_size_dict[disk_template]
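# Illustrative example: the non-per-VG variant returns a single number or
# None; with the same two disks DT_PLAIN gives 1024 + 2048 = 3072, DT_DRBD8
# gives 3328, and DT_DISKLESS/DT_FILE report None since no volume group space
# is required.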
7111 def _FilterVmNodes(lu, nodenames):
7112 """Filters out non-vm_capable nodes from a list.
7114 @type lu: L{LogicalUnit}
7115 @param lu: the logical unit for which we check
7116 @type nodenames: list
7117 @param nodenames: the list of nodes on which we should check
7119 @return: the list of vm-capable nodes
7122 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
7123 return [name for name in nodenames if name not in vm_nodes]
7126 def _CheckHVParams(lu, nodenames, hvname, hvparams):
7127 """Hypervisor parameter validation.
7129 This function abstracts the hypervisor parameter validation to be
7130 used in both instance create and instance modify.
7132 @type lu: L{LogicalUnit}
7133 @param lu: the logical unit for which we check
7134 @type nodenames: list
7135 @param nodenames: the list of nodes on which we should check
7136 @type hvname: string
7137 @param hvname: the name of the hypervisor we should use
7138 @type hvparams: dict
7139 @param hvparams: the parameters which we need to check
7140 @raise errors.OpPrereqError: if the parameters are not valid
7143 nodenames = _FilterVmNodes(lu, nodenames)
7144 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
7145 hvname,
7146 hvparams)
7147 for node in nodenames:
7148 info = hvinfo[node]
7149 if info.offline:
7150 continue
7151 info.Raise("Hypervisor parameter validation failed on node %s" % node)
7154 def _CheckOSParams(lu, required, nodenames, osname, osparams):
7155 """OS parameters validation.
7157 @type lu: L{LogicalUnit}
7158 @param lu: the logical unit for which we check
7159 @type required: boolean
7160 @param required: whether the validation should fail if the OS is not
7162 @type nodenames: list
7163 @param nodenames: the list of nodes on which we should check
7164 @type osname: string
7165 @param osname: the name of the OS we should use
7166 @type osparams: dict
7167 @param osparams: the parameters which we need to check
7168 @raise errors.OpPrereqError: if the parameters are not valid
7171 nodenames = _FilterVmNodes(lu, nodenames)
7172 result = lu.rpc.call_os_validate(required, nodenames, osname,
7173 [constants.OS_VALIDATE_PARAMETERS],
7175 for node, nres in result.items():
7176 # we don't check for offline cases since this should be run only
7177 # against the master node and/or an instance's nodes
7178 nres.Raise("OS Parameters validation failed on node %s" % node)
7179 if not nres.payload:
7180 lu.LogInfo("OS %s not found on node %s, validation skipped",
7184 class LUInstanceCreate(LogicalUnit):
7185 """Create an instance.
7188 HPATH = "instance-add"
7189 HTYPE = constants.HTYPE_INSTANCE
7192 def CheckArguments(self):
7196 # do not require name_check to ease forward/backward compatibility
7198 if self.op.no_install and self.op.start:
7199 self.LogInfo("No-installation mode selected, disabling startup")
7200 self.op.start = False
7201 # validate/normalize the instance name
7202 self.op.instance_name = \
7203 netutils.Hostname.GetNormalizedName(self.op.instance_name)
7205 if self.op.ip_check and not self.op.name_check:
7206 # TODO: make the ip check more flexible and not depend on the name check
7207 raise errors.OpPrereqError("Cannot do ip check without a name check",
7210 # check nics' parameter names
7211 for nic in self.op.nics:
7212 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
7214 # check disks. parameter names and consistent adopt/no-adopt strategy
7215 has_adopt = has_no_adopt = False
7216 for disk in self.op.disks:
7217 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
7218 if "adopt" in disk:
7219 has_adopt = True
7220 else:
7221 has_no_adopt = True
7222 if has_adopt and has_no_adopt:
7223 raise errors.OpPrereqError("Either all disks are adopted or none is",
7224 errors.ECODE_INVAL)
7225 if has_adopt:
7226 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
7227 raise errors.OpPrereqError("Disk adoption is not supported for the"
7228 " '%s' disk template" %
7229 self.op.disk_template,
7231 if self.op.iallocator is not None:
7232 raise errors.OpPrereqError("Disk adoption not allowed with an"
7233 " iallocator script", errors.ECODE_INVAL)
7234 if self.op.mode == constants.INSTANCE_IMPORT:
7235 raise errors.OpPrereqError("Disk adoption not allowed for"
7236 " instance import", errors.ECODE_INVAL)
7238 if self.op.disk_template in constants.DTS_MUST_ADOPT:
7239 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
7240 " but no 'adopt' parameter given" %
7241 self.op.disk_template,
7244 self.adopt_disks = has_adopt
7246 # instance name verification
7247 if self.op.name_check:
7248 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
7249 self.op.instance_name = self.hostname1.name
7250 # used in CheckPrereq for ip ping check
7251 self.check_ip = self.hostname1.ip
7253 self.check_ip = None
7255 # file storage checks
7256 if (self.op.file_driver and
7257 not self.op.file_driver in constants.FILE_DRIVER):
7258 raise errors.OpPrereqError("Invalid file driver name '%s'" %
7259 self.op.file_driver, errors.ECODE_INVAL)
7261 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
7262 raise errors.OpPrereqError("File storage directory path not absolute",
7265 ### Node/iallocator related checks
7266 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
7268 if self.op.pnode is not None:
7269 if self.op.disk_template in constants.DTS_INT_MIRROR:
7270 if self.op.snode is None:
7271 raise errors.OpPrereqError("The networked disk templates need"
7272 " a mirror node", errors.ECODE_INVAL)
7273 elif self.op.snode:
7274 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
7275 " template")
7276 self.op.snode = None
7278 self._cds = _GetClusterDomainSecret()
7280 if self.op.mode == constants.INSTANCE_IMPORT:
7281 # On import force_variant must be True, because if we forced it at
7282 # initial install, our only chance when importing it back is that it
7284 self.op.force_variant = True
7286 if self.op.no_install:
7287 self.LogInfo("No-installation mode has no effect during import")
7289 elif self.op.mode == constants.INSTANCE_CREATE:
7290 if self.op.os_type is None:
7291 raise errors.OpPrereqError("No guest OS specified",
7293 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
7294 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
7295 " installation" % self.op.os_type,
7297 if self.op.disk_template is None:
7298 raise errors.OpPrereqError("No disk template specified",
7301 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7302 # Check handshake to ensure both clusters have the same domain secret
7303 src_handshake = self.op.source_handshake
7304 if not src_handshake:
7305 raise errors.OpPrereqError("Missing source handshake",
7308 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
7311 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
7314 # Load and check source CA
7315 self.source_x509_ca_pem = self.op.source_x509_ca
7316 if not self.source_x509_ca_pem:
7317 raise errors.OpPrereqError("Missing source X509 CA",
7321 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7323 except OpenSSL.crypto.Error, err:
7324 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7325 (err, ), errors.ECODE_INVAL)
7327 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7328 if errcode is not None:
7329 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7332 self.source_x509_ca = cert
7334 src_instance_name = self.op.source_instance_name
7335 if not src_instance_name:
7336 raise errors.OpPrereqError("Missing source instance name",
7339 self.source_instance_name = \
7340 netutils.GetHostname(name=src_instance_name).name
7343 raise errors.OpPrereqError("Invalid instance creation mode %r" %
7344 self.op.mode, errors.ECODE_INVAL)
7346 def ExpandNames(self):
7347 """ExpandNames for CreateInstance.
7349 Figure out the right locks for instance creation.
7352 self.needed_locks = {}
7354 instance_name = self.op.instance_name
7355 # this is just a preventive check, but someone might still add this
7356 # instance in the meantime, and creation will fail at lock-add time
7357 if instance_name in self.cfg.GetInstanceList():
7358 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7359 instance_name, errors.ECODE_EXISTS)
7361 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7363 if self.op.iallocator:
7364 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7365 else:
7366 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7367 nodelist = [self.op.pnode]
7368 if self.op.snode is not None:
7369 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7370 nodelist.append(self.op.snode)
7371 self.needed_locks[locking.LEVEL_NODE] = nodelist
7373 # in case of import lock the source node too
7374 if self.op.mode == constants.INSTANCE_IMPORT:
7375 src_node = self.op.src_node
7376 src_path = self.op.src_path
7378 if src_path is None:
7379 self.op.src_path = src_path = self.op.instance_name
7381 if src_node is None:
7382 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7383 self.op.src_node = None
7384 if os.path.isabs(src_path):
7385 raise errors.OpPrereqError("Importing an instance from an absolute"
7386 " path requires a source node option.",
7389 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7390 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7391 self.needed_locks[locking.LEVEL_NODE].append(src_node)
7392 if not os.path.isabs(src_path):
7393 self.op.src_path = src_path = \
7394 utils.PathJoin(constants.EXPORT_DIR, src_path)
7396 def _RunAllocator(self):
7397 """Run the allocator based on input opcode.
7400 nics = [n.ToDict() for n in self.nics]
7401 ial = IAllocator(self.cfg, self.rpc,
7402 mode=constants.IALLOCATOR_MODE_ALLOC,
7403 name=self.op.instance_name,
7404 disk_template=self.op.disk_template,
7407 vcpus=self.be_full[constants.BE_VCPUS],
7408 mem_size=self.be_full[constants.BE_MEMORY],
7411 hypervisor=self.op.hypervisor,
7414 ial.Run(self.op.iallocator)
7416 if not ial.success:
7417 raise errors.OpPrereqError("Can't compute nodes using"
7418 " iallocator '%s': %s" %
7419 (self.op.iallocator, ial.info),
7421 if len(ial.result) != ial.required_nodes:
7422 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7423 " of nodes (%s), required %s" %
7424 (self.op.iallocator, len(ial.result),
7425 ial.required_nodes), errors.ECODE_FAULT)
7426 self.op.pnode = ial.result[0]
7427 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7428 self.op.instance_name, self.op.iallocator,
7429 utils.CommaJoin(ial.result))
7430 if ial.required_nodes == 2:
7431 self.op.snode = ial.result[1]
7433 def BuildHooksEnv(self):
7434 """Build hooks env.
7436 This runs on master, primary and secondary nodes of the instance.
7438 """
7439 env = {
7440 "ADD_MODE": self.op.mode,
7441 }
7442 if self.op.mode == constants.INSTANCE_IMPORT:
7443 env["SRC_NODE"] = self.op.src_node
7444 env["SRC_PATH"] = self.op.src_path
7445 env["SRC_IMAGES"] = self.src_images
7447 env.update(_BuildInstanceHookEnv(
7448 name=self.op.instance_name,
7449 primary_node=self.op.pnode,
7450 secondary_nodes=self.secondaries,
7451 status=self.op.start,
7452 os_type=self.op.os_type,
7453 memory=self.be_full[constants.BE_MEMORY],
7454 vcpus=self.be_full[constants.BE_VCPUS],
7455 nics=_NICListToTuple(self, self.nics),
7456 disk_template=self.op.disk_template,
7457 disks=[(d["size"], d["mode"]) for d in self.disks],
7460 hypervisor_name=self.op.hypervisor,
7463 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
7467 def _ReadExportInfo(self):
7468 """Reads the export information from disk.
7470 It will override the opcode source node and path with the actual
7471 information, if these two were not specified before.
7473 @return: the export information
7476 assert self.op.mode == constants.INSTANCE_IMPORT
7478 src_node = self.op.src_node
7479 src_path = self.op.src_path
7481 if src_node is None:
7482 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7483 exp_list = self.rpc.call_export_list(locked_nodes)
7485 for node in exp_list:
7486 if exp_list[node].fail_msg:
7488 if src_path in exp_list[node].payload:
7490 self.op.src_node = src_node = node
7491 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7495 raise errors.OpPrereqError("No export found for relative path %s" %
7496 src_path, errors.ECODE_INVAL)
7498 _CheckNodeOnline(self, src_node)
7499 result = self.rpc.call_export_info(src_node, src_path)
7500 result.Raise("No export or invalid export found in dir %s" % src_path)
7502 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7503 if not export_info.has_section(constants.INISECT_EXP):
7504 raise errors.ProgrammerError("Corrupted export config",
7505 errors.ECODE_ENVIRON)
7507 ei_version = export_info.get(constants.INISECT_EXP, "version")
7508 if (int(ei_version) != constants.EXPORT_VERSION):
7509 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7510 (ei_version, constants.EXPORT_VERSION),
7511 errors.ECODE_ENVIRON)
7514 def _ReadExportParams(self, einfo):
7515 """Use export parameters as defaults.
7517 If the opcode doesn't specify (i.e. override) some instance
7518 parameters, try to take them from the export information, if it declares them.
7522 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7524 if self.op.disk_template is None:
7525 if einfo.has_option(constants.INISECT_INS, "disk_template"):
7526 self.op.disk_template = einfo.get(constants.INISECT_INS,
7529 raise errors.OpPrereqError("No disk template specified and the export"
7530 " is missing the disk_template information",
7533 if not self.op.disks:
7534 if einfo.has_option(constants.INISECT_INS, "disk_count"):
7536 # TODO: import the disk iv_name too
7537 for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7538 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7539 disks.append({"size": disk_sz})
7540 self.op.disks = disks
7542 raise errors.OpPrereqError("No disk info specified and the export"
7543 " is missing the disk information",
7546 if (not self.op.nics and
7547 einfo.has_option(constants.INISECT_INS, "nic_count")):
7549 for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7551 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7552 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7557 if (self.op.hypervisor is None and
7558 einfo.has_option(constants.INISECT_INS, "hypervisor")):
7559 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7560 if einfo.has_section(constants.INISECT_HYP):
7561 # use the export parameters but do not override the ones
7562 # specified by the user
7563 for name, value in einfo.items(constants.INISECT_HYP):
7564 if name not in self.op.hvparams:
7565 self.op.hvparams[name] = value
7567 if einfo.has_section(constants.INISECT_BEP):
7568 # use the parameters, without overriding
7569 for name, value in einfo.items(constants.INISECT_BEP):
7570 if name not in self.op.beparams:
7571 self.op.beparams[name] = value
7573 # try to read the parameters old style, from the main section
7574 for name in constants.BES_PARAMETERS:
7575 if (name not in self.op.beparams and
7576 einfo.has_option(constants.INISECT_INS, name)):
7577 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7579 if einfo.has_section(constants.INISECT_OSP):
7580 # use the parameters, without overriding
7581 for name, value in einfo.items(constants.INISECT_OSP):
7582 if name not in self.op.osparams:
7583 self.op.osparams[name] = value
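# Illustrative sketch (not part of the original module) of the export file
# consumed above; section and option names are approximations, the
# authoritative values are the INISECT_*/parameter constants:
#
#   [export]
#   version = 0
#
#   [instance]
#   name = web1
#   disk_template = drbd
#   disk_count = 1
#   disk0_size = 10240
#   nic_count = 1
#   nic0_mac = aa:00:00:11:22:33
#   hypervisor = xen-pvm
#
#   [hypervisor]
#   kernel_path = /boot/vmlinuz-xenU
#
#   [backend]
#   memory = 512
#
# Each value is only used when the opcode did not specify the corresponding
# parameter explicitly.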
7585 def _RevertToDefaults(self, cluster):
7586 """Revert the instance parameters to the default values.
7590 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7591 for name in self.op.hvparams.keys():
7592 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7593 del self.op.hvparams[name]
7595 be_defs = cluster.SimpleFillBE({})
7596 for name in self.op.beparams.keys():
7597 if name in be_defs and be_defs[name] == self.op.beparams[name]:
7598 del self.op.beparams[name]
7600 nic_defs = cluster.SimpleFillNIC({})
7601 for nic in self.op.nics:
7602 for name in constants.NICS_PARAMETERS:
7603 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
7606 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7607 for name in self.op.osparams.keys():
7608 if name in os_defs and os_defs[name] == self.op.osparams[name]:
7609 del self.op.osparams[name]
7611 def CheckPrereq(self):
7612 """Check prerequisites.
7615 if self.op.mode == constants.INSTANCE_IMPORT:
7616 export_info = self._ReadExportInfo()
7617 self._ReadExportParams(export_info)
7619 if (not self.cfg.GetVGName() and
7620 self.op.disk_template not in constants.DTS_NOT_LVM):
7621 raise errors.OpPrereqError("Cluster does not support lvm-based"
7622 " instances", errors.ECODE_STATE)
7624 if self.op.hypervisor is None:
7625 self.op.hypervisor = self.cfg.GetHypervisorType()
7627 cluster = self.cfg.GetClusterInfo()
7628 enabled_hvs = cluster.enabled_hypervisors
7629 if self.op.hypervisor not in enabled_hvs:
7630 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7631 " cluster (%s)" % (self.op.hypervisor,
7632 ",".join(enabled_hvs)),
7635 # check hypervisor parameter syntax (locally)
7636 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7637 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
7639 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7640 hv_type.CheckParameterSyntax(filled_hvp)
7641 self.hv_full = filled_hvp
7642 # check that we don't specify global parameters on an instance
7643 _CheckGlobalHvParams(self.op.hvparams)
7645 # fill and remember the beparams dict
7646 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7647 self.be_full = cluster.SimpleFillBE(self.op.beparams)
7649 # build os parameters
7650 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7652 # now that hvp/bep are in final format, reset them to defaults if told to do so
7654 if self.op.identify_defaults:
7655 self._RevertToDefaults(cluster)
7659 for idx, nic in enumerate(self.op.nics):
7660 nic_mode_req = nic.get("mode", None)
7661 nic_mode = nic_mode_req
7662 if nic_mode is None:
7663 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7665 # in routed mode, for the first nic, the default ip is 'auto'
7666 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7667 default_ip_mode = constants.VALUE_AUTO
7669 default_ip_mode = constants.VALUE_NONE
7671 # ip validity checks
7672 ip = nic.get("ip", default_ip_mode)
7673 if ip is None or ip.lower() == constants.VALUE_NONE:
7675 elif ip.lower() == constants.VALUE_AUTO:
7676 if not self.op.name_check:
7677 raise errors.OpPrereqError("IP address set to auto but name checks"
7678 " have been skipped",
7680 nic_ip = self.hostname1.ip
7682 if not netutils.IPAddress.IsValid(ip):
7683 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
7687 # TODO: check the ip address for uniqueness
7688 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7689 raise errors.OpPrereqError("Routed nic mode requires an ip address",
7692 # MAC address verification
7693 mac = nic.get("mac", constants.VALUE_AUTO)
7694 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7695 mac = utils.NormalizeAndValidateMac(mac)
7698 self.cfg.ReserveMAC(mac, self.proc.GetECId())
7699 except errors.ReservationError:
7700 raise errors.OpPrereqError("MAC address %s already in use"
7701 " in cluster" % mac,
7702 errors.ECODE_NOTUNIQUE)
7704 # Build nic parameters
7705 link = nic.get(constants.INIC_LINK, None)
7708 nicparams[constants.NIC_MODE] = nic_mode_req
7710 nicparams[constants.NIC_LINK] = link
7712 check_params = cluster.SimpleFillNIC(nicparams)
7713 objects.NIC.CheckParameterSyntax(check_params)
7714 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7716 # disk checks/pre-build
7718 for disk in self.op.disks:
7719 mode = disk.get("mode", constants.DISK_RDWR)
7720 if mode not in constants.DISK_ACCESS_SET:
7721 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7722 mode, errors.ECODE_INVAL)
7723 size = disk.get("size", None)
7725 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7728 except (TypeError, ValueError):
7729 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7731 vg = disk.get("vg", self.cfg.GetVGName())
7732 new_disk = {"size": size, "mode": mode, "vg": vg}
7734 new_disk["adopt"] = disk["adopt"]
7735 self.disks.append(new_disk)
7737 if self.op.mode == constants.INSTANCE_IMPORT:
7739 # Check that the new instance doesn't have less disks than the export
7740 instance_disks = len(self.disks)
7741 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7742 if instance_disks < export_disks:
7743 raise errors.OpPrereqError("Not enough disks to import."
7744 " (instance: %d, export: %d)" %
7745 (instance_disks, export_disks),
7749 for idx in range(export_disks):
7750 option = 'disk%d_dump' % idx
7751 if export_info.has_option(constants.INISECT_INS, option):
7752 # FIXME: are the old os-es, disk sizes, etc. useful?
7753 export_name = export_info.get(constants.INISECT_INS, option)
7754 image = utils.PathJoin(self.op.src_path, export_name)
7755 disk_images.append(image)
7757 disk_images.append(False)
7759 self.src_images = disk_images
7761 old_name = export_info.get(constants.INISECT_INS, 'name')
7763 exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7764 except (TypeError, ValueError), err:
7765 raise errors.OpPrereqError("Invalid export file, nic_count is not"
7766 " an integer: %s" % str(err),
7768 if self.op.instance_name == old_name:
7769 for idx, nic in enumerate(self.nics):
7770 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7771 nic_mac_ini = 'nic%d_mac' % idx
7772 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7774 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7776 # ip ping checks (we use the same ip that was resolved in ExpandNames)
7777 if self.op.ip_check:
7778 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7779 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7780 (self.check_ip, self.op.instance_name),
7781 errors.ECODE_NOTUNIQUE)
7783 #### mac address generation
7784 # By generating the MAC address here, both the allocator and the hooks get
7785 # the real, final MAC address rather than the 'auto' or 'generate' value.
7786 # There is a race condition between the generation and the instance object
7787 # creation, which means that we know the MAC is valid now, but we're not
7788 # sure it will still be when we actually add the instance. If things go bad,
7789 # adding the instance will abort because of a duplicate MAC, and the
7790 # creation job will fail.
7791 for nic in self.nics:
7792 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7793 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7797 if self.op.iallocator is not None:
7798 self._RunAllocator()
7800 #### node related checks
7802 # check primary node
7803 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7804 assert self.pnode is not None, \
7805 "Cannot retrieve locked node %s" % self.op.pnode
7807 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7808 pnode.name, errors.ECODE_STATE)
7810 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7811 pnode.name, errors.ECODE_STATE)
7812 if not pnode.vm_capable:
7813 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
7814 " '%s'" % pnode.name, errors.ECODE_STATE)
7816 self.secondaries = []
7818 # mirror node verification
7819 if self.op.disk_template in constants.DTS_INT_MIRROR:
7820 if self.op.snode == pnode.name:
7821 raise errors.OpPrereqError("The secondary node cannot be the"
7822 " primary node.", errors.ECODE_INVAL)
7823 _CheckNodeOnline(self, self.op.snode)
7824 _CheckNodeNotDrained(self, self.op.snode)
7825 _CheckNodeVmCapable(self, self.op.snode)
7826 self.secondaries.append(self.op.snode)
7828 nodenames = [pnode.name] + self.secondaries
7830 if not self.adopt_disks:
7831 # Check lv size requirements, if not adopting
7832 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
7833 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
7835 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
7836 all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
7837 if len(all_lvs) != len(self.disks):
7838 raise errors.OpPrereqError("Duplicate volume names given for adoption",
7840 for lv_name in all_lvs:
7842 # FIXME: lv_name here is "vg/lv"; we need to ensure that other calls
7843 # to ReserveLV use the same syntax
7844 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7845 except errors.ReservationError:
7846 raise errors.OpPrereqError("LV named %s used by another instance" %
7847 lv_name, errors.ECODE_NOTUNIQUE)
7849 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
7850 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
7852 node_lvs = self.rpc.call_lv_list([pnode.name],
7853 vg_names.payload.keys())[pnode.name]
7854 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7855 node_lvs = node_lvs.payload
7857 delta = all_lvs.difference(node_lvs.keys())
7859 raise errors.OpPrereqError("Missing logical volume(s): %s" %
7860 utils.CommaJoin(delta),
7862 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7864 raise errors.OpPrereqError("Online logical volumes found, cannot"
7865 " adopt: %s" % utils.CommaJoin(online_lvs),
7867 # update the disk sizes based on what was found
7868 for dsk in self.disks:
7869 dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
7871 elif self.op.disk_template == constants.DT_BLOCK:
7872 # Normalize and de-duplicate device paths
7873 all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks])
7874 if len(all_disks) != len(self.disks):
7875 raise errors.OpPrereqError("Duplicate disk names given for adoption",
7877 baddisks = [d for d in all_disks
7878 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
7880 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
7881 " cannot be adopted" %
7882 (", ".join(baddisks),
7883 constants.ADOPTABLE_BLOCKDEV_ROOT),
7886 node_disks = self.rpc.call_bdev_sizes([pnode.name],
7887 list(all_disks))[pnode.name]
7888 node_disks.Raise("Cannot get block device information from node %s" %
7890 node_disks = node_disks.payload
7891 delta = all_disks.difference(node_disks.keys())
7893 raise errors.OpPrereqError("Missing block device(s): %s" %
7894 utils.CommaJoin(delta),
7896 for dsk in self.disks:
7897 dsk["size"] = int(float(node_disks[dsk["adopt"]]))
7899 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7901 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7902 # check OS parameters (remotely)
7903 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7905 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7907 # memory check on primary node
7909 _CheckNodeFreeMemory(self, self.pnode.name,
7910 "creating instance %s" % self.op.instance_name,
7911 self.be_full[constants.BE_MEMORY],
7914 self.dry_run_result = list(nodenames)
7916 def Exec(self, feedback_fn):
7917 """Create and add the instance to the cluster.
7920 instance = self.op.instance_name
7921 pnode_name = self.pnode.name
7923 ht_kind = self.op.hypervisor
7924 if ht_kind in constants.HTS_REQ_PORT:
7925 network_port = self.cfg.AllocatePort()
7929 if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
7930 # this is needed because os.path.join does not accept None arguments
7931 if self.op.file_storage_dir is None:
7932 string_file_storage_dir = ""
7934 string_file_storage_dir = self.op.file_storage_dir
7936 # build the full file storage dir path
7937 if self.op.disk_template == constants.DT_SHARED_FILE:
7938 get_fsd_fn = self.cfg.GetSharedFileStorageDir
7940 get_fsd_fn = self.cfg.GetFileStorageDir
7942 file_storage_dir = utils.PathJoin(get_fsd_fn(),
7943 string_file_storage_dir, instance)
7945 file_storage_dir = ""
7947 disks = _GenerateDiskTemplate(self,
7948 self.op.disk_template,
7949 instance, pnode_name,
7953 self.op.file_driver,
7957 iobj = objects.Instance(name=instance, os=self.op.os_type,
7958 primary_node=pnode_name,
7959 nics=self.nics, disks=disks,
7960 disk_template=self.op.disk_template,
7962 network_port=network_port,
7963 beparams=self.op.beparams,
7964 hvparams=self.op.hvparams,
7965 hypervisor=self.op.hypervisor,
7966 osparams=self.op.osparams,
7969 if self.adopt_disks:
7970 if self.op.disk_template == constants.DT_PLAIN:
7971 # rename LVs to the newly-generated names; we need to construct
7972 # 'fake' LV disks with the old data, plus the new unique_id
7973 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7975 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
7976 rename_to.append(t_dsk.logical_id)
7977 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7978 self.cfg.SetDiskID(t_dsk, pnode_name)
7979 result = self.rpc.call_blockdev_rename(pnode_name,
7980 zip(tmp_disks, rename_to))
7981 result.Raise("Failed to rename adopted LVs")
7983 feedback_fn("* creating instance disks...")
7985 _CreateDisks(self, iobj)
7986 except errors.OpExecError:
7987 self.LogWarning("Device creation failed, reverting...")
7989 _RemoveDisks(self, iobj)
7991 self.cfg.ReleaseDRBDMinors(instance)
7994 if self.cfg.GetClusterInfo().prealloc_wipe_disks:
7995 feedback_fn("* wiping instance disks...")
7997 _WipeDisks(self, iobj)
7998 except errors.OpExecError:
7999 self.LogWarning("Device wiping failed, reverting...")
8001 _RemoveDisks(self, iobj)
8003 self.cfg.ReleaseDRBDMinors(instance)
8006 feedback_fn("adding instance %s to cluster config" % instance)
8008 self.cfg.AddInstance(iobj, self.proc.GetECId())
8010 # Declare that we don't want to remove the instance lock anymore, as we've
8011 # added the instance to the config
8012 del self.remove_locks[locking.LEVEL_INSTANCE]
8013 # Unlock all the nodes
8014 if self.op.mode == constants.INSTANCE_IMPORT:
8015 nodes_keep = [self.op.src_node]
8016 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
8017 if node != self.op.src_node]
8018 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
8019 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
8021 self.context.glm.release(locking.LEVEL_NODE)
8022 del self.acquired_locks[locking.LEVEL_NODE]
8024 if self.op.wait_for_sync:
8025 disk_abort = not _WaitForSync(self, iobj)
8026 elif iobj.disk_template in constants.DTS_INT_MIRROR:
8027 # make sure the disks are not degraded (still sync-ing is ok)
8029 feedback_fn("* checking mirrors status")
8030 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
8035 _RemoveDisks(self, iobj)
8036 self.cfg.RemoveInstance(iobj.name)
8037 # Make sure the instance lock gets removed
8038 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
8039 raise errors.OpExecError("There are some degraded disks for"
8042 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
8043 if self.op.mode == constants.INSTANCE_CREATE:
8044 if not self.op.no_install:
8045 feedback_fn("* running the instance OS create scripts...")
8046 # FIXME: pass debug option from opcode to backend
8047 result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
8048 self.op.debug_level)
8049 result.Raise("Could not add os for instance %s"
8050 " on node %s" % (instance, pnode_name))
8052 elif self.op.mode == constants.INSTANCE_IMPORT:
8053 feedback_fn("* running the instance OS import scripts...")
8057 for idx, image in enumerate(self.src_images):
8061 # FIXME: pass debug option from opcode to backend
8062 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
8063 constants.IEIO_FILE, (image, ),
8064 constants.IEIO_SCRIPT,
8065 (iobj.disks[idx], idx),
8067 transfers.append(dt)
8070 masterd.instance.TransferInstanceData(self, feedback_fn,
8071 self.op.src_node, pnode_name,
8072 self.pnode.secondary_ip,
8074 if not compat.all(import_result):
8075 self.LogWarning("Some disks for instance %s on node %s were not"
8076 " imported successfully" % (instance, pnode_name))
8078 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
8079 feedback_fn("* preparing remote import...")
8080 # The source cluster will stop the instance before attempting to make a
8081 # connection. In some cases stopping an instance can take a long time,
8082 # hence the shutdown timeout is added to the connection timeout.
8083 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
8084 self.op.source_shutdown_timeout)
8085 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
8087 assert iobj.primary_node == self.pnode.name
8089 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
8090 self.source_x509_ca,
8091 self._cds, timeouts)
8092 if not compat.all(disk_results):
8093 # TODO: Should the instance still be started, even if some disks
8094 # failed to import (valid for local imports, too)?
8095 self.LogWarning("Some disks for instance %s on node %s were not"
8096 " imported successfully" % (instance, pnode_name))
8098 # Run rename script on newly imported instance
8099 assert iobj.name == instance
8100 feedback_fn("Running rename script for %s" % instance)
8101 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
8102 self.source_instance_name,
8103 self.op.debug_level)
8105 self.LogWarning("Failed to run rename script for %s on node"
8106 " %s: %s" % (instance, pnode_name, result.fail_msg))
8109 # also checked in the prereq part
8110 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
8114 iobj.admin_up = True
8115 self.cfg.Update(iobj, feedback_fn)
8116 logging.info("Starting instance %s on node %s", instance, pnode_name)
8117 feedback_fn("* starting instance...")
8118 result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
8119 result.Raise("Could not start instance")
8121 return list(iobj.all_nodes)
8124 class LUInstanceConsole(NoHooksLU):
8125 """Connect to an instance's console.
8127 This is somewhat special in that it returns the command line that
8128 you need to run on the master node in order to connect to the console.
8134 def ExpandNames(self):
8135 self._ExpandAndLockInstance()
8137 def CheckPrereq(self):
8138 """Check prerequisites.
8140 This checks that the instance is in the cluster.
8143 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8144 assert self.instance is not None, \
8145 "Cannot retrieve locked instance %s" % self.op.instance_name
8146 _CheckNodeOnline(self, self.instance.primary_node)
8148 def Exec(self, feedback_fn):
8149 """Connect to the console of an instance
8152 instance = self.instance
8153 node = instance.primary_node
8155 node_insts = self.rpc.call_instance_list([node],
8156 [instance.hypervisor])[node]
8157 node_insts.Raise("Can't get node information from %s" % node)
8159 if instance.name not in node_insts.payload:
8160 if instance.admin_up:
8161 state = constants.INSTST_ERRORDOWN
8163 state = constants.INSTST_ADMINDOWN
8164 raise errors.OpExecError("Instance %s is not running (state %s)" %
8165 (instance.name, state))
8167 logging.debug("Connecting to console of %s on %s", instance.name, node)
8169 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
8172 def _GetInstanceConsole(cluster, instance):
8173 """Returns console information for an instance.
8175 @type cluster: L{objects.Cluster}
8176 @type instance: L{objects.Instance}
8180 hyper = hypervisor.GetHypervisor(instance.hypervisor)
8181 # beparams and hvparams are passed separately, to avoid editing the
8182 # instance and then saving the defaults in the instance itself.
8183 hvparams = cluster.FillHV(instance)
8184 beparams = cluster.FillBE(instance)
8185 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
8187 assert console.instance == instance.name
8188 assert console.Validate()
8190 return console.ToDict()
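# Illustrative usage sketch (hypothetical values, not part of the original
# module): LUInstanceConsole.Exec returns the dictionary built here, e.g.
#
#   _GetInstanceConsole(cluster, instance)
#   # => {"instance": "web1", "kind": "ssh", "host": "node1",
#   #     "user": "root", "command": [...], ...}
#
# The exact keys depend on the hypervisor's GetInstanceConsole(); only the
# instance name and overall validity are asserted above.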
8193 class LUInstanceReplaceDisks(LogicalUnit):
8194 """Replace the disks of an instance.
8197 HPATH = "mirrors-replace"
8198 HTYPE = constants.HTYPE_INSTANCE
8201 def CheckArguments(self):
8202 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
8205 def ExpandNames(self):
8206 self._ExpandAndLockInstance()
8208 if self.op.iallocator is not None:
8209 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8211 elif self.op.remote_node is not None:
8212 remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8213 self.op.remote_node = remote_node
8215 # Warning: do not remove the locking of the new secondary here
8216 # unless DRBD8.AddChildren is changed to work in parallel;
8217 # currently it doesn't since parallel invocations of
8218 # FindUnusedMinor will conflict
8219 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
8220 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
8223 self.needed_locks[locking.LEVEL_NODE] = []
8224 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8226 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
8227 self.op.iallocator, self.op.remote_node,
8228 self.op.disks, False, self.op.early_release)
8230 self.tasklets = [self.replacer]
8232 def DeclareLocks(self, level):
8233 # If we're not already locking all nodes in the set we have to declare the
8234 # instance's primary/secondary nodes.
8235 if (level == locking.LEVEL_NODE and
8236 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
8237 self._LockInstancesNodes()
8239 def BuildHooksEnv(self):
8242 This runs on the master, the primary and all the secondaries.
8245 instance = self.replacer.instance
8247 "MODE": self.op.mode,
8248 "NEW_SECONDARY": self.op.remote_node,
8249 "OLD_SECONDARY": instance.secondary_nodes[0],
8251 env.update(_BuildInstanceHookEnvByObject(self, instance))
8253 self.cfg.GetMasterNode(),
8254 instance.primary_node,
8256 if self.op.remote_node is not None:
8257 nl.append(self.op.remote_node)
8261 class TLReplaceDisks(Tasklet):
8262 """Replaces disks for an instance.
8264 Note: Locking is not within the scope of this class.
8267 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
8268 disks, delay_iallocator, early_release):
8269 """Initializes this class.
8272 Tasklet.__init__(self, lu)
8275 self.instance_name = instance_name
8277 self.iallocator_name = iallocator_name
8278 self.remote_node = remote_node
8280 self.delay_iallocator = delay_iallocator
8281 self.early_release = early_release
8284 self.instance = None
8285 self.new_node = None
8286 self.target_node = None
8287 self.other_node = None
8288 self.remote_node_info = None
8289 self.node_secondary_ip = None
8292 def CheckArguments(mode, remote_node, iallocator):
8293 """Helper function for users of this class.
8296 # check for valid parameter combination
8297 if mode == constants.REPLACE_DISK_CHG:
8298 if remote_node is None and iallocator is None:
8299 raise errors.OpPrereqError("When changing the secondary either an"
8300 " iallocator script must be used or the"
8301 " new node given", errors.ECODE_INVAL)
8303 if remote_node is not None and iallocator is not None:
8304 raise errors.OpPrereqError("Give either the iallocator or the new"
8305 " secondary, not both", errors.ECODE_INVAL)
8307 elif remote_node is not None or iallocator is not None:
8308 # Not replacing the secondary
8309 raise errors.OpPrereqError("The iallocator and new node options can"
8310 " only be used when changing the"
8311 " secondary node", errors.ECODE_INVAL)
8314 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
8315 """Compute a new secondary node using an IAllocator.
8318 ial = IAllocator(lu.cfg, lu.rpc,
8319 mode=constants.IALLOCATOR_MODE_RELOC,
8321 relocate_from=relocate_from)
8323 ial.Run(iallocator_name)
8326 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
8327 " %s" % (iallocator_name, ial.info),
8330 if len(ial.result) != ial.required_nodes:
8331 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8332 " of nodes (%s), required %s" %
8334 len(ial.result), ial.required_nodes),
8337 remote_node_name = ial.result[0]
8339 lu.LogInfo("Selected new secondary for instance '%s': %s",
8340 instance_name, remote_node_name)
8342 return remote_node_name
8344 def _FindFaultyDisks(self, node_name):
8345 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
8348 def CheckPrereq(self):
8349 """Check prerequisites.
8351 This checks that the instance is in the cluster.
8354 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
8355 assert instance is not None, \
8356 "Cannot retrieve locked instance %s" % self.instance_name
8358 if instance.disk_template != constants.DT_DRBD8:
8359 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
8360 " instances", errors.ECODE_INVAL)
8362 if len(instance.secondary_nodes) != 1:
8363 raise errors.OpPrereqError("The instance has a strange layout,"
8364 " expected one secondary but found %d" %
8365 len(instance.secondary_nodes),
8368 if not self.delay_iallocator:
8369 self._CheckPrereq2()
8371 def _CheckPrereq2(self):
8372 """Check prerequisites, second part.
8374 This function should always be part of CheckPrereq. It was separated and is
8375 now called from Exec because during node evacuation iallocator was only
8376 called with an unmodified cluster model, not taking planned changes into account.
8380 instance = self.instance
8381 secondary_node = instance.secondary_nodes[0]
8383 if self.iallocator_name is None:
8384 remote_node = self.remote_node
8386 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
8387 instance.name, instance.secondary_nodes)
8389 if remote_node is not None:
8390 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
8391 assert self.remote_node_info is not None, \
8392 "Cannot retrieve locked node %s" % remote_node
8394 self.remote_node_info = None
8396 if remote_node == self.instance.primary_node:
8397 raise errors.OpPrereqError("The specified node is the primary node of"
8398 " the instance.", errors.ECODE_INVAL)
8400 if remote_node == secondary_node:
8401 raise errors.OpPrereqError("The specified node is already the"
8402 " secondary node of the instance.",
8405 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
8406 constants.REPLACE_DISK_CHG):
8407 raise errors.OpPrereqError("Cannot specify disks to be replaced",
8410 if self.mode == constants.REPLACE_DISK_AUTO:
8411 faulty_primary = self._FindFaultyDisks(instance.primary_node)
8412 faulty_secondary = self._FindFaultyDisks(secondary_node)
8414 if faulty_primary and faulty_secondary:
8415 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
8416 " one node and can not be repaired"
8417 " automatically" % self.instance_name,
8421 self.disks = faulty_primary
8422 self.target_node = instance.primary_node
8423 self.other_node = secondary_node
8424 check_nodes = [self.target_node, self.other_node]
8425 elif faulty_secondary:
8426 self.disks = faulty_secondary
8427 self.target_node = secondary_node
8428 self.other_node = instance.primary_node
8429 check_nodes = [self.target_node, self.other_node]
8435 # Non-automatic modes
8436 if self.mode == constants.REPLACE_DISK_PRI:
8437 self.target_node = instance.primary_node
8438 self.other_node = secondary_node
8439 check_nodes = [self.target_node, self.other_node]
8441 elif self.mode == constants.REPLACE_DISK_SEC:
8442 self.target_node = secondary_node
8443 self.other_node = instance.primary_node
8444 check_nodes = [self.target_node, self.other_node]
8446 elif self.mode == constants.REPLACE_DISK_CHG:
8447 self.new_node = remote_node
8448 self.other_node = instance.primary_node
8449 self.target_node = secondary_node
8450 check_nodes = [self.new_node, self.other_node]
8452 _CheckNodeNotDrained(self.lu, remote_node)
8453 _CheckNodeVmCapable(self.lu, remote_node)
8455 old_node_info = self.cfg.GetNodeInfo(secondary_node)
8456 assert old_node_info is not None
8457 if old_node_info.offline and not self.early_release:
8458 # doesn't make sense to delay the release
8459 self.early_release = True
8460 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
8461 " early-release mode", secondary_node)
8464 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
8467 # If not specified, all disks should be replaced
8469 self.disks = range(len(self.instance.disks))
8471 for node in check_nodes:
8472 _CheckNodeOnline(self.lu, node)
8474 # Check whether disks are valid
8475 for disk_idx in self.disks:
8476 instance.FindDisk(disk_idx)
8478 # Get secondary node IP addresses
8481 for node_name in [self.target_node, self.other_node, self.new_node]:
8482 if node_name is not None:
8483 node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
8485 self.node_secondary_ip = node_2nd_ip
8487 def Exec(self, feedback_fn):
8488 """Execute disk replacement.
8490 This dispatches the disk replacement to the appropriate handler.
8493 if self.delay_iallocator:
8494 self._CheckPrereq2()
8497 feedback_fn("No disks need replacement")
8500 feedback_fn("Replacing disk(s) %s for %s" %
8501 (utils.CommaJoin(self.disks), self.instance.name))
8503 activate_disks = (not self.instance.admin_up)
8505 # Activate the instance disks if we're replacing them on a down instance
8507 _StartInstanceDisks(self.lu, self.instance, True)
8510 # Should we replace the secondary node?
8511 if self.new_node is not None:
8512 fn = self._ExecDrbd8Secondary
8514 fn = self._ExecDrbd8DiskOnly
8516 return fn(feedback_fn)
8519 # Deactivate the instance disks if we're replacing them on a
8522 _SafeShutdownInstanceDisks(self.lu, self.instance)
8524 def _CheckVolumeGroup(self, nodes):
8525 self.lu.LogInfo("Checking volume groups")
8527 vgname = self.cfg.GetVGName()
8529 # Make sure volume group exists on all involved nodes
8530 results = self.rpc.call_vg_list(nodes)
8532 raise errors.OpExecError("Can't list volume groups on the nodes")
8536 res.Raise("Error checking node %s" % node)
8537 if vgname not in res.payload:
8538 raise errors.OpExecError("Volume group '%s' not found on node %s" %
8541 def _CheckDisksExistence(self, nodes):
8542 # Check disk existence
8543 for idx, dev in enumerate(self.instance.disks):
8544 if idx not in self.disks:
8548 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
8549 self.cfg.SetDiskID(dev, node)
8551 result = self.rpc.call_blockdev_find(node, dev)
8553 msg = result.fail_msg
8554 if msg or not result.payload:
8556 msg = "disk not found"
8557 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
8560 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
8561 for idx, dev in enumerate(self.instance.disks):
8562 if idx not in self.disks:
8565 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
8568 if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
8570 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
8571 " replace disks for instance %s" %
8572 (node_name, self.instance.name))
8574 def _CreateNewStorage(self, node_name):
8575 vgname = self.cfg.GetVGName()
8578 for idx, dev in enumerate(self.instance.disks):
8579 if idx not in self.disks:
8582 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
8584 self.cfg.SetDiskID(dev, node_name)
8586 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
8587 names = _GenerateUniqueNames(self.lu, lv_names)
8589 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
8590 logical_id=(vgname, names[0]))
8591 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
8592 logical_id=(vgname, names[1]))
8594 new_lvs = [lv_data, lv_meta]
8595 old_lvs = dev.children
8596 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
8598 # we pass force_create=True to force the LVM creation
8599 for new_lv in new_lvs:
8600 _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
8601 _GetInstanceInfoText(self.instance), False)
8605 def _CheckDevices(self, node_name, iv_names):
8606 for name, (dev, _, _) in iv_names.iteritems():
8607 self.cfg.SetDiskID(dev, node_name)
8609 result = self.rpc.call_blockdev_find(node_name, dev)
8611 msg = result.fail_msg
8612 if msg or not result.payload:
8614 msg = "disk not found"
8615 raise errors.OpExecError("Can't find DRBD device %s: %s" %
8618 if result.payload.is_degraded:
8619 raise errors.OpExecError("DRBD device %s is degraded!" % name)
8621 def _RemoveOldStorage(self, node_name, iv_names):
8622 for name, (_, old_lvs, _) in iv_names.iteritems():
8623 self.lu.LogInfo("Remove logical volumes for %s" % name)
8626 self.cfg.SetDiskID(lv, node_name)
8628 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
8630 self.lu.LogWarning("Can't remove old LV: %s" % msg,
8631 hint="remove unused LVs manually")
8633 def _ReleaseNodeLock(self, node_name):
8634 """Releases the lock for a given node."""
8635 self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
8637 def _ExecDrbd8DiskOnly(self, feedback_fn):
8638 """Replace a disk on the primary or secondary for DRBD 8.
8640 The algorithm for replace is quite complicated:
8642 1. for each disk to be replaced:
8644 1. create new LVs on the target node with unique names
8645 1. detach old LVs from the drbd device
8646 1. rename old LVs to name_replaced.<time_t>
8647 1. rename new LVs to old LVs
8648 1. attach the new LVs (with the old names now) to the drbd device
8650 1. wait for sync across all devices
8652 1. for each modified disk:
8654 1. remove old LVs (which have the name name_replaces.<time_t>)
8656 Failures are not very well handled.
8661 # Step: check device activation
8662 self.lu.LogStep(1, steps_total, "Check device existence")
8663 self._CheckDisksExistence([self.other_node, self.target_node])
8664 self._CheckVolumeGroup([self.target_node, self.other_node])
8666 # Step: check other node consistency
8667 self.lu.LogStep(2, steps_total, "Check peer consistency")
8668 self._CheckDisksConsistency(self.other_node,
8669 self.other_node == self.instance.primary_node,
8672 # Step: create new storage
8673 self.lu.LogStep(3, steps_total, "Allocate new storage")
8674 iv_names = self._CreateNewStorage(self.target_node)
8676 # Step: for each lv, detach+rename*2+attach
8677 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8678 for dev, old_lvs, new_lvs in iv_names.itervalues():
8679 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
8681 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
8683 result.Raise("Can't detach drbd from local storage on node"
8684 " %s for device %s" % (self.target_node, dev.iv_name))
8686 #cfg.Update(instance)
8688 # ok, we created the new LVs, so now we know we have the needed
8689 # storage; as such, we proceed on the target node to rename
8690 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
8691 # using the assumption that logical_id == physical_id (which in
8692 # turn is the unique_id on that node)
8694 # FIXME(iustin): use a better name for the replaced LVs
8695 temp_suffix = int(time.time())
8696 ren_fn = lambda d, suff: (d.physical_id[0],
8697 d.physical_id[1] + "_replaced-%s" % suff)
8699 # Build the rename list based on what LVs exist on the node
8700 rename_old_to_new = []
8701 for to_ren in old_lvs:
8702 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
8703 if not result.fail_msg and result.payload:
8705 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
8707 self.lu.LogInfo("Renaming the old LVs on the target node")
8708 result = self.rpc.call_blockdev_rename(self.target_node,
8710 result.Raise("Can't rename old LVs on node %s" % self.target_node)
8712 # Now we rename the new LVs to the old LVs
8713 self.lu.LogInfo("Renaming the new LVs on the target node")
8714 rename_new_to_old = [(new, old.physical_id)
8715 for old, new in zip(old_lvs, new_lvs)]
8716 result = self.rpc.call_blockdev_rename(self.target_node,
8718 result.Raise("Can't rename new LVs on node %s" % self.target_node)
8720 for old, new in zip(old_lvs, new_lvs):
8721 new.logical_id = old.logical_id
8722 self.cfg.SetDiskID(new, self.target_node)
8724 for disk in old_lvs:
8725 disk.logical_id = ren_fn(disk, temp_suffix)
8726 self.cfg.SetDiskID(disk, self.target_node)
8728 # Now that the new lvs have the old name, we can add them to the device
8729 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
8730 result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
8732 msg = result.fail_msg
8734 for new_lv in new_lvs:
8735 msg2 = self.rpc.call_blockdev_remove(self.target_node,
8738 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
8739 hint=("cleanup manually the unused logical"
8741 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
8743 dev.children = new_lvs
8745 self.cfg.Update(self.instance, feedback_fn)
8748 if self.early_release:
8749 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8751 self._RemoveOldStorage(self.target_node, iv_names)
8752 # WARNING: we release both node locks here, do not do other RPCs
8753 # than WaitForSync to the primary node
8754 self._ReleaseNodeLock([self.target_node, self.other_node])
8757 # This can fail as the old devices are degraded and _WaitForSync
8758 # returns a combined result over all disks, so we don't check its return value
8759 self.lu.LogStep(cstep, steps_total, "Sync devices")
8761 _WaitForSync(self.lu, self.instance)
8763 # Check all devices manually
8764 self._CheckDevices(self.instance.primary_node, iv_names)
8766 # Step: remove old storage
8767 if not self.early_release:
8768 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8770 self._RemoveOldStorage(self.target_node, iv_names)
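# Illustrative sketch (not part of the original module) of the rename dance
# performed above for a single disk; LV names are hypothetical:
#
#   old data LV: xenvg/uuid1.disk0_data -> xenvg/uuid1.disk0_data_replaced-<ts>
#   old meta LV: xenvg/uuid1.disk0_meta -> xenvg/uuid1.disk0_meta_replaced-<ts>
#   new data LV: xenvg/uuid2.disk0_data -> xenvg/uuid1.disk0_data
#   new meta LV: xenvg/uuid2.disk0_meta -> xenvg/uuid1.disk0_meta
#
# so the DRBD device ends up attached to the freshly created LVs carrying the
# original names, while the replaced LVs keep a timestamped suffix until they
# are removed in the final step.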
8772 def _ExecDrbd8Secondary(self, feedback_fn):
8773 """Replace the secondary node for DRBD 8.
8775 The algorithm for replace is quite complicated:
8776 - for all disks of the instance:
8777 - create new LVs on the new node with same names
8778 - shutdown the drbd device on the old secondary
8779 - disconnect the drbd network on the primary
8780 - create the drbd device on the new secondary
8781 - network attach the drbd on the primary, using an artifice:
8782 the drbd code for Attach() will connect to the network if it
8783 finds a device which is connected to the good local disks but not network enabled
8785 - wait for sync across all devices
8786 - remove all disks from the old secondary
8788 Failures are not very well handled.
8793 # Step: check device activation
8794 self.lu.LogStep(1, steps_total, "Check device existence")
8795 self._CheckDisksExistence([self.instance.primary_node])
8796 self._CheckVolumeGroup([self.instance.primary_node])
8798 # Step: check other node consistency
8799 self.lu.LogStep(2, steps_total, "Check peer consistency")
8800 self._CheckDisksConsistency(self.instance.primary_node, True, True)
8802 # Step: create new storage
8803 self.lu.LogStep(3, steps_total, "Allocate new storage")
8804 for idx, dev in enumerate(self.instance.disks):
8805 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8806 (self.new_node, idx))
8807 # we pass force_create=True to force LVM creation
8808 for new_lv in dev.children:
8809 _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8810 _GetInstanceInfoText(self.instance), False)
8812 # Step 4: drbd minors and drbd setup changes
8813 # after this, we must manually remove the drbd minors on both the
8814 # error and the success paths
8815 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8816 minors = self.cfg.AllocateDRBDMinor([self.new_node
8817 for dev in self.instance.disks],
8819 logging.debug("Allocated minors %r", minors)
8822 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8823 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
8824 (self.new_node, idx))
8825 # create new devices on new_node; note that we create two IDs:
8826 # one without port, so the drbd will be activated without
8827 # networking information on the new node at this stage, and one
8828 # with network, for the later activation in step 4
8829 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8830 if self.instance.primary_node == o_node1:
8833 assert self.instance.primary_node == o_node2, "Three-node instance?"
8836 new_alone_id = (self.instance.primary_node, self.new_node, None,
8837 p_minor, new_minor, o_secret)
8838 new_net_id = (self.instance.primary_node, self.new_node, o_port,
8839 p_minor, new_minor, o_secret)
8841 iv_names[idx] = (dev, dev.children, new_net_id)
8842 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8844 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8845 logical_id=new_alone_id,
8846 children=dev.children,
8849 _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8850 _GetInstanceInfoText(self.instance), False)
8851 except errors.GenericError:
8852 self.cfg.ReleaseDRBDMinors(self.instance.name)
8855 # We have new devices, shutdown the drbd on the old secondary
8856 for idx, dev in enumerate(self.instance.disks):
8857 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8858 self.cfg.SetDiskID(dev, self.target_node)
8859 msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8861 self.lu.LogWarning("Failed to shut down drbd for disk/%d on old"
8862 " node: %s" % (idx, msg),
8863 hint=("Please cleanup this device manually as"
8864 " soon as possible"))
8866 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8867 result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8868 self.node_secondary_ip,
8869 self.instance.disks)\
8870 [self.instance.primary_node]
8872 msg = result.fail_msg
8874 # detaches didn't succeed (unlikely)
8875 self.cfg.ReleaseDRBDMinors(self.instance.name)
8876 raise errors.OpExecError("Can't detach the disks from the network on"
8877 " old node: %s" % (msg,))
8879 # if we managed to detach at least one, we update all the disks of
8880 # the instance to point to the new secondary
8881 self.lu.LogInfo("Updating instance configuration")
8882 for dev, _, new_logical_id in iv_names.itervalues():
8883 dev.logical_id = new_logical_id
8884 self.cfg.SetDiskID(dev, self.instance.primary_node)
8886 self.cfg.Update(self.instance, feedback_fn)
8888 # and now perform the drbd attach
8889 self.lu.LogInfo("Attaching primary drbds to new secondary"
8890 " (standalone => connected)")
8891 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8893 self.node_secondary_ip,
8894 self.instance.disks,
8897 for to_node, to_result in result.items():
8898 msg = to_result.fail_msg
8900 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8902 hint=("please do a gnt-instance info to see the"
8903 " status of disks"))
8905 if self.early_release:
8906 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8908 self._RemoveOldStorage(self.target_node, iv_names)
8909 # WARNING: we release all node locks here, do not do other RPCs
8910 # than WaitForSync to the primary node
8911 self._ReleaseNodeLock([self.instance.primary_node,
8916 # This can fail as the old devices are degraded and _WaitForSync
8917 # returns a combined result over all disks, so we don't check its return value
8918 self.lu.LogStep(cstep, steps_total, "Sync devices")
8920 _WaitForSync(self.lu, self.instance)
8922 # Check all devices manually
8923 self._CheckDevices(self.instance.primary_node, iv_names)
8925 # Step: remove old storage
8926 if not self.early_release:
8927 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8928 self._RemoveOldStorage(self.target_node, iv_names)
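# Illustrative sketch (not part of the original module) of the logical_id
# rewrite performed above for one DRBD disk; node names, port and minors are
# hypothetical:
#
#   old id: ("node1", "node2", 11000, 0, 3, "secret")   # primary, old secondary
#   new id: ("node1", "node3", 11000, 0, 7, "secret")   # primary, new secondary
#
# The device is first created on the new secondary with the port set to None
# (no networking), and only receives the full id once the primary has been
# switched to standalone and re-attached.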
8931 class LURepairNodeStorage(NoHooksLU):
8932 """Repairs the volume group on a node.
8937 def CheckArguments(self):
8938 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8940 storage_type = self.op.storage_type
8942 if (constants.SO_FIX_CONSISTENCY not in
8943 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
8944 raise errors.OpPrereqError("Storage units of type '%s' can not be"
8945 " repaired" % storage_type,
8948 def ExpandNames(self):
8949 self.needed_locks = {
8950 locking.LEVEL_NODE: [self.op.node_name],
8953 def _CheckFaultyDisks(self, instance, node_name):
8954 """Ensure faulty disks abort the opcode or at least warn."""
8956 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
8958 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
8959 " node '%s'" % (instance.name, node_name),
8961 except errors.OpPrereqError, err:
8962 if self.op.ignore_consistency:
8963 self.proc.LogWarning(str(err.args[0]))
8967 def CheckPrereq(self):
8968 """Check prerequisites.
8971 # Check whether any instance on this node has faulty disks
8972 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
8973 if not inst.admin_up:
8975 check_nodes = set(inst.all_nodes)
8976 check_nodes.discard(self.op.node_name)
8977 for inst_node_name in check_nodes:
8978 self._CheckFaultyDisks(inst, inst_node_name)
8980 def Exec(self, feedback_fn):
8981 feedback_fn("Repairing storage unit '%s' on %s ..." %
8982 (self.op.name, self.op.node_name))
8984 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
8985 result = self.rpc.call_storage_execute(self.op.node_name,
8986 self.op.storage_type, st_args,
8988 constants.SO_FIX_CONSISTENCY)
8989 result.Raise("Failed to repair storage unit '%s' on %s" %
8990 (self.op.name, self.op.node_name))
8993 class LUNodeEvacStrategy(NoHooksLU):
8994 """Computes the node evacuation strategy.
8999 def CheckArguments(self):
9000 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
9002 def ExpandNames(self):
9003 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
9004 self.needed_locks = locks = {}
9005 if self.op.remote_node is None:
9006 locks[locking.LEVEL_NODE] = locking.ALL_SET
9008 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9009 locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
9011 def Exec(self, feedback_fn):
9012 if self.op.remote_node is not None:
9014 for node in self.op.nodes:
9015 instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
9018 if i.primary_node == self.op.remote_node:
9019 raise errors.OpPrereqError("Node %s is the primary node of"
9020 " instance %s, cannot use it as"
9022 (self.op.remote_node, i.name),
9024 result.append([i.name, self.op.remote_node])
9026 ial = IAllocator(self.cfg, self.rpc,
9027 mode=constants.IALLOCATOR_MODE_MEVAC,
9028 evac_nodes=self.op.nodes)
9029 ial.Run(self.op.iallocator, validate=True)
9031 raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
9037 class LUInstanceGrowDisk(LogicalUnit):
9038 """Grow a disk of an instance.
9042 HTYPE = constants.HTYPE_INSTANCE
9045 def ExpandNames(self):
9046 self._ExpandAndLockInstance()
9047 self.needed_locks[locking.LEVEL_NODE] = []
9048 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9050 def DeclareLocks(self, level):
9051 if level == locking.LEVEL_NODE:
9052 self._LockInstancesNodes()
9054 def BuildHooksEnv(self):
9057 This runs on the master, the primary and all the secondaries.
9061 "DISK": self.op.disk,
9062 "AMOUNT": self.op.amount,
9064 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9065 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9068 def CheckPrereq(self):
9069 """Check prerequisites.
9071 This checks that the instance is in the cluster.
9074 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9075 assert instance is not None, \
9076 "Cannot retrieve locked instance %s" % self.op.instance_name
9077 nodenames = list(instance.all_nodes)
9078 for node in nodenames:
9079 _CheckNodeOnline(self, node)
9081 self.instance = instance
9083 if instance.disk_template not in constants.DTS_GROWABLE:
9084 raise errors.OpPrereqError("Instance's disk layout does not support"
9085 " growing.", errors.ECODE_INVAL)
9087 self.disk = instance.FindDisk(self.op.disk)
9089 if instance.disk_template not in (constants.DT_FILE,
9090 constants.DT_SHARED_FILE):
9091 # TODO: check the free disk space for file-based disks once that feature is supported
9093 _CheckNodesFreeDiskPerVG(self, nodenames,
9094 self.disk.ComputeGrowth(self.op.amount))
9096 def Exec(self, feedback_fn):
9097 """Execute disk grow.
9100 instance = self.instance
9103 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
9105 raise errors.OpExecError("Cannot activate block device to grow")
9107 for node in instance.all_nodes:
9108 self.cfg.SetDiskID(disk, node)
9109 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
9110 result.Raise("Grow request failed to node %s" % node)
9112 # TODO: Rewrite code to work properly
9113 # DRBD goes into sync mode for a short amount of time after executing the
9114 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
9115 # calling "resize" in sync mode fails. Sleeping for a short amount of
9116 # time is a work-around.
9119 disk.RecordGrow(self.op.amount)
9120 self.cfg.Update(instance, feedback_fn)
9121 if self.op.wait_for_sync:
9122 disk_abort = not _WaitForSync(self, instance, disks=[disk])
9124 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
9125 " status.\nPlease check the instance.")
9126 if not instance.admin_up:
9127 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
9128 elif not instance.admin_up:
9129 self.proc.LogWarning("Not shutting down the disk even though the"
9130 " instance is not supposed to be running, because"
9131 " wait-for-sync was not requested.")
9134 class LUInstanceQueryData(NoHooksLU):
9135 """Query runtime instance data.
9140 def ExpandNames(self):
9141 self.needed_locks = {}
9142 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
9144 if self.op.instances:
9145 self.wanted_names = []
9146 for name in self.op.instances:
9147 full_name = _ExpandInstanceName(self.cfg, name)
9148 self.wanted_names.append(full_name)
9149 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
9151 self.wanted_names = None
9152 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
9154 self.needed_locks[locking.LEVEL_NODE] = []
9155 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9157 def DeclareLocks(self, level):
9158 if level == locking.LEVEL_NODE:
9159 self._LockInstancesNodes()
9161 def CheckPrereq(self):
9162 """Check prerequisites.
9164 This only checks the optional instance list against the existing names.
9167 if self.wanted_names is None:
9168 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
9170 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
9171 in self.wanted_names]
9173 def _ComputeBlockdevStatus(self, node, instance_name, dev):
9174 """Returns the status of a block device
9177 if self.op.static or not node:
9180 self.cfg.SetDiskID(dev, node)
9182 result = self.rpc.call_blockdev_find(node, dev)
9186 result.Raise("Can't compute disk status for %s" % instance_name)
9188 status = result.payload
9192 return (status.dev_path, status.major, status.minor,
9193 status.sync_percent, status.estimated_time,
9194 status.is_degraded, status.ldisk_status)
9196 def _ComputeDiskStatus(self, instance, snode, dev):
9197 """Compute block device status.
9200 if dev.dev_type in constants.LDS_DRBD:
9201 # we change the snode then (otherwise we use the one passed in)
9202 if dev.logical_id[0] == instance.primary_node:
9203 snode = dev.logical_id[1]
9205 snode = dev.logical_id[0]
9207 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
9209 dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
9212 dev_children = [self._ComputeDiskStatus(instance, snode, child)
9213 for child in dev.children]
9218 "iv_name": dev.iv_name,
9219 "dev_type": dev.dev_type,
9220 "logical_id": dev.logical_id,
9221 "physical_id": dev.physical_id,
9222 "pstatus": dev_pstatus,
9223 "sstatus": dev_sstatus,
9224 "children": dev_children,
9231 def Exec(self, feedback_fn):
9232 """Gather and return data"""
9235 cluster = self.cfg.GetClusterInfo()
9237 for instance in self.wanted_instances:
9238 if not self.op.static:
9239 remote_info = self.rpc.call_instance_info(instance.primary_node,
9241 instance.hypervisor)
9242 remote_info.Raise("Error checking node %s" % instance.primary_node)
9243 remote_info = remote_info.payload
9244 if remote_info and "state" in remote_info:
9247 remote_state = "down"
9250 if instance.admin_up:
9253 config_state = "down"
9255 disks = [self._ComputeDiskStatus(instance, None, device)
9256 for device in instance.disks]
9259 "name": instance.name,
9260 "config_state": config_state,
9261 "run_state": remote_state,
9262 "pnode": instance.primary_node,
9263 "snodes": instance.secondary_nodes,
9265 # this happens to be the same format used for hooks
9266 "nics": _NICListToTuple(self, instance.nics),
9267 "disk_template": instance.disk_template,
9269 "hypervisor": instance.hypervisor,
9270 "network_port": instance.network_port,
9271 "hv_instance": instance.hvparams,
9272 "hv_actual": cluster.FillHV(instance, skip_globals=True),
9273 "be_instance": instance.beparams,
9274 "be_actual": cluster.FillBE(instance),
9275 "os_instance": instance.osparams,
9276 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
9277 "serial_no": instance.serial_no,
9278 "mtime": instance.mtime,
9279 "ctime": instance.ctime,
9280 "uuid": instance.uuid,
9283 result[instance.name] = idict
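# Illustrative sketch (not from the original source): the mapping built here
# is keyed by instance name and has roughly the shape
#   {"instance1.example.com": {"name": "instance1.example.com",
#                              "config_state": "up", "run_state": "up",
#                              "pnode": "node1.example.com",
#                              "snodes": ["node2.example.com"],
#                              "disks": [...], "nics": [...], ...}}
# where the hostnames above are purely hypothetical examples.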
9288 class LUInstanceSetParams(LogicalUnit):
9289 """Modifies an instance's parameters.
9292 HPATH = "instance-modify"
9293 HTYPE = constants.HTYPE_INSTANCE
9296 def CheckArguments(self):
9297 if not (self.op.nics or self.op.disks or self.op.disk_template or
9298 self.op.hvparams or self.op.beparams or self.op.os_name):
9299 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
9301 if self.op.hvparams:
9302 _CheckGlobalHvParams(self.op.hvparams)
9306 for disk_op, disk_dict in self.op.disks:
9307 utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
9308 if disk_op == constants.DDM_REMOVE:
9311 elif disk_op == constants.DDM_ADD:
9314 if not isinstance(disk_op, int):
9315 raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
9316 if not isinstance(disk_dict, dict):
9317 msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
9318 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9320 if disk_op == constants.DDM_ADD:
9321 mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
9322 if mode not in constants.DISK_ACCESS_SET:
9323 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
9325 size = disk_dict.get('size', None)
9327 raise errors.OpPrereqError("Required disk parameter size missing",
9331 except (TypeError, ValueError), err:
9332 raise errors.OpPrereqError("Invalid disk size parameter: %s" %
9333 str(err), errors.ECODE_INVAL)
9334 disk_dict['size'] = size
9336 # modification of disk
9337 if 'size' in disk_dict:
9338 raise errors.OpPrereqError("Disk size change not possible, use"
9339 " grow-disk", errors.ECODE_INVAL)
9341 if disk_addremove > 1:
9342 raise errors.OpPrereqError("Only one disk add or remove operation"
9343 " supported at a time", errors.ECODE_INVAL)
9345 if self.op.disks and self.op.disk_template is not None:
9346 raise errors.OpPrereqError("Disk template conversion and other disk"
9347 " changes not supported at the same time",
9350 if (self.op.disk_template and
9351 self.op.disk_template in constants.DTS_INT_MIRROR and
9352 self.op.remote_node is None):
9353 raise errors.OpPrereqError("Changing the disk template to a mirrored"
9354 " one requires specifying a secondary node",
9359 for nic_op, nic_dict in self.op.nics:
9360 utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9361 if nic_op == constants.DDM_REMOVE:
9364 elif nic_op == constants.DDM_ADD:
9367 if not isinstance(nic_op, int):
9368 raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9369 if not isinstance(nic_dict, dict):
9370 msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9371 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9373 # nic_dict should be a dict
9374 nic_ip = nic_dict.get('ip', None)
9375 if nic_ip is not None:
9376 if nic_ip.lower() == constants.VALUE_NONE:
9377 nic_dict['ip'] = None
9379 if not netutils.IPAddress.IsValid(nic_ip):
9380 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9383 nic_bridge = nic_dict.get('bridge', None)
9384 nic_link = nic_dict.get('link', None)
9385 if nic_bridge and nic_link:
9386 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9387 " at the same time", errors.ECODE_INVAL)
9388 elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9389 nic_dict['bridge'] = None
9390 elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9391 nic_dict['link'] = None
9393 if nic_op == constants.DDM_ADD:
9394 nic_mac = nic_dict.get('mac', None)
9396 nic_dict['mac'] = constants.VALUE_AUTO
9398 if 'mac' in nic_dict:
9399 nic_mac = nic_dict['mac']
9400 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9401 nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9403 if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9404 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9405 " modifying an existing nic",
9408 if nic_addremove > 1:
9409 raise errors.OpPrereqError("Only one NIC add or remove operation"
9410 " supported at a time", errors.ECODE_INVAL)
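# Illustrative sketch (not part of the original source): the disk and NIC
# modification lists validated above are (op, dict) pairs, where op is either
# a DDM_* constant or the index of an existing device, e.g. something like
#   disks=[(constants.DDM_ADD, {"size": 1024, "mode": constants.DISK_RDWR})]
#   nics=[(0, {"link": "br0"}), (constants.DDM_REMOVE, {})]
# the size, link name and indices here are purely hypothetical.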
9412 def ExpandNames(self):
9413 self._ExpandAndLockInstance()
9414 self.needed_locks[locking.LEVEL_NODE] = []
9415 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9417 def DeclareLocks(self, level):
9418 if level == locking.LEVEL_NODE:
9419 self._LockInstancesNodes()
9420 if self.op.disk_template and self.op.remote_node:
9421 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9422 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9424 def BuildHooksEnv(self):
9427 This runs on the master, primary and secondaries.
9431 if constants.BE_MEMORY in self.be_new:
9432 args['memory'] = self.be_new[constants.BE_MEMORY]
9433 if constants.BE_VCPUS in self.be_new:
9434 args['vcpus'] = self.be_new[constants.BE_VCPUS]
9435 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9436 # information at all.
9439 nic_override = dict(self.op.nics)
9440 for idx, nic in enumerate(self.instance.nics):
9441 if idx in nic_override:
9442 this_nic_override = nic_override[idx]
9444 this_nic_override = {}
9445 if 'ip' in this_nic_override:
9446 ip = this_nic_override['ip']
9449 if 'mac' in this_nic_override:
9450 mac = this_nic_override['mac']
9453 if idx in self.nic_pnew:
9454 nicparams = self.nic_pnew[idx]
9456 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
9457 mode = nicparams[constants.NIC_MODE]
9458 link = nicparams[constants.NIC_LINK]
9459 args['nics'].append((ip, mac, mode, link))
9460 if constants.DDM_ADD in nic_override:
9461 ip = nic_override[constants.DDM_ADD].get('ip', None)
9462 mac = nic_override[constants.DDM_ADD]['mac']
9463 nicparams = self.nic_pnew[constants.DDM_ADD]
9464 mode = nicparams[constants.NIC_MODE]
9465 link = nicparams[constants.NIC_LINK]
9466 args['nics'].append((ip, mac, mode, link))
9467 elif constants.DDM_REMOVE in nic_override:
9468 del args['nics'][-1]
9470 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
9471 if self.op.disk_template:
9472 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
9473 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9476 def CheckPrereq(self):
9477 """Check prerequisites.
9479 This only checks the instance list against the existing names.
9482 # checking the new params on the primary/secondary nodes
9484 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9485 cluster = self.cluster = self.cfg.GetClusterInfo()
9486 assert self.instance is not None, \
9487 "Cannot retrieve locked instance %s" % self.op.instance_name
9488 pnode = instance.primary_node
9489 nodelist = list(instance.all_nodes)
9492 if self.op.os_name and not self.op.force:
9493 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
9494 self.op.force_variant)
9495 instance_os = self.op.os_name
9497 instance_os = instance.os
9499 if self.op.disk_template:
9500 if instance.disk_template == self.op.disk_template:
9501 raise errors.OpPrereqError("Instance already has disk template %s" %
9502 instance.disk_template, errors.ECODE_INVAL)
9504 if (instance.disk_template,
9505 self.op.disk_template) not in self._DISK_CONVERSIONS:
9506 raise errors.OpPrereqError("Unsupported disk template conversion from"
9507 " %s to %s" % (instance.disk_template,
9508 self.op.disk_template),
9510 _CheckInstanceDown(self, instance, "cannot change disk template")
9511 if self.op.disk_template in constants.DTS_INT_MIRROR:
9512 if self.op.remote_node == pnode:
9513 raise errors.OpPrereqError("Given new secondary node %s is the same"
9514 " as the primary node of the instance" %
9515 self.op.remote_node, errors.ECODE_STATE)
9516 _CheckNodeOnline(self, self.op.remote_node)
9517 _CheckNodeNotDrained(self, self.op.remote_node)
9518 # FIXME: here we assume that the old instance type is DT_PLAIN
9519 assert instance.disk_template == constants.DT_PLAIN
9520 disks = [{"size": d.size, "vg": d.logical_id[0]}
9521 for d in instance.disks]
9522 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
9523 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
9525 # hvparams processing
9526 if self.op.hvparams:
9527 hv_type = instance.hypervisor
9528 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
9529 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
9530 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
9533 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
9534 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
9535 self.hv_new = hv_new # the new actual values
9536 self.hv_inst = i_hvdict # the new dict (without defaults)
9538 self.hv_new = self.hv_inst = {}
9540 # beparams processing
9541 if self.op.beparams:
9542 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
9544 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
9545 be_new = cluster.SimpleFillBE(i_bedict)
9546 self.be_new = be_new # the new actual values
9547 self.be_inst = i_bedict # the new dict (without defaults)
9549 self.be_new = self.be_inst = {}
9551 # osparams processing
9552 if self.op.osparams:
9553 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
9554 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
9555 self.os_inst = i_osdict # the new dict (without defaults)
9561 if constants.BE_MEMORY in self.op.beparams and not self.op.force:
9562 mem_check_list = [pnode]
9563 if be_new[constants.BE_AUTO_BALANCE]:
9564 # either we changed auto_balance to yes or it was from before
9565 mem_check_list.extend(instance.secondary_nodes)
9566 instance_info = self.rpc.call_instance_info(pnode, instance.name,
9567 instance.hypervisor)
9568 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
9569 instance.hypervisor)
9570 pninfo = nodeinfo[pnode]
9571 msg = pninfo.fail_msg
9573 # Assume the primary node is unreachable and go ahead
9574 self.warn.append("Can't get info from primary node %s: %s" %
9576 elif not isinstance(pninfo.payload.get('memory_free', None), int):
9577 self.warn.append("Node data from primary node %s doesn't contain"
9578 " free memory information" % pnode)
9579 elif instance_info.fail_msg:
9580 self.warn.append("Can't get instance runtime information: %s" %
9581 instance_info.fail_msg)
9583 if instance_info.payload:
9584 current_mem = int(instance_info.payload['memory'])
9586 # Assume instance not running
9587 # (there is a slight race condition here, but it's not very probable,
9588 # and we have no other way to check)
9590 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
9591 pninfo.payload['memory_free'])
9593 raise errors.OpPrereqError("This change will prevent the instance"
9594 " from starting, due to %d MB of memory"
9595 " missing on its primary node" % miss_mem,
9598 if be_new[constants.BE_AUTO_BALANCE]:
9599 for node, nres in nodeinfo.items():
9600 if node not in instance.secondary_nodes:
9604 self.warn.append("Can't get info from secondary node %s: %s" %
9606 elif not isinstance(nres.payload.get('memory_free', None), int):
9607 self.warn.append("Secondary node %s didn't return free"
9608 " memory information" % node)
9609 elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
9610 self.warn.append("Not enough memory to failover instance to"
9611 " secondary node %s" % node)
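# Worked example for the free-memory check above (illustrative only, figures
# are hypothetical): if the new BE_MEMORY is 2048 MB, the instance currently
# uses 1024 MB and the primary node reports 512 MB free, then
#   miss_mem = 2048 - 1024 - 512 = 512 MB > 0
# and the prerequisite check refuses the change.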
9616 for nic_op, nic_dict in self.op.nics:
9617 if nic_op == constants.DDM_REMOVE:
9618 if not instance.nics:
9619 raise errors.OpPrereqError("Instance has no NICs, cannot remove",
9622 if nic_op != constants.DDM_ADD:
9624 if not instance.nics:
9625 raise errors.OpPrereqError("Invalid NIC index %s, instance has"
9626 " no NICs" % nic_op,
9628 if nic_op < 0 or nic_op >= len(instance.nics):
9629 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
9631 (nic_op, len(instance.nics) - 1),
9633 old_nic_params = instance.nics[nic_op].nicparams
9634 old_nic_ip = instance.nics[nic_op].ip
9639 update_params_dict = dict([(key, nic_dict[key])
9640 for key in constants.NICS_PARAMETERS
9641 if key in nic_dict])
9643 if 'bridge' in nic_dict:
9644 update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
9646 new_nic_params = _GetUpdatedParams(old_nic_params,
9648 utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
9649 new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
9650 objects.NIC.CheckParameterSyntax(new_filled_nic_params)
9651 self.nic_pinst[nic_op] = new_nic_params
9652 self.nic_pnew[nic_op] = new_filled_nic_params
9653 new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
9655 if new_nic_mode == constants.NIC_MODE_BRIDGED:
9656 nic_bridge = new_filled_nic_params[constants.NIC_LINK]
9657 msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
9659 msg = "Error checking bridges on node %s: %s" % (pnode, msg)
9661 self.warn.append(msg)
9663 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
9664 if new_nic_mode == constants.NIC_MODE_ROUTED:
9665 if 'ip' in nic_dict:
9666 nic_ip = nic_dict['ip']
9670 raise errors.OpPrereqError('Cannot set the nic ip to None'
9671 ' on a routed nic', errors.ECODE_INVAL)
9672 if 'mac' in nic_dict:
9673 nic_mac = nic_dict['mac']
9675 raise errors.OpPrereqError('Cannot set the nic mac to None',
9677 elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9678 # otherwise generate the mac
9679 nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
9681 # or validate/reserve the current one
9683 self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
9684 except errors.ReservationError:
9685 raise errors.OpPrereqError("MAC address %s already in use"
9686 " in cluster" % nic_mac,
9687 errors.ECODE_NOTUNIQUE)
9690 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
9691 raise errors.OpPrereqError("Disk operations not supported for"
9692 " diskless instances",
9694 for disk_op, _ in self.op.disks:
9695 if disk_op == constants.DDM_REMOVE:
9696 if len(instance.disks) == 1:
9697 raise errors.OpPrereqError("Cannot remove the last disk of"
9698 " an instance", errors.ECODE_INVAL)
9699 _CheckInstanceDown(self, instance, "cannot remove disks")
9701 if (disk_op == constants.DDM_ADD and
9702 len(instance.disks) >= constants.MAX_DISKS):
9703 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
9704 " add more" % constants.MAX_DISKS,
9706 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
9708 if disk_op < 0 or disk_op >= len(instance.disks):
9709 raise errors.OpPrereqError("Invalid disk index %s, valid values"
9711 (disk_op, len(instance.disks)),
9716 def _ConvertPlainToDrbd(self, feedback_fn):
9717 """Converts an instance from plain to drbd.
9720 feedback_fn("Converting template to drbd")
9721 instance = self.instance
9722 pnode = instance.primary_node
9723 snode = self.op.remote_node
9725 # create a fake disk info for _GenerateDiskTemplate
9726 disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
9727 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9728 instance.name, pnode, [snode],
9729 disk_info, None, None, 0, feedback_fn)
9730 info = _GetInstanceInfoText(instance)
9731 feedback_fn("Creating additional volumes...")
9732 # first, create the missing data and meta devices
9733 for disk in new_disks:
9734 # unfortunately this is... not too nice
9735 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9737 for child in disk.children:
9738 _CreateSingleBlockDev(self, snode, instance, child, info, True)
9739 # at this stage, all new LVs have been created, we can rename the
9741 feedback_fn("Renaming original volumes...")
9742 rename_list = [(o, n.children[0].logical_id)
9743 for (o, n) in zip(instance.disks, new_disks)]
9744 result = self.rpc.call_blockdev_rename(pnode, rename_list)
9745 result.Raise("Failed to rename original LVs")
9747 feedback_fn("Initializing DRBD devices...")
9748 # all child devices are in place, we can now create the DRBD devices
9749 for disk in new_disks:
9750 for node in [pnode, snode]:
9751 f_create = node == pnode
9752 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9754 # at this point, the instance has been modified
9755 instance.disk_template = constants.DT_DRBD8
9756 instance.disks = new_disks
9757 self.cfg.Update(instance, feedback_fn)
9759 # disks are created, waiting for sync
9760 disk_abort = not _WaitForSync(self, instance)
9762 raise errors.OpExecError("There are some degraded disks for"
9763 " this instance, please clean up manually")
9765 def _ConvertDrbdToPlain(self, feedback_fn):
9766 """Converts an instance from drbd to plain.
9769 instance = self.instance
9770 assert len(instance.secondary_nodes) == 1
9771 pnode = instance.primary_node
9772 snode = instance.secondary_nodes[0]
9773 feedback_fn("Converting template to plain")
9775 old_disks = instance.disks
9776 new_disks = [d.children[0] for d in old_disks]
9778 # copy over size and mode
9779 for parent, child in zip(old_disks, new_disks):
9780 child.size = parent.size
9781 child.mode = parent.mode
9783 # update instance structure
9784 instance.disks = new_disks
9785 instance.disk_template = constants.DT_PLAIN
9786 self.cfg.Update(instance, feedback_fn)
9788 feedback_fn("Removing volumes on the secondary node...")
9789 for disk in old_disks:
9790 self.cfg.SetDiskID(disk, snode)
9791 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9793 self.LogWarning("Could not remove block device %s on node %s,"
9794 " continuing anyway: %s", disk.iv_name, snode, msg)
9796 feedback_fn("Removing unneeded volumes on the primary node...")
9797 for idx, disk in enumerate(old_disks):
9798 meta = disk.children[1]
9799 self.cfg.SetDiskID(meta, pnode)
9800 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9802 self.LogWarning("Could not remove metadata for disk %d on node %s,"
9803 " continuing anyway: %s", idx, pnode, msg)
9805 def Exec(self, feedback_fn):
9806 """Modifies an instance.
9808 All parameters take effect only at the next restart of the instance.
9811 # Process here the warnings from CheckPrereq, as we don't have a
9812 # feedback_fn there.
9813 for warn in self.warn:
9814 feedback_fn("WARNING: %s" % warn)
9817 instance = self.instance
9819 for disk_op, disk_dict in self.op.disks:
9820 if disk_op == constants.DDM_REMOVE:
9821 # remove the last disk
9822 device = instance.disks.pop()
9823 device_idx = len(instance.disks)
9824 for node, disk in device.ComputeNodeTree(instance.primary_node):
9825 self.cfg.SetDiskID(disk, node)
9826 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9828 self.LogWarning("Could not remove disk/%d on node %s: %s,"
9829 " continuing anyway", device_idx, node, msg)
9830 result.append(("disk/%d" % device_idx, "remove"))
9831 elif disk_op == constants.DDM_ADD:
9833 if instance.disk_template in (constants.DT_FILE,
9834 constants.DT_SHARED_FILE):
9835 file_driver, file_path = instance.disks[0].logical_id
9836 file_path = os.path.dirname(file_path)
9838 file_driver = file_path = None
9839 disk_idx_base = len(instance.disks)
9840 new_disk = _GenerateDiskTemplate(self,
9841 instance.disk_template,
9842 instance.name, instance.primary_node,
9843 instance.secondary_nodes,
9847 disk_idx_base, feedback_fn)[0]
9848 instance.disks.append(new_disk)
9849 info = _GetInstanceInfoText(instance)
9851 logging.info("Creating volume %s for instance %s",
9852 new_disk.iv_name, instance.name)
9853 # Note: this needs to be kept in sync with _CreateDisks
9855 for node in instance.all_nodes:
9856 f_create = node == instance.primary_node
9858 _CreateBlockDev(self, node, instance, new_disk,
9859 f_create, info, f_create)
9860 except errors.OpExecError, err:
9861 self.LogWarning("Failed to create volume %s (%s) on"
9863 new_disk.iv_name, new_disk, node, err)
9864 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9865 (new_disk.size, new_disk.mode)))
9867 # change a given disk
9868 instance.disks[disk_op].mode = disk_dict['mode']
9869 result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9871 if self.op.disk_template:
9872 r_shut = _ShutdownInstanceDisks(self, instance)
9874 raise errors.OpExecError("Cannot shut down instance disks, unable to"
9875 " proceed with disk template conversion")
9876 mode = (instance.disk_template, self.op.disk_template)
9878 self._DISK_CONVERSIONS[mode](self, feedback_fn)
9880 self.cfg.ReleaseDRBDMinors(instance.name)
9882 result.append(("disk_template", self.op.disk_template))
9885 for nic_op, nic_dict in self.op.nics:
9886 if nic_op == constants.DDM_REMOVE:
9887 # remove the last nic
9888 del instance.nics[-1]
9889 result.append(("nic.%d" % len(instance.nics), "remove"))
9890 elif nic_op == constants.DDM_ADD:
9891 # mac and bridge should be set by now
9892 mac = nic_dict['mac']
9893 ip = nic_dict.get('ip', None)
9894 nicparams = self.nic_pinst[constants.DDM_ADD]
9895 new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9896 instance.nics.append(new_nic)
9897 result.append(("nic.%d" % (len(instance.nics) - 1),
9898 "add:mac=%s,ip=%s,mode=%s,link=%s" %
9899 (new_nic.mac, new_nic.ip,
9900 self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9901 self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9904 for key in 'mac', 'ip':
9906 setattr(instance.nics[nic_op], key, nic_dict[key])
9907 if nic_op in self.nic_pinst:
9908 instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9909 for key, val in nic_dict.iteritems():
9910 result.append(("nic.%s/%d" % (key, nic_op), val))
9913 if self.op.hvparams:
9914 instance.hvparams = self.hv_inst
9915 for key, val in self.op.hvparams.iteritems():
9916 result.append(("hv/%s" % key, val))
9919 if self.op.beparams:
9920 instance.beparams = self.be_inst
9921 for key, val in self.op.beparams.iteritems():
9922 result.append(("be/%s" % key, val))
9926 instance.os = self.op.os_name
9929 if self.op.osparams:
9930 instance.osparams = self.os_inst
9931 for key, val in self.op.osparams.iteritems():
9932 result.append(("os/%s" % key, val))
9934 self.cfg.Update(instance, feedback_fn)
9938 _DISK_CONVERSIONS = {
9939 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
9940 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
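# Illustrative note (not part of the original source): Exec() above looks up
# the conversion handler by the (old template, new template) pair, so a
# request to convert a plain instance to DRBD effectively does
#   mode = (constants.DT_PLAIN, constants.DT_DRBD8)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)   # _ConvertPlainToDrbd
# and any pair not listed in the dictionary is rejected in CheckPrereq.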
9944 class LUBackupQuery(NoHooksLU):
9945 """Query the exports list
9950 def ExpandNames(self):
9951 self.needed_locks = {}
9952 self.share_locks[locking.LEVEL_NODE] = 1
9953 if not self.op.nodes:
9954 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9956 self.needed_locks[locking.LEVEL_NODE] = \
9957 _GetWantedNodes(self, self.op.nodes)
9959 def Exec(self, feedback_fn):
9960 """Compute the list of all the exported system images.
9963 @return: a dictionary with the structure node->(export-list)
9964 where export-list is a list of the instances exported on
9968 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9969 rpcresult = self.rpc.call_export_list(self.nodes)
9971 for node in rpcresult:
9972 if rpcresult[node].fail_msg:
9973 result[node] = False
9975 result[node] = rpcresult[node].payload
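# Illustrative sketch (not from the original source): the returned mapping
# might look roughly like
#   {"node1.example.com": ["instance1.example.com"],
#    "node2.example.com": False}
# where False marks a node whose export list could not be queried; the
# hostnames are purely hypothetical.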
9980 class LUBackupPrepare(NoHooksLU):
9981 """Prepares an instance for an export and returns useful information.
9986 def ExpandNames(self):
9987 self._ExpandAndLockInstance()
9989 def CheckPrereq(self):
9990 """Check prerequisites.
9993 instance_name = self.op.instance_name
9995 self.instance = self.cfg.GetInstanceInfo(instance_name)
9996 assert self.instance is not None, \
9997 "Cannot retrieve locked instance %s" % self.op.instance_name
9998 _CheckNodeOnline(self, self.instance.primary_node)
10000 self._cds = _GetClusterDomainSecret()
10002 def Exec(self, feedback_fn):
10003 """Prepares an instance for an export.
10006 instance = self.instance
10008 if self.op.mode == constants.EXPORT_MODE_REMOTE:
10009 salt = utils.GenerateSecret(8)
10011 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
10012 result = self.rpc.call_x509_cert_create(instance.primary_node,
10013 constants.RIE_CERT_VALIDITY)
10014 result.Raise("Can't create X509 key and certificate on %s" % result.node)
10016 (name, cert_pem) = result.payload
10018 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
10022 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
10023 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
10025 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
10031 class LUBackupExport(LogicalUnit):
10032 """Export an instance to an image in the cluster.
10035 HPATH = "instance-export"
10036 HTYPE = constants.HTYPE_INSTANCE
10039 def CheckArguments(self):
10040 """Check the arguments.
10043 self.x509_key_name = self.op.x509_key_name
10044 self.dest_x509_ca_pem = self.op.destination_x509_ca
10046 if self.op.mode == constants.EXPORT_MODE_REMOTE:
10047 if not self.x509_key_name:
10048 raise errors.OpPrereqError("Missing X509 key name for encryption",
10049 errors.ECODE_INVAL)
10051 if not self.dest_x509_ca_pem:
10052 raise errors.OpPrereqError("Missing destination X509 CA",
10053 errors.ECODE_INVAL)
10055 def ExpandNames(self):
10056 self._ExpandAndLockInstance()
10058 # Lock all nodes for local exports
10059 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10060 # FIXME: lock only instance primary and destination node
10062 # Sad but true, for now we have to lock all nodes, as we don't know where
10063 # the previous export might be, and in this LU we search for it and
10064 # remove it from its current node. In the future we could fix this by:
10065 # - making a tasklet to search (share-lock all), then create the
10066 # new one, then one to remove, after
10067 # - removing the removal operation altogether
10068 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10070 def DeclareLocks(self, level):
10071 """Last minute lock declaration."""
10072 # All nodes are locked anyway, so nothing to do here.
10074 def BuildHooksEnv(self):
10075 """Build hooks env.
10077 This will run on the master, primary node and target node.
10081 "EXPORT_MODE": self.op.mode,
10082 "EXPORT_NODE": self.op.target_node,
10083 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
10084 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
10085 # TODO: Generic function for boolean env variables
10086 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
10089 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
10091 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
10093 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10094 nl.append(self.op.target_node)
10098 def CheckPrereq(self):
10099 """Check prerequisites.
10101 This checks that the instance and node names are valid.
10104 instance_name = self.op.instance_name
10106 self.instance = self.cfg.GetInstanceInfo(instance_name)
10107 assert self.instance is not None, \
10108 "Cannot retrieve locked instance %s" % self.op.instance_name
10109 _CheckNodeOnline(self, self.instance.primary_node)
10111 if (self.op.remove_instance and self.instance.admin_up and
10112 not self.op.shutdown):
10113 raise errors.OpPrereqError("Can not remove instance without shutting it"
10116 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10117 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
10118 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
10119 assert self.dst_node is not None
10121 _CheckNodeOnline(self, self.dst_node.name)
10122 _CheckNodeNotDrained(self, self.dst_node.name)
10125 self.dest_disk_info = None
10126 self.dest_x509_ca = None
10128 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10129 self.dst_node = None
10131 if len(self.op.target_node) != len(self.instance.disks):
10132 raise errors.OpPrereqError(("Received destination information for %s"
10133 " disks, but instance %s has %s disks") %
10134 (len(self.op.target_node), instance_name,
10135 len(self.instance.disks)),
10136 errors.ECODE_INVAL)
10138 cds = _GetClusterDomainSecret()
10140 # Check X509 key name
10142 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
10143 except (TypeError, ValueError), err:
10144 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
10146 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
10147 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
10148 errors.ECODE_INVAL)
10150 # Load and verify CA
10152 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
10153 except OpenSSL.crypto.Error, err:
10154 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
10155 (err, ), errors.ECODE_INVAL)
10157 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
10158 if errcode is not None:
10159 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
10160 (msg, ), errors.ECODE_INVAL)
10162 self.dest_x509_ca = cert
10164 # Verify target information
10166 for idx, disk_data in enumerate(self.op.target_node):
10168 (host, port, magic) = \
10169 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
10170 except errors.GenericError, err:
10171 raise errors.OpPrereqError("Target info for disk %s: %s" %
10172 (idx, err), errors.ECODE_INVAL)
10174 disk_info.append((host, port, magic))
10176 assert len(disk_info) == len(self.op.target_node)
10177 self.dest_disk_info = disk_info
10180 raise errors.ProgrammerError("Unhandled export mode %r" %
10183 # instance disk type verification
10184 # TODO: Implement export support for file-based disks
10185 for disk in self.instance.disks:
10186 if disk.dev_type == constants.LD_FILE:
10187 raise errors.OpPrereqError("Export not supported for instances with"
10188 " file-based disks", errors.ECODE_INVAL)
10190 def _CleanupExports(self, feedback_fn):
10191 """Removes exports of current instance from all other nodes.
10193 If an instance in a cluster with nodes A..D was exported to node C, its
10194 exports will be removed from the nodes A, B and D.
10197 assert self.op.mode != constants.EXPORT_MODE_REMOTE
10199 nodelist = self.cfg.GetNodeList()
10200 nodelist.remove(self.dst_node.name)
10202 # on one-node clusters nodelist will be empty after the removal
10203 # if we proceed, the backup would be removed because OpBackupQuery
10204 # substitutes an empty list with the full cluster node list.
10205 iname = self.instance.name
10207 feedback_fn("Removing old exports for instance %s" % iname)
10208 exportlist = self.rpc.call_export_list(nodelist)
10209 for node in exportlist:
10210 if exportlist[node].fail_msg:
10212 if iname in exportlist[node].payload:
10213 msg = self.rpc.call_export_remove(node, iname).fail_msg
10215 self.LogWarning("Could not remove older export for instance %s"
10216 " on node %s: %s", iname, node, msg)
10218 def Exec(self, feedback_fn):
10219 """Export an instance to an image in the cluster.
10222 assert self.op.mode in constants.EXPORT_MODES
10224 instance = self.instance
10225 src_node = instance.primary_node
10227 if self.op.shutdown:
10228 # shutdown the instance, but not the disks
10229 feedback_fn("Shutting down instance %s" % instance.name)
10230 result = self.rpc.call_instance_shutdown(src_node, instance,
10231 self.op.shutdown_timeout)
10232 # TODO: Maybe ignore failures if ignore_remove_failures is set
10233 result.Raise("Could not shut down instance %s on"
10234 " node %s" % (instance.name, src_node))
10236 # set the disk IDs correctly since call_instance_start needs the
10237 # correct drbd minor to create the symlinks
10238 for disk in instance.disks:
10239 self.cfg.SetDiskID(disk, src_node)
10241 activate_disks = (not instance.admin_up)
10244 # Activate the instance disks if we're exporting a stopped instance
10245 feedback_fn("Activating disks for %s" % instance.name)
10246 _StartInstanceDisks(self, instance, None)
10249 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
10252 helper.CreateSnapshots()
10254 if (self.op.shutdown and instance.admin_up and
10255 not self.op.remove_instance):
10256 assert not activate_disks
10257 feedback_fn("Starting instance %s" % instance.name)
10258 result = self.rpc.call_instance_start(src_node, instance, None, None)
10259 msg = result.fail_msg
10261 feedback_fn("Failed to start instance: %s" % msg)
10262 _ShutdownInstanceDisks(self, instance)
10263 raise errors.OpExecError("Could not start instance: %s" % msg)
10265 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10266 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
10267 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10268 connect_timeout = constants.RIE_CONNECT_TIMEOUT
10269 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10271 (key_name, _, _) = self.x509_key_name
10274 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
10277 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
10278 key_name, dest_ca_pem,
10283 # Check for backwards compatibility
10284 assert len(dresults) == len(instance.disks)
10285 assert compat.all(isinstance(i, bool) for i in dresults), \
10286 "Not all results are boolean: %r" % dresults
10290 feedback_fn("Deactivating disks for %s" % instance.name)
10291 _ShutdownInstanceDisks(self, instance)
10293 if not (compat.all(dresults) and fin_resu):
10296 failures.append("export finalization")
10297 if not compat.all(dresults):
10298 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
10300 failures.append("disk export: disk(s) %s" % fdsk)
10302 raise errors.OpExecError("Export failed, errors in %s" %
10303 utils.CommaJoin(failures))
10305 # At this point, the export was successful, we can cleanup/finish
10307 # Remove instance if requested
10308 if self.op.remove_instance:
10309 feedback_fn("Removing instance %s" % instance.name)
10310 _RemoveInstance(self, feedback_fn, instance,
10311 self.op.ignore_remove_failures)
10313 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10314 self._CleanupExports(feedback_fn)
10316 return fin_resu, dresults
10319 class LUBackupRemove(NoHooksLU):
10320 """Remove exports related to the named instance.
10325 def ExpandNames(self):
10326 self.needed_locks = {}
10327 # We need all nodes to be locked in order for RemoveExport to work, but we
10328 # don't need to lock the instance itself, as nothing will happen to it (and
10329 # we can also remove exports for a removed instance)
10330 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10332 def Exec(self, feedback_fn):
10333 """Remove any export.
10336 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
10337 # If the instance was not found we'll try with the name that was passed in.
10338 # This will only work if it was an FQDN, though.
10340 if not instance_name:
10342 instance_name = self.op.instance_name
10344 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
10345 exportlist = self.rpc.call_export_list(locked_nodes)
10347 for node in exportlist:
10348 msg = exportlist[node].fail_msg
10350 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
10352 if instance_name in exportlist[node].payload:
10354 result = self.rpc.call_export_remove(node, instance_name)
10355 msg = result.fail_msg
10357 logging.error("Could not remove export for instance %s"
10358 " on node %s: %s", instance_name, node, msg)
10360 if fqdn_warn and not found:
10361 feedback_fn("Export not found. If trying to remove an export belonging"
10362 " to a deleted instance, please use its Fully Qualified"
10366 class LUGroupAdd(LogicalUnit):
10367 """Logical unit for creating node groups.
10370 HPATH = "group-add"
10371 HTYPE = constants.HTYPE_GROUP
10374 def ExpandNames(self):
10375 # We need the new group's UUID here so that we can create and acquire the
10376 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
10377 # that it should not check whether the UUID exists in the configuration.
10378 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
10379 self.needed_locks = {}
10380 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10382 def CheckPrereq(self):
10383 """Check prerequisites.
10385 This checks that the given group name is not an existing node group
10390 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10391 except errors.OpPrereqError:
10394 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
10395 " node group (UUID: %s)" %
10396 (self.op.group_name, existing_uuid),
10397 errors.ECODE_EXISTS)
10399 if self.op.ndparams:
10400 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10402 def BuildHooksEnv(self):
10403 """Build hooks env.
10407 "GROUP_NAME": self.op.group_name,
10409 mn = self.cfg.GetMasterNode()
10410 return env, [mn], [mn]
10412 def Exec(self, feedback_fn):
10413 """Add the node group to the cluster.
10416 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
10417 uuid=self.group_uuid,
10418 alloc_policy=self.op.alloc_policy,
10419 ndparams=self.op.ndparams)
10421 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
10422 del self.remove_locks[locking.LEVEL_NODEGROUP]
10425 class LUGroupAssignNodes(NoHooksLU):
10426 """Logical unit for assigning nodes to groups.
10431 def ExpandNames(self):
10432 # These raise errors.OpPrereqError on their own:
10433 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10434 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
10436 # We want to lock all the affected nodes and groups. We have readily
10437 # available the list of nodes, and the *destination* group. To gather the
10438 # list of "source" groups, we need to fetch node information.
10439 self.node_data = self.cfg.GetAllNodesInfo()
10440 affected_groups = set(self.node_data[node].group for node in self.op.nodes)
10441 affected_groups.add(self.group_uuid)
10443 self.needed_locks = {
10444 locking.LEVEL_NODEGROUP: list(affected_groups),
10445 locking.LEVEL_NODE: self.op.nodes,
10448 def CheckPrereq(self):
10449 """Check prerequisites.
10452 self.group = self.cfg.GetNodeGroup(self.group_uuid)
10453 instance_data = self.cfg.GetAllInstancesInfo()
10455 if self.group is None:
10456 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10457 (self.op.group_name, self.group_uuid))
10459 (new_splits, previous_splits) = \
10460 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
10461 for node in self.op.nodes],
10462 self.node_data, instance_data)
10465 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
10467 if not self.op.force:
10468 raise errors.OpExecError("The following instances get split by this"
10469 " change and --force was not given: %s" %
10472 self.LogWarning("This operation will split the following instances: %s",
10475 if previous_splits:
10476 self.LogWarning("In addition, these already-split instances continue"
10477 " to be split across groups: %s",
10478 utils.CommaJoin(utils.NiceSort(previous_splits)))
10480 def Exec(self, feedback_fn):
10481 """Assign nodes to a new group.
10484 for node in self.op.nodes:
10485 self.node_data[node].group = self.group_uuid
10487 self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
10490 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
10491 """Check for split instances after a node assignment.
10493 This method considers a series of node assignments as an atomic operation,
10494 and returns information about split instances after applying the set of
10497 In particular, it returns information about newly split instances, and
10498 instances that were already split, and remain so after the change.
10500 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
10503 @type changes: list of (node_name, new_group_uuid) pairs.
10504 @param changes: list of node assignments to consider.
10505 @param node_data: a dict with data for all nodes
10506 @param instance_data: a dict with all instances to consider
10507 @rtype: a two-tuple
10508 @return: a list of instances that were previously okay and end up split as a
10509 consequence of this change, and a list of instances that were previously
10510 split and this change does not fix.
10513 changed_nodes = dict((node, group) for node, group in changes
10514 if node_data[node].group != group)
10516 all_split_instances = set()
10517 previously_split_instances = set()
10519 def InstanceNodes(instance):
10520 return [instance.primary_node] + list(instance.secondary_nodes)
10522 for inst in instance_data.values():
10523 if inst.disk_template not in constants.DTS_INT_MIRROR:
10526 instance_nodes = InstanceNodes(inst)
10528 if len(set(node_data[node].group for node in instance_nodes)) > 1:
10529 previously_split_instances.add(inst.name)
10531 if len(set(changed_nodes.get(node, node_data[node].group)
10532 for node in instance_nodes)) > 1:
10533 all_split_instances.add(inst.name)
10535 return (list(all_split_instances - previously_split_instances),
10536 list(previously_split_instances & all_split_instances))
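# Worked example (illustrative, names are hypothetical): consider a DRBD
# instance whose primary and secondary nodes both sit in group A. Moving only
# the secondary node to group B makes the instance newly split, so it appears
# in the first returned list; an instance that already spanned two groups
# before the change and still does afterwards appears in the second list
# instead.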
10539 class _GroupQuery(_QueryBase):
10540 FIELDS = query.GROUP_FIELDS
10542 def ExpandNames(self, lu):
10543 lu.needed_locks = {}
10545 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
10546 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
10549 self.wanted = [name_to_uuid[name]
10550 for name in utils.NiceSort(name_to_uuid.keys())]
10552 # Accept names to be either names or UUIDs.
10555 all_uuid = frozenset(self._all_groups.keys())
10557 for name in self.names:
10558 if name in all_uuid:
10559 self.wanted.append(name)
10560 elif name in name_to_uuid:
10561 self.wanted.append(name_to_uuid[name])
10563 missing.append(name)
10566 raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
10567 errors.ECODE_NOENT)
10569 def DeclareLocks(self, lu, level):
10572 def _GetQueryData(self, lu):
10573 """Computes the list of node groups and their attributes.
10576 do_nodes = query.GQ_NODE in self.requested_data
10577 do_instances = query.GQ_INST in self.requested_data
10579 group_to_nodes = None
10580 group_to_instances = None
10582 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
10583 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
10584 # latter GetAllInstancesInfo() is not enough, for we have to go through
10585 # instance->node. Hence, we will need to process nodes even if we only need
10586 # instance information.
10587 if do_nodes or do_instances:
10588 all_nodes = lu.cfg.GetAllNodesInfo()
10589 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
10592 for node in all_nodes.values():
10593 if node.group in group_to_nodes:
10594 group_to_nodes[node.group].append(node.name)
10595 node_to_group[node.name] = node.group
10598 all_instances = lu.cfg.GetAllInstancesInfo()
10599 group_to_instances = dict((uuid, []) for uuid in self.wanted)
10601 for instance in all_instances.values():
10602 node = instance.primary_node
10603 if node in node_to_group:
10604 group_to_instances[node_to_group[node]].append(instance.name)
10607 # Do not pass on node information if it was not requested.
10608 group_to_nodes = None
10610 return query.GroupQueryData([self._all_groups[uuid]
10611 for uuid in self.wanted],
10612 group_to_nodes, group_to_instances)
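# Illustrative sketch (not part of the original source): when node and
# instance data are requested, the two mappings built above are keyed by
# group UUID, roughly
#   group_to_nodes     = {group_uuid: ["node1.example.com", ...]}
#   group_to_instances = {group_uuid: ["instance1.example.com", ...]}
# with instances attributed to the group of their primary node.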
10615 class LUGroupQuery(NoHooksLU):
10616 """Logical unit for querying node groups.
10621 def CheckArguments(self):
10622 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
10623 self.op.output_fields, False)
10625 def ExpandNames(self):
10626 self.gq.ExpandNames(self)
10628 def Exec(self, feedback_fn):
10629 return self.gq.OldStyleQuery(self)
10632 class LUGroupSetParams(LogicalUnit):
10633 """Modifies the parameters of a node group.
10636 HPATH = "group-modify"
10637 HTYPE = constants.HTYPE_GROUP
10640 def CheckArguments(self):
10643 self.op.alloc_policy,
10646 if all_changes.count(None) == len(all_changes):
10647 raise errors.OpPrereqError("Please pass at least one modification",
10648 errors.ECODE_INVAL)
10650 def ExpandNames(self):
10651 # This raises errors.OpPrereqError on its own:
10652 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10654 self.needed_locks = {
10655 locking.LEVEL_NODEGROUP: [self.group_uuid],
10658 def CheckPrereq(self):
10659 """Check prerequisites.
10662 self.group = self.cfg.GetNodeGroup(self.group_uuid)
10664 if self.group is None:
10665 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10666 (self.op.group_name, self.group_uuid))
10668 if self.op.ndparams:
10669 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
10670 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10671 self.new_ndparams = new_ndparams
10673 def BuildHooksEnv(self):
10674 """Build hooks env.
10678 "GROUP_NAME": self.op.group_name,
10679 "NEW_ALLOC_POLICY": self.op.alloc_policy,
10681 mn = self.cfg.GetMasterNode()
10682 return env, [mn], [mn]
10684 def Exec(self, feedback_fn):
10685 """Modifies the node group.
10690 if self.op.ndparams:
10691 self.group.ndparams = self.new_ndparams
10692 result.append(("ndparams", str(self.group.ndparams)))
10694 if self.op.alloc_policy:
10695 self.group.alloc_policy = self.op.alloc_policy
10697 self.cfg.Update(self.group, feedback_fn)
10702 class LUGroupRemove(LogicalUnit):
10703 HPATH = "group-remove"
10704 HTYPE = constants.HTYPE_GROUP
10707 def ExpandNames(self):
10708 # This raises errors.OpPrereqError on its own:
10709 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10710 self.needed_locks = {
10711 locking.LEVEL_NODEGROUP: [self.group_uuid],
10714 def CheckPrereq(self):
10715 """Check prerequisites.
10717 This checks that the given group name exists as a node group, that it is
10718 empty (i.e., contains no nodes), and that it is not the last group of the
10722 # Verify that the group is empty.
10723 group_nodes = [node.name
10724 for node in self.cfg.GetAllNodesInfo().values()
10725 if node.group == self.group_uuid]
10728 raise errors.OpPrereqError("Group '%s' not empty, has the following"
10730 (self.op.group_name,
10731 utils.CommaJoin(utils.NiceSort(group_nodes))),
10732 errors.ECODE_STATE)
10734 # Verify the cluster would not be left group-less.
10735 if len(self.cfg.GetNodeGroupList()) == 1:
10736 raise errors.OpPrereqError("Group '%s' is the only group,"
10737 " cannot be removed" %
10738 self.op.group_name,
10739 errors.ECODE_STATE)
10741 def BuildHooksEnv(self):
10742 """Build hooks env.
10746 "GROUP_NAME": self.op.group_name,
10748 mn = self.cfg.GetMasterNode()
10749 return env, [mn], [mn]
10751 def Exec(self, feedback_fn):
10752 """Remove the node group.
10756 self.cfg.RemoveNodeGroup(self.group_uuid)
10757 except errors.ConfigurationError:
10758 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
10759 (self.op.group_name, self.group_uuid))
10761 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10764 class LUGroupRename(LogicalUnit):
10765 HPATH = "group-rename"
10766 HTYPE = constants.HTYPE_GROUP
10769 def ExpandNames(self):
10770 # This raises errors.OpPrereqError on its own:
10771 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10773 self.needed_locks = {
10774 locking.LEVEL_NODEGROUP: [self.group_uuid],
10777 def CheckPrereq(self):
10778 """Check prerequisites.
10780 Ensures requested new name is not yet used.
10784 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
10785 except errors.OpPrereqError:
10788 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
10789 " node group (UUID: %s)" %
10790 (self.op.new_name, new_name_uuid),
10791 errors.ECODE_EXISTS)
10793 def BuildHooksEnv(self):
10794 """Build hooks env.
10798 "OLD_NAME": self.op.group_name,
10799 "NEW_NAME": self.op.new_name,
10802 mn = self.cfg.GetMasterNode()
10803 all_nodes = self.cfg.GetAllNodesInfo()
10805 all_nodes.pop(mn, None)
10807 for node in all_nodes.values():
10808 if node.group == self.group_uuid:
10809 run_nodes.append(node.name)
10811 return env, run_nodes, run_nodes
10813 def Exec(self, feedback_fn):
10814 """Rename the node group.
10817 group = self.cfg.GetNodeGroup(self.group_uuid)
10820 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10821 (self.op.group_name, self.group_uuid))
10823 group.name = self.op.new_name
10824 self.cfg.Update(group, feedback_fn)
10826 return self.op.new_name
10829 class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
10830 """Generic tags LU.
10832 This is an abstract class which is the parent of all the other tags LUs.
10836 def ExpandNames(self):
10837 self.needed_locks = {}
10838 if self.op.kind == constants.TAG_NODE:
10839 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
10840 self.needed_locks[locking.LEVEL_NODE] = self.op.name
10841 elif self.op.kind == constants.TAG_INSTANCE:
10842 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
10843 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
10845 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
10846 # not possible to acquire the BGL based on opcode parameters)
10848 def CheckPrereq(self):
10849 """Check prerequisites.
10852 if self.op.kind == constants.TAG_CLUSTER:
10853 self.target = self.cfg.GetClusterInfo()
10854 elif self.op.kind == constants.TAG_NODE:
10855 self.target = self.cfg.GetNodeInfo(self.op.name)
10856 elif self.op.kind == constants.TAG_INSTANCE:
10857 self.target = self.cfg.GetInstanceInfo(self.op.name)
10859 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
10860 str(self.op.kind), errors.ECODE_INVAL)
10863 class LUTagsGet(TagsLU):
10864 """Returns the tags of a given object.
10869 def ExpandNames(self):
10870 TagsLU.ExpandNames(self)
10872 # Share locks as this is only a read operation
10873 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
10875 def Exec(self, feedback_fn):
10876 """Returns the tag list.
10879 return list(self.target.GetTags())
10882 class LUTagsSearch(NoHooksLU):
10883 """Searches the tags for a given pattern.
10888 def ExpandNames(self):
10889 self.needed_locks = {}
10891 def CheckPrereq(self):
10892 """Check prerequisites.
10894 This checks the pattern passed for validity by compiling it.
10898 self.re = re.compile(self.op.pattern)
10899 except re.error, err:
10900 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
10901 (self.op.pattern, err), errors.ECODE_INVAL)
10903 def Exec(self, feedback_fn):
10904 """Returns the tag list.
10908 tgts = [("/cluster", cfg.GetClusterInfo())]
10909 ilist = cfg.GetAllInstancesInfo().values()
10910 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
10911 nlist = cfg.GetAllNodesInfo().values()
10912 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
10914 for path, target in tgts:
10915 for tag in target.GetTags():
10916 if self.re.search(tag):
10917 results.append((path, tag))
10921 class LUTagsSet(TagsLU):
10922 """Sets a tag on a given object.
10927 def CheckPrereq(self):
10928 """Check prerequisites.
10930 This checks the type and length of the tag name and value.
10933 TagsLU.CheckPrereq(self)
10934 for tag in self.op.tags:
10935 objects.TaggableObject.ValidateTag(tag)
10937 def Exec(self, feedback_fn):
10942 for tag in self.op.tags:
10943 self.target.AddTag(tag)
10944 except errors.TagError, err:
10945 raise errors.OpExecError("Error while setting tag: %s" % str(err))
10946 self.cfg.Update(self.target, feedback_fn)
10949 class LUTagsDel(TagsLU):
10950 """Delete a list of tags from a given object.
10955 def CheckPrereq(self):
10956 """Check prerequisites.
10958 This checks that we have the given tag.
10961 TagsLU.CheckPrereq(self)
10962 for tag in self.op.tags:
10963 objects.TaggableObject.ValidateTag(tag)
10964 del_tags = frozenset(self.op.tags)
10965 cur_tags = self.target.GetTags()
10967 diff_tags = del_tags - cur_tags
10969 diff_names = ("'%s'" % i for i in sorted(diff_tags))
10970 raise errors.OpPrereqError("Tag(s) %s not found" %
10971 (utils.CommaJoin(diff_names), ),
10972 errors.ECODE_NOENT)
10974 def Exec(self, feedback_fn):
10975 """Remove the tag from the object.
10978 for tag in self.op.tags:
10979 self.target.RemoveTag(tag)
10980 self.cfg.Update(self.target, feedback_fn)
10983 class LUTestDelay(NoHooksLU):
10984 """Sleep for a specified amount of time.
10986 This LU sleeps on the master and/or nodes for a specified amount of
10992 def ExpandNames(self):
10993 """Expand names and set required locks.
10995 This expands the node list, if any.
10998 self.needed_locks = {}
10999 if self.op.on_nodes:
11000 # _GetWantedNodes can be used here, but is not always appropriate to use
11001 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
11002 # more information.
11003 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
11004 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
11006 def _TestDelay(self):
11007 """Do the actual sleep.
11010 if self.op.on_master:
11011 if not utils.TestDelay(self.op.duration):
11012 raise errors.OpExecError("Error during master delay test")
11013 if self.op.on_nodes:
11014 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
11015 for node, node_result in result.items():
11016 node_result.Raise("Failure during rpc call to node %s" % node)
11018 def Exec(self, feedback_fn):
11019 """Execute the test delay opcode, with the wanted repetitions.
11022 if self.op.repeat == 0:
11025 top_value = self.op.repeat - 1
11026 for i in range(self.op.repeat):
11027 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
11031 class LUTestJqueue(NoHooksLU):
11032 """Utility LU to test some aspects of the job queue.
11037 # Must be lower than default timeout for WaitForJobChange to see whether it
11038 # notices changed jobs
11039 _CLIENT_CONNECT_TIMEOUT = 20.0
11040 _CLIENT_CONFIRM_TIMEOUT = 60.0
11043 def _NotifyUsingSocket(cls, cb, errcls):
11044 """Opens a Unix socket and waits for another program to connect.
11047 @param cb: Callback to send socket name to client
11048 @type errcls: class
11049 @param errcls: Exception class to use for errors
11052 # Using a temporary directory as there's no easy way to create temporary
11053 # sockets without writing a custom loop around tempfile.mktemp and
11055 tmpdir = tempfile.mkdtemp()
11057 tmpsock = utils.PathJoin(tmpdir, "sock")
11059 logging.debug("Creating temporary socket at %s", tmpsock)
11060 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
11065 # Send details to client
11068 # Wait for client to connect before continuing
11069 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
11071 (conn, _) = sock.accept()
11072 except socket.error, err:
11073 raise errcls("Client didn't connect in time (%s)" % err)
11077 # Remove as soon as client is connected
11078 shutil.rmtree(tmpdir)
11080 # Wait for client to close
11083 # pylint: disable-msg=E1101
11084 # Instance of '_socketobject' has no ... member
11085 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
11087 except socket.error, err:
11088 raise errcls("Client failed to confirm notification (%s)" % err)
11092 def _SendNotification(self, test, arg, sockname):
11093 """Sends a notification to the client.
11096 @param test: Test name
11097 @param arg: Test argument (depends on test)
11098 @type sockname: string
11099 @param sockname: Socket path
11102 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
11104 def _Notify(self, prereq, test, arg):
11105 """Notifies the client of a test.
11108 @param prereq: Whether this is a prereq-phase test
11110 @param test: Test name
11111 @param arg: Test argument (depends on test)
11115 errcls = errors.OpPrereqError
11117 errcls = errors.OpExecError
11119 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
11123 def CheckArguments(self):
11124 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
11125 self.expandnames_calls = 0
11127 def ExpandNames(self):
11128 checkargs_calls = getattr(self, "checkargs_calls", 0)
11129 if checkargs_calls < 1:
11130 raise errors.ProgrammerError("CheckArguments was not called")
11132 self.expandnames_calls += 1
11134 if self.op.notify_waitlock:
11135 self._Notify(True, constants.JQT_EXPANDNAMES, None)
11137 self.LogInfo("Expanding names")
11139 # Get lock on master node (just to get a lock, not for a particular reason)
11140 self.needed_locks = {
11141 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
11144 def Exec(self, feedback_fn):
11145 if self.expandnames_calls < 1:
11146 raise errors.ProgrammerError("ExpandNames was not called")
11148 if self.op.notify_exec:
11149 self._Notify(False, constants.JQT_EXEC, None)
11151 self.LogInfo("Executing")
11153 if self.op.log_messages:
11154 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
11155 for idx, msg in enumerate(self.op.log_messages):
11156 self.LogInfo("Sending log message %s", idx + 1)
11157 feedback_fn(constants.JQT_MSGPREFIX + msg)
      # Report how many test messages have been sent
      self._Notify(False, constants.JQT_LOGMSG, idx + 1)
    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")
    return True
11167 class IAllocator(object):
11168 """IAllocator framework.
  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage
11180 # pylint: disable-msg=R0902
11181 # lots of instance attributes
11183 "name", "mem_size", "disks", "disk_template",
11184 "os", "tags", "nics", "vcpus", "hypervisor",
11187 "name", "relocate_from",
  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    self.mode = mode
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
11198 # init all input fields so that pylint is happy
    self.name = None
    self.mem_size = self.disks = self.disk_template = None
11201 self.os = self.tags = self.nics = self.vcpus = None
11202 self.hypervisor = None
11203 self.relocate_from = None
11205 self.evac_nodes = None
11207 self.required_nodes = None
11208 # init result fields
11209 self.success = self.info = self.result = None
11210 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
11211 keyset = self._ALLO_KEYS
11212 fn = self._AddNewInstance
11213 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
11214 keyset = self._RELO_KEYS
11215 fn = self._AddRelocateInstance
11216 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
11217 keyset = self._EVAC_KEYS
11218 fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)

    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
11232 self._BuildInputData(fn)
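  # Illustrative sketch, not part of the module: a typical caller builds an
  # IAllocator for one of the supported modes and then runs the external
  # script; "hail" is only an example allocator name.
  #
  #   ial = IAllocator(self.cfg, self.rpc,
  #                    mode=constants.IALLOCATOR_MODE_RELOC,
  #                    name=instance.name,
  #                    relocate_from=[old_secondary])
  #   ial.Run("hail")
  #   if not ial.success:
  #     raise errors.OpPrereqError("Allocator failed: %s" % ial.info,
  #                                errors.ECODE_NORES)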
11234 def _ComputeClusterData(self):
11235 """Compute the generic allocator input data.
11237 This is the data that is independent of the actual operation.
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    ninfo = cfg.GetAllNodesInfo()
11251 iinfo = cfg.GetAllInstancesInfo().values()
11252 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
11255 node_list = [n.name for n in ninfo.values() if n.vm_capable]
11257 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
11258 hypervisor_name = self.hypervisor
11259 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
11260 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
11261 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
11262 hypervisor_name = cluster_info.enabled_hypervisors[0]
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
11270 data["nodegroups"] = self._ComputeNodeGroupData(cfg)
11272 config_ndata = self._ComputeBasicNodeData(ninfo)
11273 data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
11274 i_list, config_ndata)
11275 assert len(data["nodes"]) == len(ninfo), \
11276 "Incomplete node data computed"
11278 data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
11280 self.in_data = data
  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.
    ng = {}
    for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
      ng[guuid] = {"name": gdata.name,
                   "alloc_policy": gdata.alloc_policy}
    return ng
  @staticmethod
  def _ComputeBasicNodeData(node_cfg):
11297 """Compute global node data.
11300 @returns: a dict of name: (node dict, node config)
    node_results = {}
    for ninfo in node_cfg.values():
      # fill in static (config-based) values
      pnr = {
        "tags": list(ninfo.GetTags()),
11308 "primary_ip": ninfo.primary_ip,
11309 "secondary_ip": ninfo.secondary_ip,
11310 "offline": ninfo.offline,
11311 "drained": ninfo.drained,
11312 "master_candidate": ninfo.master_candidate,
11313 "group": ninfo.group,
11314 "master_capable": ninfo.master_capable,
11315 "vm_capable": ninfo.vm_capable,
11318 node_results[ninfo.name] = pnr
11320 return node_results
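  # Illustrative sketch, not part of the module: for a node "node1" the
  # static entry built above would look roughly like (addresses invented):
  #
  #   {"tags": [], "primary_ip": "192.0.2.1", "secondary_ip": "192.0.2.2",
  #    "offline": False, "drained": False, "master_candidate": True,
  #    "group": "<group uuid>", "master_capable": True, "vm_capable": True}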
  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.
11327 @param node_results: the basic node structures as filled from the config
11330 # make a copy of the current dict
11331 node_results = dict(node_results)
11332 for nname, nresult in node_data.items():
11333 assert nname in node_results, "Missing basic data for node %s" % nname
11334 ninfo = node_cfg[nname]
11336 if not (ninfo.offline or ninfo.drained):
11337 nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload
11342 for attr in ['memory_total', 'memory_free', 'memory_dom0',
11343 'vg_size', 'vg_free', 'cpu_total']:
11344 if attr not in remote_info:
11345 raise errors.OpExecError("Node '%s' didn't return attribute"
11346 " '%s'" % (nname, attr))
11347 if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
11351 # compute memory used by primary instances
11352 i_p_mem = i_p_up_mem = 0
11353 for iinfo, beinfo in i_list:
11354 if iinfo.primary_node == nname:
11355 i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)
            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]
        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
11369 "reserved_memory": remote_info['memory_dom0'],
11370 "free_memory": remote_info['memory_free'],
11371 "total_disk": remote_info['vg_size'],
11372 "free_disk": remote_info['vg_free'],
11373 "total_cpus": remote_info['cpu_total'],
11374 "i_pri_memory": i_p_mem,
11375 "i_pri_up_memory": i_p_up_mem,
11377 pnr_dyn.update(node_results[nname])
11378 node_results[nname] = pnr_dyn
11380 return node_results
  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
11384 """Compute global instance data.
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
11391 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                    }
11397 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
11398 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
11399 nic_data.append(nic_dict)
11401 "tags": list(iinfo.GetTags()),
11402 "admin_up": iinfo.admin_up,
11403 "vcpus": beinfo[constants.BE_VCPUS],
11404 "memory": beinfo[constants.BE_MEMORY],
11406 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
11408 "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
11409 "disk_template": iinfo.disk_template,
11410 "hypervisor": iinfo.hypervisor,
11412 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
11414 instance_data[iinfo.name] = pir
11416 return instance_data
11418 def _AddNewInstance(self):
11419 """Add new instance data to allocator structure.
    This, in combination with _ComputeClusterData, will create the
    correct structure needed as input for the allocator.
11424 The checks for the completeness of the opcode must have already been
11428 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
    if self.disk_template in constants.DTS_INT_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request
11448 def _AddRelocateInstance(self):
11449 """Add relocate instance data to allocator structure.
    This, in combination with _ComputeClusterData, will create the
    correct structure needed as input for the allocator.
11454 The checks for the completeness of the opcode must have already been
11458 instance = self.cfg.GetInstanceInfo(self.name)
11459 if instance is None:
11460 raise errors.ProgrammerError("Unknown instance '%s' passed to"
11461 " IAllocator" % self.name)
11463 if instance.disk_template not in constants.DTS_MIRRORED:
11464 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
11465 errors.ECODE_INVAL)
11467 if instance.disk_template in constants.DTS_INT_MIRROR and \
11468 len(instance.secondary_nodes) != 1:
11469 raise errors.OpPrereqError("Instance has not exactly one secondary node",
11470 errors.ECODE_STATE)
11472 self.required_nodes = 1
11473 disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request
11484 def _AddEvacuateNodes(self):
11485 """Add evacuate nodes data to allocator structure.
11489 "evac_nodes": self.evac_nodes
11493 def _BuildInputData(self, fn):
11494 """Build input data structures.
11497 self._ComputeClusterData()
    request = fn()
    request["type"] = self.mode
11501 self.in_data["request"] = request
11503 self.in_text = serializer.Dump(self.in_data)
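  # Illustrative sketch, not part of the module: the serialized input handed
  # to the external script is roughly shaped like the following (values
  # abbreviated, allocate mode assumed):
  #
  #   {"version": 2,
  #    "cluster_name": "...", "cluster_tags": [...],
  #    "enabled_hypervisors": [...],
  #    "nodegroups": {...}, "nodes": {...}, "instances": {...},
  #    "request": {"type": "allocate", "name": "...", "memory": ...,
  #                "disks": [...], "required_nodes": 1}}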
11505 def Run(self, name, validate=True, call_fn=None):
11506 """Run an instance allocator and return the results.
11509 if call_fn is None:
11510 call_fn = self.rpc.call_iallocator_runner
11512 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
11513 result.Raise("Failure while running the iallocator script")
11515 self.out_text = result.payload
    if validate:
      self._ValidateResult()
11519 def _ValidateResult(self):
11520 """Process the allocator results.
11522 This will process and if successful save the result in
11523 self.out_data and the other parameters.
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
11531 if not isinstance(rdict, dict):
11532 raise errors.OpExecError("Can't parse iallocator results: not a dict")
    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]
11539 for key in "success", "info", "result":
11540 if key not in rdict:
11541 raise errors.OpExecError("Can't parse iallocator results:"
11542 " missing key '%s'" % key)
11543 setattr(self, key, rdict[key])
11545 if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict
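  # Illustrative sketch, not part of the module: a well-formed reply from the
  # allocator script, as accepted by the validation above, looks roughly like:
  #
  #   {"success": true, "info": "allocation successful",
  #    "result": ["node1.example.com", "node2.example.com"]}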
11551 class LUTestAllocator(NoHooksLU):
11552 """Run allocator tests.
11554 This LU runs the allocator tests
11557 def CheckPrereq(self):
11558 """Check prerequisites.
    This checks the opcode parameters depending on the direction and mode test.
11563 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
11564 for attr in ["mem_size", "disks", "disk_template",
11565 "os", "tags", "nics", "vcpus"]:
11566 if not hasattr(self.op, attr):
11567 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
11568 attr, errors.ECODE_INVAL)
11569 iname = self.cfg.ExpandInstanceName(self.op.name)
11570 if iname is not None:
11571 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
11572 iname, errors.ECODE_EXISTS)
11573 if not isinstance(self.op.nics, list):
11574 raise errors.OpPrereqError("Invalid parameter 'nics'",
11575 errors.ECODE_INVAL)
11576 if not isinstance(self.op.disks, list):
11577 raise errors.OpPrereqError("Invalid parameter 'disks'",
11578 errors.ECODE_INVAL)
11579 for row in self.op.disks:
11580 if (not isinstance(row, dict) or
11581 "size" not in row or
11582 not isinstance(row["size"], int) or
11583 "mode" not in row or
11584 row["mode"] not in ['r', 'w']):
11585 raise errors.OpPrereqError("Invalid contents of the 'disks'"
11586 " parameter", errors.ECODE_INVAL)
11587 if self.op.hypervisor is None:
11588 self.op.hypervisor = self.cfg.GetHypervisorType()
11589 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
11590 fname = _ExpandInstanceName(self.cfg, self.op.name)
11591 self.op.name = fname
11592 self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
11593 elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
11594 if not hasattr(self.op, "evac_nodes"):
11595 raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
11596 " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
11599 self.op.mode, errors.ECODE_INVAL)
11601 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
11602 if self.op.allocator is None:
11603 raise errors.OpPrereqError("Missing allocator name",
11604 errors.ECODE_INVAL)
11605 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
11606 raise errors.OpPrereqError("Wrong allocator test '%s'" %
11607 self.op.direction, errors.ECODE_INVAL)
11609 def Exec(self, feedback_fn):
11610 """Run the allocator test.
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec" % self.op.mode)
11640 if self.op.direction == constants.IALLOCATOR_DIR_IN:
11641 result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  }
11656 assert set(_QUERY_IMPL.keys()) == constants.QR_OP_QUERY
11659 def _GetQueryImplementation(name):
11660 """Returns the implemtnation for a query type.
11662 @param name: Query type, must be one of L{constants.QR_OP_QUERY}
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
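# Illustrative sketch, not part of the module: resolving a query type through
# the table above.
#
#   impl = _GetQueryImplementation(constants.QR_NODE)   # -> _NodeQuery
#   # an unknown resource name raises OpPrereqError(..., errors.ECODE_INVAL)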