# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay to many lines in this module

import logging
import copy
import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import ht

import ganeti.masterd.instance # pylint: disable-msg=W0611

# Common opcode attributes

#: output fields for a query operation
_POutputFields = ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString))

#: the shutdown timeout
_PShutdownTimeout = ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
                     ht.TPositiveInt)

#: the force parameter
_PForce = ("force", False, ht.TBool)

#: a required instance name (for single-instance LUs)
_PInstanceName = ("instance_name", ht.NoDefault, ht.TNonEmptyString)

#: Whether to ignore offline nodes
_PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool)

#: a required node name (for single-node LUs)
_PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString)

#: the migration type (live/non-live)
_PMigrationMode = ("mode", None,
                   ht.TOr(ht.TNone, ht.TElemOf(constants.HT_MIGRATION_MODES)))

#: the obsolete 'live' mode (boolean)
_PMigrationLive = ("live", None, ht.TMaybeBool)
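
# Illustrative sketch (not part of the original module): an LU would
# typically reuse the common attributes above in its _OP_PARAMS list,
# optionally adding its own entries. The class name and the extra
# "reason" parameter below are hypothetical.
#
#   class LUExampleShutdown(LogicalUnit):
#     _OP_PARAMS = [
#       _PInstanceName,        # required, no default
#       _PShutdownTimeout,     # defaults to DEFAULT_SHUTDOWN_TIMEOUT
#       _PIgnoreOfflineNodes,  # defaults to False
#       ("reason", "", ht.TString),
#     ]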


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)
  @cvar _OP_PARAMS: a list of opcode attributes, their default values
      they should get if not already defined, and types they must match

  """
  HPATH = None
  HTYPE = None
  _OP_PARAMS = []
  REQ_BGL = True
  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.Log = processor.Log # pylint: disable-msg=C0103
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None
    # The new kind-of-type-system
    op_id = self.op.OP_ID
    for attr_name, aval, test in self._OP_PARAMS:
      if not hasattr(op, attr_name):
        if aval == ht.NoDefault:
          raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
                                     (op_id, attr_name), errors.ECODE_INVAL)
        else:
          if callable(aval):
            dval = aval()
          else:
            dval = aval
          setattr(self.op, attr_name, dval)
      attr_val = getattr(op, attr_name)
      if test == ht.NoType:
        # no tests here
        continue
      if not callable(test):
        raise errors.ProgrammerError("Validation for parameter '%s.%s' failed,"
                                     " given type is not a proper type (%s)" %
                                     (op_id, attr_name, test))
      if not test(attr_val):
        logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
                      self.op.OP_ID, attr_name, type(attr_val), attr_val)
        raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
                                   (op_id, attr_name), errors.ECODE_INVAL)

    self.CheckArguments()
182 """Returns the SshRunner object
186 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
189 ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left purely as a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods don't need to worry about missing parameters.

    """

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """
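
  # Illustrative sketch (not from the original file): a DeclareLocks
  # implementation that, once the instance lock is held, narrows the node
  # level down to the instance's primary node. The opcode attribute used
  # here is hypothetical.
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       instance = self.cfg.GetInstanceInfo(self.op.instance_name)
  #       self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node]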

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_' as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None

    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused argument and the
    # could-be-a-function pylint warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
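
  # Illustrative sketch (not from the original file): in a typical
  # single-instance LU this helper is the whole ExpandNames body.
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()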

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
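
  # Illustrative sketch (not from the original file): the usual pairing of
  # this helper with ExpandNames and DeclareLocks in an instance LU.
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes()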


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLu.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"
444 """Tasklet base class.
446 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
447 they can mix legacy code with tasklets. Locking needs to be done in the LU,
448 tasklets know nothing about locks.
450 Subclasses must follow these rules:
451 - Implement CheckPrereq
455 def __init__(self, lu):
462 def CheckPrereq(self):
463 """Check prerequisites for this tasklets.
465 This method should check whether the prerequisites for the execution of
466 this tasklet are fulfilled. It can do internode communication, but it
467 should be idempotent - no cluster or system changes are allowed.
469 The method should raise errors.OpPrereqError in case something is not
470 fulfilled. Its return value is ignored.
472 This method should also update all parameters to their canonical form if it
473 hasn't been done before.
478 def Exec(self, feedback_fn):
479 """Execute the tasklet.
481 This method should implement the actual work. It should raise
482 errors.OpExecError for failures that are somewhat dealt with in code, or
486 raise NotImplementedError
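
# Illustrative sketch (not part of the original module): a minimal tasklet
# skeleton; the class name and attributes below are hypothetical.
#
#   class TLExample(Tasklet):
#     def __init__(self, lu, instance_name):
#       Tasklet.__init__(self, lu)
#       self.instance_name = instance_name
#
#     def CheckPrereq(self):
#       self.instance = self.lu.cfg.GetInstanceInfo(self.instance_name)
#
#     def Exec(self, feedback_fn):
#       feedback_fn("Working on %s" % self.instance.name)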


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
                                 " non-empty list of nodes whose name is to be"
                                 " expanded.")

  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
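
# Illustrative sketch (not part of the original module): resetting one key
# and updating another. The keys and values are made up for the example.
#
#   old = {"vcpus": 1, "memory": 128}
#   _GetUpdatedParams(old, {"memory": constants.VALUE_DEFAULT, "vcpus": 2})
#   # -> {"vcpus": 2}; "memory" is removed so the cluster default applies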


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global parameters.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _RequireFileStorage():
  """Checks that file storage is enabled.

  @raise errors.OpPrereqError: when file storage is disabled

  """
  if not constants.ENABLE_FILE_STORAGE:
    raise errors.OpPrereqError("File storage disabled at configure time",
                               errors.ECODE_INVAL)


def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
  if template == constants.DT_FILE:
    _RequireFileStorage()
  return True


def _CheckStorageType(storage_type):
  """Ensure a given storage type is valid.

  """
  if storage_type not in constants.VALID_STORAGE_TYPES:
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                               errors.ECODE_INVAL)
  if storage_type == constants.ST_FILE:
    _RequireFileStorage()
  return True


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @param memory: the memory size of the instance
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      the defaults
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
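
# Illustrative sketch (not part of the original module): building a hooks
# environment with one value overridden, e.g. to report a planned memory
# change before it is applied.
#
#   env = _BuildInstanceHookEnvByObject(self, instance,
#                                       override={"memory": 1024})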


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should
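
# Illustrative worked example (not part of the original module): with a
# candidate_pool_size of 10 and GetMasterCandidateStats reporting 3 current
# and 3 desired candidates, the new node bumps mc_should to min(3 + 1, 10)
# == 4, and 3 < 4 means the node promotes itself.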


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  variant = objects.OS.GetVariant(name)
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both an iallocator and a node",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found."
                                 " Please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for LUVerifyCluster.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUVerifyCluster.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_PARAMS = [
    ("skip_checks", ht.EmptyList,
     ht.TListOf(ht.TElemOf(constants.VERIFY_OPTIONAL_CHECKS))),
    ("verbose", False, ht.TBool),
    ("error_codes", False, ht.TBool),
    ("debug_simulate_errors", False, ht.TBool),
    ]
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEOS = (TNODE, "ENODEOS")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @type name: string
    @ivar name: the node name to which this object refers
    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
        of this node (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
    @ivar rpc_fail: whether the RPC verify call failed (overall,
        not whether the individual keys were correct) (runtime)
    @type lvm_fail: boolean
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
    @type hyp_fail: boolean
    @ivar hyp_fail: whether the RPC call didn't return the instance list
    @type ghost: boolean
    @ivar ghost: whether this is a known node or not (config)
    @type os_fail: boolean
    @ivar os_fail: whether the RPC call didn't return valid OS data
    @type oslist: list
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
    @type vm_capable: boolean
    @ivar vm_capable: whether the node can host instances

    """
    def __init__(self, offline=False, name=None, vm_capable=True):
      self.name = name
      self.volumes = {}
      self.instances = []
      self.pinst = []
      self.sinst = []
      self.sbp = {}
      self.mfree = 0
      self.dfree = 0
      self.offline = offline
      self.vm_capable = vm_capable
      self.rpc_fail = False
      self.lvm_fail = False
      self.hyp_fail = False
      self.ghost = False
      self.os_fail = False
      self.oslist = {}

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes:
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + str(item)
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn(" - %s" % msg)

  def _ErrorIf(self, cond, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = bool(cond) or self.op.debug_simulate_errors
    if cond:
      self._Error(*args, **kwargs)
    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond
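
  # Illustrative sketch (not part of the original module): with
  # error_codes=True the message is emitted in machine-parseable form, e.g.
  #    - ERROR:ENODELVM:node:node1.example.com:unable to check volume groups
  # and with error_codes=False in the simpler form
  #    - ERROR: node node1.example.com: unable to check volume groups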

  def _VerifyNode(self, ninfo, nresult):
    """Perform some basic validation on data returned from a node.

      - check the result data structure is well formed and has all the
        mandatory fields
      - check ganeti version

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the results from the node
    @rtype: boolean
    @return: whether overall this call was successful (and we can expect
         reasonable values in the response)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # main result, nresult should be a non-empty dict
    test = not nresult or not isinstance(nresult, dict)
    _ErrorIf(test, self.ENODERPC, node,
             "unable to verify node: no data returned")
    if test:
      return False

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = nresult.get("version", None)
    test = not (remote_version and
                isinstance(remote_version, (list, tuple)) and
                len(remote_version) == 2)
    _ErrorIf(test, self.ENODERPC, node,
             "connection to node returned invalid data")
    if test:
      return False

    test = local_version != remote_version[0]
    _ErrorIf(test, self.ENODEVERSION, node,
             "incompatible protocol versions: master %s,"
             " node %s", local_version, remote_version[0])
    if test:
      return False

    # node seems compatible, we can actually try to look into its results

    # full package version
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
                  self.ENODEVERSION, node,
                  "software version mismatch: master %s, node %s",
                  constants.RELEASE_VERSION, remote_version[1],
                  code=self.ETYPE_WARNING)

    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
    if ninfo.vm_capable and isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        test = hv_result is not None
        _ErrorIf(test, self.ENODEHV, node,
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)

    test = nresult.get(constants.NV_NODESETUP,
                       ["Missing NODESETUP results"])
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
             "; ".join(test))

    return True

  def _VerifyNodeTime(self, ninfo, nresult,
                      nvinfo_starttime, nvinfo_endtime):
    """Check the node time.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    ntime = nresult.get(constants.NV_TIME, None)
    try:
      ntime_merged = utils.MergeTime(ntime)
    except (ValueError, TypeError):
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
      return

    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)

  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)

  def _VerifyInstance(self, instance, instanceconfig, node_image,
                      diskstatus):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if node != node_current:
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

    diskdata = [(nname, success, status, idx)
                for (nname, disks) in diskstatus.items()
                for idx, (success, status) in enumerate(disks)]

    for nname, success, bdev_status, idx in diskdata:
      _ErrorIf(instanceconfig.admin_up and not success,
               self.EINSTANCEFAULTYDISK, instance,
               "couldn't retrieve status for disk/%s on %s: %s",
               idx, nname, bdev_status)
      _ErrorIf((instanceconfig.admin_up and success and
                bdev_status.ldisk_status == constants.LDS_FAULTY),
               self.EINSTANCEFAULTYDISK, instance,
               "disk/%s on %s is faulty", idx, nname)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @type reserved: L{ganeti.utils.FieldSet}
    @param reserved: a FieldSet of reserved volume names

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = ((node not in node_vol_should or
                 volume not in node_vol_should[node]) and
                not reserved.Matches(volume))
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate failovers should"
                      " peer node %s fail", prinode)

  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
                       master_files):
    """Verifies and computes the node required file checksums.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param master_files: list of files that only masters should have

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_cksum = nresult.get(constants.NV_FILELIST, None)
    test = not isinstance(remote_cksum, dict)
    _ErrorIf(test, self.ENODEFILECHECK, node,
             "node hasn't returned file checksum data")
    if test:
      return

    for file_name in file_list:
      node_is_mc = ninfo.master_candidate
      must_have = (file_name not in master_files) or node_is_mc
      # missing
      test1 = file_name not in remote_cksum
      # invalid checksum
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
      # existing and good
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
               "file '%s' missing", file_name)
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
               "file '%s' has wrong checksum", file_name)
      # not candidate and this is not a must-have file
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist on non master"
               " candidates (and the file is outdated)", file_name)
      # all good, except non-master/non-must have combination
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist"
               " on non master candidates", file_name)

  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                      drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_helper: the configured DRBD usermode helper
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    if drbd_helper:
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
      test = (helper_result is None)
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
               "no drbd usermode helper returned")
      if helper_result:
        status, payload = helper_result
        test = not status
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "drbd usermode helper check unsuccessful: %s", payload)
        test = status and (payload != drbd_helper)
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "wrong drbd usermode helper: %s", payload)

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeOS(self, ninfo, nresult, nimg):
    """Builds the node OS structures.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_os = nresult.get(constants.NV_OSLIST, None)
    test = (not isinstance(remote_os, list) or
            not compat.all(isinstance(v, list) and len(v) == 7
                           for v in remote_os))

    _ErrorIf(test, self.ENODEOS, node,
             "node hasn't returned valid OS data")

    nimg.os_fail = test
    if test:
      return

    os_dict = {}

    for (name, os_path, status, diagnose,
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:

      if name not in os_dict:
        os_dict[name] = []

      # parameters is a list of lists instead of list of tuples due to
      # JSON lacking a real tuple type, fix it:
      parameters = [tuple(v) for v in parameters]
      os_dict[name].append((os_path, status, diagnose,
                            set(variants), set(parameters), set(api_ver)))

    nimg.oslist = os_dict

  def _VerifyNodeOS(self, ninfo, nimg, base):
    """Verifies the node OS list.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nimg: the node image object
    @param base: the 'template' node we match against (e.g. from the master)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"

    for os_name, os_data in nimg.oslist.items():
      assert os_data, "Empty OS status for OS %s?!" % os_name
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
      _ErrorIf(not f_status, self.ENODEOS, node,
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
               "OS '%s' has multiple entries (first one shadows the rest): %s",
               os_name, utils.CommaJoin([v[0] for v in os_data]))
      # this will be caught in the backend too
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
               and not f_var, self.ENODEOS, node,
               "OS %s with API at least %d does not declare any variant",
               os_name, constants.OS_API_V15)
      # comparisons with the 'base' image
      test = os_name not in base.oslist
      _ErrorIf(test, self.ENODEOS, node,
               "Extra OS %s not present on reference node (%s)",
               os_name, base.name)
      if test:
        continue
      assert base.oslist[os_name], "Base node has empty OS status?"
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
      if not b_status:
        # base OS is invalid, skipping
        continue
      for kind, a, b in [("API version", f_api, b_api),
                         ("variants list", f_var, b_var),
                         ("parameters", f_param, b_param)]:
        _ErrorIf(a != b, self.ENODEOS, node,
                 "OS %s %s differs from reference node %s: %s vs. %s",
                 kind, os_name, base.name,
                 utils.CommaJoin(a), utils.CommaJoin(b))

    # check any missing OSes
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
    _ErrorIf(missing, self.ENODEOS, node,
             "OSes present on reference node %s but missing on this node: %s",
             base.name, utils.CommaJoin(missing))

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False
1848 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1849 """Verifies and updates the node instance list.
1851 If the listing was successful, then updates this node's instance
1852 list. Otherwise, it marks the RPC call as failed for the instance
1855 @type ninfo: L{objects.Node}
1856 @param ninfo: the node to check
1857 @param nresult: the remote results for the node
1858 @param nimg: the node image object
1861 idata = nresult.get(constants.NV_INSTANCELIST, None)
1862 test = not isinstance(idata, list)
1863 self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1864 " (instancelist): %s", utils.SafeEncode(str(idata)))
1865 if test:
1866 nimg.hyp_fail = True
1867 else:
1868 nimg.instances = idata
1870 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1871 """Verifies and computes a node information map
1873 @type ninfo: L{objects.Node}
1874 @param ninfo: the node to check
1875 @param nresult: the remote results for the node
1876 @param nimg: the node image object
1877 @param vg_name: the configured VG name
1880 node = ninfo.name
1881 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1883 # try to read free memory (from the hypervisor)
1884 hv_info = nresult.get(constants.NV_HVINFO, None)
1885 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1886 _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1887 if not test:
1888 try:
1889 nimg.mfree = int(hv_info["memory_free"])
1890 except (ValueError, TypeError):
1891 _ErrorIf(True, self.ENODERPC, node,
1892 "node returned invalid nodeinfo, check hypervisor")
1894 # FIXME: devise a free space model for file based instances as well
1895 if vg_name is not None:
1896 test = (constants.NV_VGLIST not in nresult or
1897 vg_name not in nresult[constants.NV_VGLIST])
1898 _ErrorIf(test, self.ENODELVM, node,
1899 "node didn't return data for the volume group '%s'"
1900 " - it is either missing or broken", vg_name)
1901 if not test:
1902 try:
1903 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1904 except (ValueError, TypeError):
1905 _ErrorIf(True, self.ENODERPC, node,
1906 "node returned invalid LVM info, check LVM status")
1908 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
1909 """Gets per-disk status information for all instances.
1911 @type nodelist: list of strings
1912 @param nodelist: Node names
1913 @type node_image: dict of (name, L{objects.Node})
1914 @param node_image: Node objects
1915 @type instanceinfo: dict of (name, L{objects.Instance})
1916 @param instanceinfo: Instance objects
1919 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1921 node_disks = {}
1922 node_disks_devonly = {}
1924 for nname in nodelist:
1925 disks = [(inst, disk)
1926 for instlist in [node_image[nname].pinst,
1927 node_image[nname].sinst]
1928 for inst in instlist
1929 for disk in instanceinfo[inst].disks]
1931 if not disks:
1932 # No need to collect data
1933 continue
1935 node_disks[nname] = disks
1937 # Creating copies as SetDiskID below will modify the objects and that can
1938 # lead to incorrect data returned from nodes
1939 devonly = [dev.Copy() for (_, dev) in disks]
1941 for dev in devonly:
1942 self.cfg.SetDiskID(dev, nname)
1944 node_disks_devonly[nname] = devonly
1946 assert len(node_disks) == len(node_disks_devonly)
1948 # Collect data from all nodes with disks
1949 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
1950 node_disks_devonly)
1952 assert len(result) == len(node_disks)
1954 instdisk = {}
1956 for (nname, nres) in result.items():
1957 if nres.offline:
1958 # Ignore offline node
1959 continue
1961 disks = node_disks[nname]
1963 msg = nres.fail_msg
1964 _ErrorIf(msg, self.ENODERPC, nname,
1965 "while getting disk information: %s", nres.fail_msg)
1966 if msg:
1967 # No data from this node
1968 data = len(disks) * [None]
1969 else:
1970 data = nres.payload
1972 for ((inst, _), status) in zip(disks, data):
1973 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
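# For illustration (names hypothetical), instdisk ends up shaped like
#   {"inst1": {"node1": [status_of_disk0, status_of_disk1],
#              "node2": [status_of_disk0, status_of_disk1]}}
# i.e. one status entry per disk, per node, per instance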
1975 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
1976 len(nnames) <= len(instanceinfo[inst].all_nodes)
1977 for inst, nnames in instdisk.items()
1978 for nname, statuses in nnames.items())
1980 return instdisk
1982 def BuildHooksEnv(self):
1985 Cluster-Verify hooks are run in the post phase only; their failure is
1986 logged in the verify output and makes the verification fail.
1989 all_nodes = self.cfg.GetNodeList()
1991 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1993 for node in self.cfg.GetAllNodesInfo().values():
1994 env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1996 return env, [], all_nodes
1998 def Exec(self, feedback_fn):
1999 """Verify integrity of cluster, performing various test on nodes.
2002 self.bad = False
2003 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2004 verbose = self.op.verbose
2005 self._feedback_fn = feedback_fn
2006 feedback_fn("* Verifying global settings")
2007 for msg in self.cfg.VerifyConfig():
2008 _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2010 # Check the cluster certificates
2011 for cert_filename in constants.ALL_CERT_FILES:
2012 (errcode, msg) = _VerifyCertificate(cert_filename)
2013 _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2015 vg_name = self.cfg.GetVGName()
2016 drbd_helper = self.cfg.GetDRBDHelper()
2017 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2018 cluster = self.cfg.GetClusterInfo()
2019 nodelist = utils.NiceSort(self.cfg.GetNodeList())
2020 nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
2021 instancelist = utils.NiceSort(self.cfg.GetInstanceList())
2022 instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
2023 for iname in instancelist)
2024 i_non_redundant = [] # Non redundant instances
2025 i_non_a_balanced = [] # Non auto-balanced instances
2026 n_offline = 0 # Count of offline nodes
2027 n_drained = 0 # Count of nodes being drained
2028 node_vol_should = {}
2030 # FIXME: verify OS list
2031 # do local checksums
2032 master_files = [constants.CLUSTER_CONF_FILE]
2033 master_node = self.master_node = self.cfg.GetMasterNode()
2034 master_ip = self.cfg.GetMasterIP()
2036 file_names = ssconf.SimpleStore().GetFileList()
2037 file_names.extend(constants.ALL_CERT_FILES)
2038 file_names.extend(master_files)
2039 if cluster.modify_etc_hosts:
2040 file_names.append(constants.ETC_HOSTS)
2042 local_checksums = utils.FingerprintFiles(file_names)
2044 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2045 node_verify_param = {
2046 constants.NV_FILELIST: file_names,
2047 constants.NV_NODELIST: [node.name for node in nodeinfo
2048 if not node.offline],
2049 constants.NV_HYPERVISOR: hypervisors,
2050 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2051 node.secondary_ip) for node in nodeinfo
2052 if not node.offline],
2053 constants.NV_INSTANCELIST: hypervisors,
2054 constants.NV_VERSION: None,
2055 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2056 constants.NV_NODESETUP: None,
2057 constants.NV_TIME: None,
2058 constants.NV_MASTERIP: (master_node, master_ip),
2059 constants.NV_OSLIST: None,
2060 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2061 }
2063 if vg_name is not None:
2064 node_verify_param[constants.NV_VGLIST] = None
2065 node_verify_param[constants.NV_LVLIST] = vg_name
2066 node_verify_param[constants.NV_PVLIST] = [vg_name]
2067 node_verify_param[constants.NV_DRBDLIST] = None
2069 if drbd_helper:
2070 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2072 # Build our expected cluster state
2073 node_image = dict((node.name, self.NodeImage(offline=node.offline,
2074 name=node.name,
2075 vm_capable=node.vm_capable))
2076 for node in nodeinfo)
2078 for instance in instancelist:
2079 inst_config = instanceinfo[instance]
2081 for nname in inst_config.all_nodes:
2082 if nname not in node_image:
2083 # ghost node
2084 gnode = self.NodeImage(name=nname)
2085 gnode.ghost = True
2086 node_image[nname] = gnode
2088 inst_config.MapLVsByNode(node_vol_should)
2090 pnode = inst_config.primary_node
2091 node_image[pnode].pinst.append(instance)
2093 for snode in inst_config.secondary_nodes:
2094 nimg = node_image[snode]
2095 nimg.sinst.append(instance)
2096 if pnode not in nimg.sbp:
2097 nimg.sbp[pnode] = []
2098 nimg.sbp[pnode].append(instance)
2100 # At this point, we have the in-memory data structures complete,
2101 # except for the runtime information, which we'll gather next
2103 # Due to the way our RPC system works, exact response times cannot be
2104 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2105 # time before and after executing the request, we can at least have a time
2106 # window.
2107 nvinfo_starttime = time.time()
2108 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2109 self.cfg.GetClusterName())
2110 nvinfo_endtime = time.time()
2112 all_drbd_map = self.cfg.ComputeDRBDMap()
2114 feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
2115 instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
2117 feedback_fn("* Verifying node status")
2119 refos_img = None
2121 for node_i in nodeinfo:
2122 node = node_i.name
2123 nimg = node_image[node]
2127 feedback_fn("* Skipping offline node %s" % (node,))
2131 if node == master_node:
2132 ntype = "master"
2133 elif node_i.master_candidate:
2134 ntype = "master candidate"
2135 elif node_i.drained:
2136 ntype = "drained"
2137 n_drained += 1
2138 else:
2139 ntype = "regular"
2140 if verbose:
2141 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2143 msg = all_nvinfo[node].fail_msg
2144 _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2145 if msg:
2146 nimg.rpc_fail = True
2147 continue
2149 nresult = all_nvinfo[node].payload
2151 nimg.call_ok = self._VerifyNode(node_i, nresult)
2152 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2153 self._VerifyNodeNetwork(node_i, nresult)
2154 self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2155 master_files)
2157 if nimg.vm_capable:
2158 self._VerifyNodeLVM(node_i, nresult, vg_name)
2159 self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2160 all_drbd_map)
2162 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2163 self._UpdateNodeInstances(node_i, nresult, nimg)
2164 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2165 self._UpdateNodeOS(node_i, nresult, nimg)
2166 if not nimg.os_fail:
2167 if refos_img is None:
2168 refos_img = nimg
2169 self._VerifyNodeOS(node_i, nimg, refos_img)
2171 feedback_fn("* Verifying instance status")
2172 for instance in instancelist:
2174 feedback_fn("* Verifying instance %s" % instance)
2175 inst_config = instanceinfo[instance]
2176 self._VerifyInstance(instance, inst_config, node_image,
2177 instdisk[instance])
2178 inst_nodes_offline = []
2180 pnode = inst_config.primary_node
2181 pnode_img = node_image[pnode]
2182 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2183 self.ENODERPC, pnode, "instance %s, connection to"
2184 " primary node failed", instance)
2186 if pnode_img.offline:
2187 inst_nodes_offline.append(pnode)
2189 # If the instance is non-redundant we cannot survive losing its primary
2190 # node, so we are not N+1 compliant. On the other hand we have no disk
2191 # templates with more than one secondary so that situation is not well
2192 # supported either.
2193 # FIXME: does not support file-backed instances
2194 if not inst_config.secondary_nodes:
2195 i_non_redundant.append(instance)
2196 _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2197 instance, "instance has multiple secondary nodes: %s",
2198 utils.CommaJoin(inst_config.secondary_nodes),
2199 code=self.ETYPE_WARNING)
2201 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2202 i_non_a_balanced.append(instance)
2204 for snode in inst_config.secondary_nodes:
2205 s_img = node_image[snode]
2206 _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2207 "instance %s, connection to secondary node failed", instance)
2209 if s_img.offline:
2210 inst_nodes_offline.append(snode)
2212 # warn that the instance lives on offline nodes
2213 _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2214 "instance lives on offline node(s) %s",
2215 utils.CommaJoin(inst_nodes_offline))
2216 # ... or ghost/non-vm_capable nodes
2217 for node in inst_config.all_nodes:
2218 _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2219 "instance lives on ghost node %s", node)
2220 _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2221 instance, "instance lives on non-vm_capable node %s", node)
2223 feedback_fn("* Verifying orphan volumes")
2224 reserved = utils.FieldSet(*cluster.reserved_lvs)
2225 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2227 feedback_fn("* Verifying orphan instances")
2228 self._VerifyOrphanInstances(instancelist, node_image)
2230 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2231 feedback_fn("* Verifying N+1 Memory redundancy")
2232 self._VerifyNPlusOneMemory(node_image, instanceinfo)
2234 feedback_fn("* Other Notes")
2236 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
2237 % len(i_non_redundant))
2239 if i_non_a_balanced:
2240 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
2241 % len(i_non_a_balanced))
2244 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
2247 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
2251 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2252 """Analyze the post-hooks' result
2254 This method analyzes the hook result, handles it, and sends some
2255 nicely-formatted feedback back to the user.
2257 @param phase: one of L{constants.HOOKS_PHASE_POST} or
2258 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2259 @param hooks_results: the results of the multi-node hooks rpc call
2260 @param feedback_fn: function used to send feedback back to the caller
2261 @param lu_result: previous Exec result
2262 @return: the new Exec result, based on the previous result
2266 # We only really run POST phase hooks, and are only interested in
2267 # their results
2268 if phase == constants.HOOKS_PHASE_POST:
2269 # Used to change hooks' output to proper indentation
2270 indent_re = re.compile('^', re.M)
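# e.g. since re.compile('^', re.M) matches at the start of every line,
# indent_re.sub(' ', "line1\nline2") yields " line1\n line2", indenting
# each line of the hook output by one space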
2271 feedback_fn("* Hooks Results")
2272 assert hooks_results, "invalid result from hooks"
2274 for node_name in hooks_results:
2275 res = hooks_results[node_name]
2276 msg = res.fail_msg
2277 test = msg and not res.offline
2278 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2279 "Communication failure in hooks execution: %s", msg)
2280 if res.offline or msg:
2281 # No need to investigate payload if node is offline or gave an error.
2282 # override manually lu_result here as _ErrorIf only
2283 # overrides self.bad
2284 lu_result = 1
2285 continue
2286 for script, hkr, output in res.payload:
2287 test = hkr == constants.HKR_FAIL
2288 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2289 "Script %s failed, output:", script)
2290 if test:
2291 output = indent_re.sub(' ', output)
2292 feedback_fn("%s" % output)
2298 class LUVerifyDisks(NoHooksLU):
2299 """Verifies the cluster disks status.
2304 def ExpandNames(self):
2305 self.needed_locks = {
2306 locking.LEVEL_NODE: locking.ALL_SET,
2307 locking.LEVEL_INSTANCE: locking.ALL_SET,
2308 }
2309 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2311 def Exec(self, feedback_fn):
2312 """Verify integrity of cluster disks.
2314 @rtype: tuple of three items
2315 @return: a tuple of (dict of node-to-node_error, list of instances
2316 which need activate-disks, dict of instance: (node, volume) for
2317 missing volumes
2320 result = res_nodes, res_instances, res_missing = {}, [], {}
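# For illustration (all names hypothetical), a possible final value is:
#   ({"node1": "rpc error"},                # per-node enumeration errors
#    ["inst1"],                             # instances needing activate-disks
#    {"inst2": [("node2", "xenvg/lv0")]})   # missing (node, volume) pairs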
2322 vg_name = self.cfg.GetVGName()
2323 nodes = utils.NiceSort(self.cfg.GetNodeList())
2324 instances = [self.cfg.GetInstanceInfo(name)
2325 for name in self.cfg.GetInstanceList()]
2327 nv_dict = {}
2328 for inst in instances:
2329 inst_lvs = {}
2330 if (not inst.admin_up or
2331 inst.disk_template not in constants.DTS_NET_MIRROR):
2332 continue
2333 inst.MapLVsByNode(inst_lvs)
2334 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2335 for node, vol_list in inst_lvs.iteritems():
2336 for vol in vol_list:
2337 nv_dict[(node, vol)] = inst
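# e.g. an instance whose LVs map as {"node1": ["xenvg/lv1"]} contributes
# {("node1", "xenvg/lv1"): <that Instance object>}, so the per-node LV
# scan below can look up the owner with a single dict access (names
# illustrative)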
2339 if not nv_dict:
2340 return result
2342 node_lvs = self.rpc.call_lv_list(nodes, vg_name)
2344 for node in nodes:
2345 # node_volume
2346 node_res = node_lvs[node]
2347 if node_res.offline:
2348 continue
2349 msg = node_res.fail_msg
2350 if msg:
2351 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2352 res_nodes[node] = msg
2353 continue
2355 lvs = node_res.payload
2356 for lv_name, (_, _, lv_online) in lvs.items():
2357 inst = nv_dict.pop((node, lv_name), None)
2358 if (not lv_online and inst is not None
2359 and inst.name not in res_instances):
2360 res_instances.append(inst.name)
2362 # any leftover items in nv_dict are missing LVs, let's arrange the
2363 # data better
2364 for key, inst in nv_dict.iteritems():
2365 if inst.name not in res_missing:
2366 res_missing[inst.name] = []
2367 res_missing[inst.name].append(key)
2369 return result
2372 class LURepairDiskSizes(NoHooksLU):
2373 """Verifies the cluster disks sizes.
2376 _OP_PARAMS = [("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString))]
2379 def ExpandNames(self):
2380 if self.op.instances:
2381 self.wanted_names = []
2382 for name in self.op.instances:
2383 full_name = _ExpandInstanceName(self.cfg, name)
2384 self.wanted_names.append(full_name)
2385 self.needed_locks = {
2386 locking.LEVEL_NODE: [],
2387 locking.LEVEL_INSTANCE: self.wanted_names,
2388 }
2389 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2390 else:
2391 self.wanted_names = None
2392 self.needed_locks = {
2393 locking.LEVEL_NODE: locking.ALL_SET,
2394 locking.LEVEL_INSTANCE: locking.ALL_SET,
2395 }
2396 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2398 def DeclareLocks(self, level):
2399 if level == locking.LEVEL_NODE and self.wanted_names is not None:
2400 self._LockInstancesNodes(primary_only=True)
2402 def CheckPrereq(self):
2403 """Check prerequisites.
2405 This only checks the optional instance list against the existing names.
2408 if self.wanted_names is None:
2409 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2411 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2412 in self.wanted_names]
2414 def _EnsureChildSizes(self, disk):
2415 """Ensure children of the disk have the needed disk size.
2417 This is valid mainly for DRBD8 and fixes an issue where the
2418 children have a smaller disk size.
2420 @param disk: an L{ganeti.objects.Disk} object
2423 if disk.dev_type == constants.LD_DRBD8:
2424 assert disk.children, "Empty children for DRBD8?"
2425 fchild = disk.children[0]
2426 mismatch = fchild.size < disk.size
2427 if mismatch:
2428 self.LogInfo("Child disk has size %d, parent %d, fixing",
2429 fchild.size, disk.size)
2430 fchild.size = disk.size
2432 # and we recurse on this child only, not on the metadev
2433 return self._EnsureChildSizes(fchild) or mismatch
2434 else:
2435 return False
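# A sketch of the fix-up with hypothetical sizes: a DRBD8 disk of size 10240
# whose data child reports 10112 gets the child grown to 10240; the method
# then returns True so the caller knows the configuration must be saved.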
2437 def Exec(self, feedback_fn):
2438 """Verify the size of cluster disks.
2441 # TODO: check child disks too
2442 # TODO: check differences in size between primary/secondary nodes
2443 per_node_disks = {}
2444 for instance in self.wanted_instances:
2445 pnode = instance.primary_node
2446 if pnode not in per_node_disks:
2447 per_node_disks[pnode] = []
2448 for idx, disk in enumerate(instance.disks):
2449 per_node_disks[pnode].append((instance, idx, disk))
2451 changed = []
2452 for node, dskl in per_node_disks.items():
2453 newl = [v[2].Copy() for v in dskl]
2454 for dsk in newl:
2455 self.cfg.SetDiskID(dsk, node)
2456 result = self.rpc.call_blockdev_getsizes(node, newl)
2458 self.LogWarning("Failure in blockdev_getsizes call to node"
2459 " %s, ignoring", node)
2461 if len(result.data) != len(dskl):
2462 self.LogWarning("Invalid result from node %s, ignoring node results",
2465 for ((instance, idx, disk), size) in zip(dskl, result.data):
2467 self.LogWarning("Disk %d of instance %s did not return size"
2468 " information, ignoring", idx, instance.name)
2470 if not isinstance(size, (int, long)):
2471 self.LogWarning("Disk %d of instance %s did not return valid"
2472 " size information, ignoring", idx, instance.name)
2475 if size != disk.size:
2476 self.LogInfo("Disk %d of instance %s has mismatched size,"
2477 " correcting: recorded %d, actual %d", idx,
2478 instance.name, disk.size, size)
2479 disk.size = size
2480 self.cfg.Update(instance, feedback_fn)
2481 changed.append((instance.name, idx, size))
2482 if self._EnsureChildSizes(disk):
2483 self.cfg.Update(instance, feedback_fn)
2484 changed.append((instance.name, idx, disk.size))
2486 return changed
2488 class LURenameCluster(LogicalUnit):
2489 """Rename the cluster.
2492 HPATH = "cluster-rename"
2493 HTYPE = constants.HTYPE_CLUSTER
2494 _OP_PARAMS = [("name", ht.NoDefault, ht.TNonEmptyString)]
2496 def BuildHooksEnv(self):
2501 "OP_TARGET": self.cfg.GetClusterName(),
2502 "NEW_NAME": self.op.name,
2504 mn = self.cfg.GetMasterNode()
2505 all_nodes = self.cfg.GetNodeList()
2506 return env, [mn], all_nodes
2508 def CheckPrereq(self):
2509 """Verify that the passed name is a valid one.
2512 hostname = netutils.GetHostname(name=self.op.name,
2513 family=self.cfg.GetPrimaryIPFamily())
2515 new_name = hostname.name
2516 self.ip = new_ip = hostname.ip
2517 old_name = self.cfg.GetClusterName()
2518 old_ip = self.cfg.GetMasterIP()
2519 if new_name == old_name and new_ip == old_ip:
2520 raise errors.OpPrereqError("Neither the name nor the IP address of the"
2521 " cluster has changed",
2523 if new_ip != old_ip:
2524 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2525 raise errors.OpPrereqError("The given cluster IP address (%s) is"
2526 " reachable on the network" %
2527 new_ip, errors.ECODE_NOTUNIQUE)
2529 self.op.name = new_name
2531 def Exec(self, feedback_fn):
2532 """Rename the cluster.
2535 clustername = self.op.name
2536 ip = self.ip
2538 # shutdown the master IP
2539 master = self.cfg.GetMasterNode()
2540 result = self.rpc.call_node_stop_master(master, False)
2541 result.Raise("Could not disable the master role")
2543 try:
2544 cluster = self.cfg.GetClusterInfo()
2545 cluster.cluster_name = clustername
2546 cluster.master_ip = ip
2547 self.cfg.Update(cluster, feedback_fn)
2549 # update the known hosts file
2550 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2551 node_list = self.cfg.GetNodeList()
2552 try:
2553 node_list.remove(master)
2554 except ValueError:
2555 pass
2556 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
2557 finally:
2558 result = self.rpc.call_node_start_master(master, False, False)
2559 msg = result.fail_msg
2561 self.LogWarning("Could not re-enable the master role on"
2562 " the master, please restart manually: %s", msg)
2567 class LUSetClusterParams(LogicalUnit):
2568 """Change the parameters of the cluster.
2571 HPATH = "cluster-modify"
2572 HTYPE = constants.HTYPE_CLUSTER
2574 ("vg_name", None, ht.TMaybeString),
2575 ("enabled_hypervisors", None,
2576 ht.TOr(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)), ht.TTrue),
2577 ht.TNone)),
2578 ("hvparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
2580 ("beparams", None, ht.TOr(ht.TDict, ht.TNone)),
2581 ("os_hvp", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
2583 ("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
2585 ("candidate_pool_size", None, ht.TOr(ht.TStrictPositiveInt, ht.TNone)),
2586 ("uid_pool", None, ht.NoType),
2587 ("add_uids", None, ht.NoType),
2588 ("remove_uids", None, ht.NoType),
2589 ("maintain_node_health", None, ht.TMaybeBool),
2590 ("prealloc_wipe_disks", None, ht.TMaybeBool),
2591 ("nicparams", None, ht.TOr(ht.TDict, ht.TNone)),
2592 ("ndparams", None, ht.TOr(ht.TDict, ht.TNone)),
2593 ("drbd_helper", None, ht.TOr(ht.TString, ht.TNone)),
2594 ("default_iallocator", None, ht.TOr(ht.TString, ht.TNone)),
2595 ("reserved_lvs", None, ht.TOr(ht.TListOf(ht.TNonEmptyString), ht.TNone)),
2596 ("hidden_os", None, ht.TOr(ht.TListOf(\
2599 ht.TMap(lambda v: v[0], ht.TElemOf(constants.DDMS_VALUES)))),
2601 ("blacklisted_os", None, ht.TOr(ht.TListOf(\
2604 ht.TMap(lambda v: v[0], ht.TElemOf(constants.DDMS_VALUES)))),
2609 def CheckArguments(self):
2613 if self.op.uid_pool:
2614 uidpool.CheckUidPool(self.op.uid_pool)
2616 if self.op.add_uids:
2617 uidpool.CheckUidPool(self.op.add_uids)
2619 if self.op.remove_uids:
2620 uidpool.CheckUidPool(self.op.remove_uids)
2622 def ExpandNames(self):
2623 # FIXME: in the future maybe other cluster params won't require checking on
2624 # all nodes to be modified.
2625 self.needed_locks = {
2626 locking.LEVEL_NODE: locking.ALL_SET,
2627 }
2628 self.share_locks[locking.LEVEL_NODE] = 1
2630 def BuildHooksEnv(self):
2635 "OP_TARGET": self.cfg.GetClusterName(),
2636 "NEW_VG_NAME": self.op.vg_name,
2638 mn = self.cfg.GetMasterNode()
2639 return env, [mn], [mn]
2641 def CheckPrereq(self):
2642 """Check prerequisites.
2644 This checks whether the given params don't conflict and
2645 if the given volume group is valid.
2648 if self.op.vg_name is not None and not self.op.vg_name:
2649 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2650 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2651 " instances exist", errors.ECODE_INVAL)
2653 if self.op.drbd_helper is not None and not self.op.drbd_helper:
2654 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2655 raise errors.OpPrereqError("Cannot disable drbd helper while"
2656 " drbd-based instances exist",
2659 node_list = self.acquired_locks[locking.LEVEL_NODE]
2661 # if vg_name not None, checks given volume group on all nodes
2662 if self.op.vg_name:
2663 vglist = self.rpc.call_vg_list(node_list)
2664 for node in node_list:
2665 msg = vglist[node].fail_msg
2666 if msg:
2667 # ignoring down node
2668 self.LogWarning("Error while gathering data on node %s"
2669 " (ignoring node): %s", node, msg)
2671 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2672 self.op.vg_name,
2673 constants.MIN_VG_SIZE)
2674 if vgstatus:
2675 raise errors.OpPrereqError("Error on node '%s': %s" %
2676 (node, vgstatus), errors.ECODE_ENVIRON)
2678 if self.op.drbd_helper:
2679 # checks given drbd helper on all nodes
2680 helpers = self.rpc.call_drbd_helper(node_list)
2681 for node in node_list:
2682 ninfo = self.cfg.GetNodeInfo(node)
2684 self.LogInfo("Not checking drbd helper on offline node %s", node)
2686 msg = helpers[node].fail_msg
2688 raise errors.OpPrereqError("Error checking drbd helper on node"
2689 " '%s': %s" % (node, msg),
2690 errors.ECODE_ENVIRON)
2691 node_helper = helpers[node].payload
2692 if node_helper != self.op.drbd_helper:
2693 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2694 (node, node_helper), errors.ECODE_ENVIRON)
2696 self.cluster = cluster = self.cfg.GetClusterInfo()
2697 # validate params changes
2698 if self.op.beparams:
2699 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2700 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2702 if self.op.ndparams:
2703 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2704 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2706 if self.op.nicparams:
2707 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2708 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2709 objects.NIC.CheckParameterSyntax(self.new_nicparams)
2710 nic_errors = []
2712 # check all instances for consistency
2713 for instance in self.cfg.GetAllInstancesInfo().values():
2714 for nic_idx, nic in enumerate(instance.nics):
2715 params_copy = copy.deepcopy(nic.nicparams)
2716 params_filled = objects.FillDict(self.new_nicparams, params_copy)
2718 # check parameter syntax
2719 try:
2720 objects.NIC.CheckParameterSyntax(params_filled)
2721 except errors.ConfigurationError, err:
2722 nic_errors.append("Instance %s, nic/%d: %s" %
2723 (instance.name, nic_idx, err))
2725 # if we're moving instances to routed, check that they have an ip
2726 target_mode = params_filled[constants.NIC_MODE]
2727 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2728 nic_errors.append("Instance %s, nic/%d: routed NIC with no IP" %
2729 (instance.name, nic_idx))
2731 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2732 "\n".join(nic_errors))
2734 # hypervisor list/parameters
2735 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2736 if self.op.hvparams:
2737 for hv_name, hv_dict in self.op.hvparams.items():
2738 if hv_name not in self.new_hvparams:
2739 self.new_hvparams[hv_name] = hv_dict
2740 else:
2741 self.new_hvparams[hv_name].update(hv_dict)
2743 # os hypervisor parameters
2744 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2745 if self.op.os_hvp:
2746 for os_name, hvs in self.op.os_hvp.items():
2747 if os_name not in self.new_os_hvp:
2748 self.new_os_hvp[os_name] = hvs
2749 else:
2750 for hv_name, hv_dict in hvs.items():
2751 if hv_name not in self.new_os_hvp[os_name]:
2752 self.new_os_hvp[os_name][hv_name] = hv_dict
2753 else:
2754 self.new_os_hvp[os_name][hv_name].update(hv_dict)
2757 self.new_osp = objects.FillDict(cluster.osparams, {})
2758 if self.op.osparams:
2759 for os_name, osp in self.op.osparams.items():
2760 if os_name not in self.new_osp:
2761 self.new_osp[os_name] = {}
2763 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2764 use_none=True)
2766 if not self.new_osp[os_name]:
2767 # we removed all parameters
2768 del self.new_osp[os_name]
2769 else:
2770 # check the parameter validity (remote check)
2771 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2772 os_name, self.new_osp[os_name])
2774 # changes to the hypervisor list
2775 if self.op.enabled_hypervisors is not None:
2776 self.hv_list = self.op.enabled_hypervisors
2777 for hv in self.hv_list:
2778 # if the hypervisor doesn't already exist in the cluster
2779 # hvparams, we initialize it to empty, and then (in both
2780 # cases) we make sure to fill the defaults, as we might not
2781 # have a complete defaults list if the hypervisor wasn't
2783 if hv not in new_hvp:
2784 new_hvp[hv] = {}
2785 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2786 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2787 else:
2788 self.hv_list = cluster.enabled_hypervisors
2790 if self.op.hvparams or self.op.enabled_hypervisors is not None:
2791 # either the enabled list has changed, or the parameters have, validate
2792 for hv_name, hv_params in self.new_hvparams.items():
2793 if ((self.op.hvparams and hv_name in self.op.hvparams) or
2794 (self.op.enabled_hypervisors and
2795 hv_name in self.op.enabled_hypervisors)):
2796 # either this is a new hypervisor, or its parameters have changed
2797 hv_class = hypervisor.GetHypervisor(hv_name)
2798 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2799 hv_class.CheckParameterSyntax(hv_params)
2800 _CheckHVParams(self, node_list, hv_name, hv_params)
2802 if self.op.os_hvp:
2803 # no need to check any newly-enabled hypervisors, since the
2804 # defaults have already been checked in the above code-block
2805 for os_name, os_hvp in self.new_os_hvp.items():
2806 for hv_name, hv_params in os_hvp.items():
2807 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2808 # we need to fill in the new os_hvp on top of the actual hv_p
2809 cluster_defaults = self.new_hvparams.get(hv_name, {})
2810 new_osp = objects.FillDict(cluster_defaults, hv_params)
2811 hv_class = hypervisor.GetHypervisor(hv_name)
2812 hv_class.CheckParameterSyntax(new_osp)
2813 _CheckHVParams(self, node_list, hv_name, new_osp)
2815 if self.op.default_iallocator:
2816 alloc_script = utils.FindFile(self.op.default_iallocator,
2817 constants.IALLOCATOR_SEARCH_PATH,
2818 os.path.isfile)
2819 if alloc_script is None:
2820 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2821 " specified" % self.op.default_iallocator,
2824 def Exec(self, feedback_fn):
2825 """Change the parameters of the cluster.
2828 if self.op.vg_name is not None:
2829 new_volume = self.op.vg_name
2830 if not new_volume:
2831 new_volume = None
2832 if new_volume != self.cfg.GetVGName():
2833 self.cfg.SetVGName(new_volume)
2835 feedback_fn("Cluster LVM configuration already in desired"
2836 " state, not changing")
2837 if self.op.drbd_helper is not None:
2838 new_helper = self.op.drbd_helper
2839 if not new_helper:
2840 new_helper = None
2841 if new_helper != self.cfg.GetDRBDHelper():
2842 self.cfg.SetDRBDHelper(new_helper)
2844 feedback_fn("Cluster DRBD helper already in desired state,"
2846 if self.op.hvparams:
2847 self.cluster.hvparams = self.new_hvparams
2848 if self.op.os_hvp:
2849 self.cluster.os_hvp = self.new_os_hvp
2850 if self.op.enabled_hypervisors is not None:
2851 self.cluster.hvparams = self.new_hvparams
2852 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2853 if self.op.beparams:
2854 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2855 if self.op.nicparams:
2856 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2857 if self.op.osparams:
2858 self.cluster.osparams = self.new_osp
2859 if self.op.ndparams:
2860 self.cluster.ndparams = self.new_ndparams
2862 if self.op.candidate_pool_size is not None:
2863 self.cluster.candidate_pool_size = self.op.candidate_pool_size
2864 # we need to update the pool size here, otherwise the save will fail
2865 _AdjustCandidatePool(self, [])
2867 if self.op.maintain_node_health is not None:
2868 self.cluster.maintain_node_health = self.op.maintain_node_health
2870 if self.op.prealloc_wipe_disks is not None:
2871 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
2873 if self.op.add_uids is not None:
2874 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2876 if self.op.remove_uids is not None:
2877 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2879 if self.op.uid_pool is not None:
2880 self.cluster.uid_pool = self.op.uid_pool
2882 if self.op.default_iallocator is not None:
2883 self.cluster.default_iallocator = self.op.default_iallocator
2885 if self.op.reserved_lvs is not None:
2886 self.cluster.reserved_lvs = self.op.reserved_lvs
2888 def helper_os(aname, mods, desc):
2889 desc += " OS list"
2890 lst = getattr(self.cluster, aname)
2891 for key, val in mods:
2892 if key == constants.DDM_ADD:
2893 if val in lst:
2894 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
2895 else:
2896 lst.append(val)
2897 elif key == constants.DDM_REMOVE:
2898 if val in lst:
2899 lst.remove(val)
2900 else:
2901 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
2902 else:
2903 raise errors.ProgrammerError("Invalid modification '%s'" % key)
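# For example (OS names hypothetical), hidden_os mods of
#   [(constants.DDM_ADD, "lenny-image"), (constants.DDM_REMOVE, "old-image")]
# append "lenny-image" to the list and drop "old-image" from it, warning
# instead of failing when an entry is already/not present.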
2905 if self.op.hidden_os:
2906 helper_os("hidden_os", self.op.hidden_os, "hidden")
2908 if self.op.blacklisted_os:
2909 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
2911 self.cfg.Update(self.cluster, feedback_fn)
2914 def _UploadHelper(lu, nodes, fname):
2915 """Helper for uploading a file and showing warnings.
2918 if os.path.exists(fname):
2919 result = lu.rpc.call_upload_file(nodes, fname)
2920 for to_node, to_result in result.items():
2921 msg = to_result.fail_msg
2923 msg = ("Copy of file %s to node %s failed: %s" %
2924 (fname, to_node, msg))
2925 lu.proc.LogWarning(msg)
2928 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
2929 """Distribute additional files which are part of the cluster configuration.
2931 ConfigWriter takes care of distributing the config and ssconf files, but
2932 there are more files which should be distributed to all nodes. This function
2933 makes sure those are copied.
2935 @param lu: calling logical unit
2936 @param additional_nodes: list of nodes not in the config to distribute to
2937 @type additional_vm: boolean
2938 @param additional_vm: whether the additional nodes are vm-capable or not
2941 # 1. Gather target nodes
2942 myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2943 dist_nodes = lu.cfg.GetOnlineNodeList()
2944 nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
2945 vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
2946 if additional_nodes is not None:
2947 dist_nodes.extend(additional_nodes)
2948 if additional_vm:
2949 vm_nodes.extend(additional_nodes)
2950 if myself.name in dist_nodes:
2951 dist_nodes.remove(myself.name)
2952 if myself.name in vm_nodes:
2953 vm_nodes.remove(myself.name)
2955 # 2. Gather files to distribute
2956 dist_files = set([constants.ETC_HOSTS,
2957 constants.SSH_KNOWN_HOSTS_FILE,
2958 constants.RAPI_CERT_FILE,
2959 constants.RAPI_USERS_FILE,
2960 constants.CONFD_HMAC_KEY,
2961 constants.CLUSTER_DOMAIN_SECRET_FILE,
2962 ])
2964 vm_files = set()
2965 enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2966 for hv_name in enabled_hypervisors:
2967 hv_class = hypervisor.GetHypervisor(hv_name)
2968 vm_files.update(hv_class.GetAncillaryFiles())
2970 # 3. Perform the files upload
2971 for fname in dist_files:
2972 _UploadHelper(lu, dist_nodes, fname)
2973 for fname in vm_files:
2974 _UploadHelper(lu, vm_nodes, fname)
2977 class LURedistributeConfig(NoHooksLU):
2978 """Force the redistribution of cluster configuration.
2980 This is a very simple LU.
2985 def ExpandNames(self):
2986 self.needed_locks = {
2987 locking.LEVEL_NODE: locking.ALL_SET,
2988 }
2989 self.share_locks[locking.LEVEL_NODE] = 1
2991 def Exec(self, feedback_fn):
2992 """Redistribute the configuration.
2995 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2996 _RedistributeAncillaryFiles(self)
2999 def _WaitForSync(lu, instance, disks=None, oneshot=False):
3000 """Sleep and poll for an instance's disk to sync.
3003 if not instance.disks or disks is not None and not disks:
3004 return True
3006 disks = _ExpandCheckDisks(instance, disks)
3009 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3011 node = instance.primary_node
3013 for dev in disks:
3014 lu.cfg.SetDiskID(dev, node)
3016 # TODO: Convert to utils.Retry
3018 retries = 0
3019 degr_retries = 10 # in seconds, as we sleep 1 second each time
3020 while True:
3021 max_time = 0
3022 done = True
3023 cumul_degraded = False
3024 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3025 msg = rstats.fail_msg
3027 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3030 raise errors.RemoteError("Can't contact node %s for mirror data,"
3031 " aborting." % node)
3034 rstats = rstats.payload
3036 for i, mstat in enumerate(rstats):
3038 lu.LogWarning("Can't compute data for node %s/%s",
3039 node, disks[i].iv_name)
3042 cumul_degraded = (cumul_degraded or
3043 (mstat.is_degraded and mstat.sync_percent is None))
3044 if mstat.sync_percent is not None:
3045 done = False
3046 if mstat.estimated_time is not None:
3047 rem_time = ("%s remaining (estimated)" %
3048 utils.FormatSeconds(mstat.estimated_time))
3049 max_time = mstat.estimated_time
3051 rem_time = "no time estimate"
3052 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3053 (disks[i].iv_name, mstat.sync_percent, rem_time))
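# e.g. a typical progress line (values and exact time format illustrative):
#   - device disk/0: 42.50% done, 4m 10s remaining (estimated)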
3055 # if we're done but degraded, let's do a few small retries, to
3056 # make sure we see a stable and not transient situation; therefore
3057 # we force restart of the loop
3058 if (done or oneshot) and cumul_degraded and degr_retries > 0:
3059 logging.info("Degraded disks found, %d retries left", degr_retries)
3067 time.sleep(min(60, max_time))
3069 if done:
3070 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3071 return not cumul_degraded
3074 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3075 """Check that mirrors are not degraded.
3077 The ldisk parameter, if True, will change the test from the
3078 is_degraded attribute (which represents overall non-ok status for
3079 the device(s)) to the ldisk (representing the local storage status).
3082 lu.cfg.SetDiskID(dev, node)
3084 result = True
3086 if on_primary or dev.AssembleOnSecondary():
3087 rstats = lu.rpc.call_blockdev_find(node, dev)
3088 msg = rstats.fail_msg
3090 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3092 elif not rstats.payload:
3093 lu.LogWarning("Can't find disk on node %s", node)
3097 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3099 result = result and not rstats.payload.is_degraded
3101 if dev.children:
3102 for child in dev.children:
3103 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3105 return result
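# Minimal usage sketch: _CheckDiskConsistency(lu, dev, node, False,
# ldisk=True) answers "is the local storage for this device OK on that
# node?", while the default ldisk=False checks overall degradation
# (is_degraded) instead.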
3108 class LUDiagnoseOS(NoHooksLU):
3109 """Logical unit for OS diagnose/query.
3114 ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
3118 _BLK = "blacklisted"
3119 _VLD = "valid"
3120 _FIELDS_STATIC = utils.FieldSet()
3121 _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
3122 "parameters", "api_versions", _HID, _BLK)
3124 def CheckArguments(self):
3126 raise errors.OpPrereqError("Selective OS query not supported",
3129 _CheckOutputFields(static=self._FIELDS_STATIC,
3130 dynamic=self._FIELDS_DYNAMIC,
3131 selected=self.op.output_fields)
3133 def ExpandNames(self):
3134 # Lock all nodes, in shared mode
3135 # Temporary removal of locks, should be reverted later
3136 # TODO: reintroduce locks when they are lighter-weight
3137 self.needed_locks = {}
3138 #self.share_locks[locking.LEVEL_NODE] = 1
3139 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3141 @staticmethod
3142 def _DiagnoseByOS(rlist):
3143 """Remaps a per-node return list into an a per-os per-node dictionary
3145 @param rlist: a map with node names as keys and OS objects as values
3148 @return: a dictionary with osnames as keys and as value another
3149 map, with nodes as keys and tuples of (path, status, diagnose,
3150 variants, parameters, api_versions) as values, eg::
3152 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3153 (/srv/..., False, "invalid api")],
3154 "node2": [(/srv/..., True, "", [], [])]}
3155 }
3157 """
3158 all_os = {}
3159 # we build here the list of nodes that didn't fail the RPC (at RPC
3160 # level), so that nodes with a non-responding node daemon don't
3161 # make all OSes invalid
3162 good_nodes = [node_name for node_name in rlist
3163 if not rlist[node_name].fail_msg]
3164 for node_name, nr in rlist.items():
3165 if nr.fail_msg or not nr.payload:
3166 continue
3167 for (name, path, status, diagnose, variants,
3168 params, api_versions) in nr.payload:
3169 if name not in all_os:
3170 # build a list of nodes for this os containing empty lists
3171 # for each node in node_list
3172 all_os[name] = {}
3173 for nname in good_nodes:
3174 all_os[name][nname] = []
3175 # convert params from [name, help] to (name, help)
3176 params = [tuple(v) for v in params]
3177 all_os[name][node_name].append((path, status, diagnose,
3178 variants, params, api_versions))
3180 return all_os
3181 def Exec(self, feedback_fn):
3182 """Compute the list of OSes.
3185 valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
3186 node_data = self.rpc.call_os_diagnose(valid_nodes)
3187 pol = self._DiagnoseByOS(node_data)
3189 cluster = self.cfg.GetClusterInfo()
3190 output = []
3191 for os_name in utils.NiceSort(pol.keys()):
3192 os_data = pol[os_name]
3194 valid = True
3195 (variants, params, api_versions) = null_state = (set(), set(), set())
3196 for idx, osl in enumerate(os_data.values()):
3197 valid = bool(valid and osl and osl[0][1])
3198 if not valid:
3199 (variants, params, api_versions) = null_state
3200 break
3201 node_variants, node_params, node_api = osl[0][3:6]
3202 if idx == 0: # first entry
3203 variants = set(node_variants)
3204 params = set(node_params)
3205 api_versions = set(node_api)
3206 else: # keep consistency
3207 variants.intersection_update(node_variants)
3208 params.intersection_update(node_params)
3209 api_versions.intersection_update(node_api)
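# e.g. variants {"squeeze", "wheezy"} from the first node intersected
# with {"wheezy"} from a second node leaves {"wheezy"}: only values
# present on every node are reported for the OS (values illustrative)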
3211 is_hid = os_name in cluster.hidden_os
3212 is_blk = os_name in cluster.blacklisted_os
3213 if ((self._HID not in self.op.output_fields and is_hid) or
3214 (self._BLK not in self.op.output_fields and is_blk) or
3215 (self._VLD not in self.op.output_fields and not valid)):
3216 continue
3217 row = []
3218 for field in self.op.output_fields:
3219 if field == "name":
3220 val = os_name
3221 elif field == self._VLD:
3222 val = valid
3223 elif field == "node_status":
3224 # this is just a copy of the dict
3225 val = {}
3226 for node_name, nos_list in os_data.items():
3227 val[node_name] = nos_list
3228 elif field == "variants":
3229 val = utils.NiceSort(list(variants))
3230 elif field == "parameters":
3232 elif field == "api_versions":
3233 val = list(api_versions)
3234 elif field == self._HID:
3235 val = is_hid
3236 elif field == self._BLK:
3237 val = is_blk
3238 else:
3239 raise errors.ParameterError(field)
3240 row.append(val)
3241 output.append(row)
3243 return output
3246 class LURemoveNode(LogicalUnit):
3247 """Logical unit for removing a node.
3250 HPATH = "node-remove"
3251 HTYPE = constants.HTYPE_NODE
3256 def BuildHooksEnv(self):
3259 This doesn't run on the target node in the pre phase as a failed
3260 node would then be impossible to remove.
3264 "OP_TARGET": self.op.node_name,
3265 "NODE_NAME": self.op.node_name,
3267 all_nodes = self.cfg.GetNodeList()
3268 try:
3269 all_nodes.remove(self.op.node_name)
3270 except ValueError:
3271 logging.warning("Node %s which is about to be removed not found"
3272 " in the all nodes list", self.op.node_name)
3273 return env, all_nodes, all_nodes
3275 def CheckPrereq(self):
3276 """Check prerequisites.
3279 - the node exists in the configuration
3280 - it does not have primary or secondary instances
3281 - it's not the master
3283 Any errors are signaled by raising errors.OpPrereqError.
3286 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3287 node = self.cfg.GetNodeInfo(self.op.node_name)
3288 assert node is not None
3290 instance_list = self.cfg.GetInstanceList()
3292 masternode = self.cfg.GetMasterNode()
3293 if node.name == masternode:
3294 raise errors.OpPrereqError("Node is the master node,"
3295 " you need to failover first.",
3298 for instance_name in instance_list:
3299 instance = self.cfg.GetInstanceInfo(instance_name)
3300 if node.name in instance.all_nodes:
3301 raise errors.OpPrereqError("Instance %s is still running on the node,"
3302 " please remove first." % instance_name,
3304 self.op.node_name = node.name
3305 self.node = node
3307 def Exec(self, feedback_fn):
3308 """Removes the node from the cluster.
3312 logging.info("Stopping the node daemon and removing configs from node %s",
3315 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3317 # Promote nodes to master candidate as needed
3318 _AdjustCandidatePool(self, exceptions=[node.name])
3319 self.context.RemoveNode(node.name)
3321 # Run post hooks on the node before it's removed
3322 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3323 try:
3324 hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3325 except:
3326 # pylint: disable-msg=W0702
3327 self.LogWarning("Errors occurred running hooks on %s" % node.name)
3329 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3330 msg = result.fail_msg
3332 self.LogWarning("Errors encountered on the remote node while leaving"
3333 " the cluster: %s", msg)
3335 # Remove node from our /etc/hosts
3336 if self.cfg.GetClusterInfo().modify_etc_hosts:
3337 master_node = self.cfg.GetMasterNode()
3338 result = self.rpc.call_etc_hosts_modify(master_node,
3339 constants.ETC_HOSTS_REMOVE,
3340 node.name, None)
3341 result.Raise("Can't update hosts file with new host data")
3342 _RedistributeAncillaryFiles(self)
3345 class LUQueryNodes(NoHooksLU):
3346 """Logical unit for querying nodes.
3349 # pylint: disable-msg=W0142
3352 ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
3353 ("use_locking", False, ht.TBool),
3357 _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
3358 "master_candidate", "offline", "drained",
3359 "master_capable", "vm_capable"]
3361 _FIELDS_DYNAMIC = utils.FieldSet(
3362 "dtotal", "dfree",
3363 "mtotal", "mnode", "mfree",
3364 "bootid",
3365 "ctotal", "cnodes", "csockets",
3366 )
3368 _FIELDS_STATIC = utils.FieldSet(*[
3369 "pinst_cnt", "sinst_cnt",
3370 "pinst_list", "sinst_list",
3371 "pip", "sip", "tags",
3373 "group.uuid", "group",
3377 def CheckArguments(self):
3378 _CheckOutputFields(static=self._FIELDS_STATIC,
3379 dynamic=self._FIELDS_DYNAMIC,
3380 selected=self.op.output_fields)
3382 def ExpandNames(self):
3383 self.needed_locks = {}
3384 self.share_locks[locking.LEVEL_NODE] = 1
3386 if self.op.names:
3387 self.wanted = _GetWantedNodes(self, self.op.names)
3388 else:
3389 self.wanted = locking.ALL_SET
3391 self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3392 self.do_locking = self.do_node_query and self.op.use_locking
3393 if self.do_locking:
3394 # if we don't request only static fields, we need to lock the nodes
3395 self.needed_locks[locking.LEVEL_NODE] = self.wanted
3397 def Exec(self, feedback_fn):
3398 """Computes the list of nodes and their attributes.
3401 all_info = self.cfg.GetAllNodesInfo()
3402 if self.do_locking:
3403 nodenames = self.acquired_locks[locking.LEVEL_NODE]
3404 elif self.wanted != locking.ALL_SET:
3405 nodenames = self.wanted
3406 missing = set(nodenames).difference(all_info.keys())
3407 if missing:
3408 raise errors.OpExecError(
3409 "Some nodes were removed before retrieving their data: %s" % missing)
3410 else:
3411 nodenames = all_info.keys()
3413 nodenames = utils.NiceSort(nodenames)
3414 nodelist = [all_info[name] for name in nodenames]
3416 if "group" in self.op.output_fields:
3417 groups = self.cfg.GetAllNodeGroupsInfo()
3421 # begin data gathering
3423 if self.do_node_query:
3424 live_data = {}
3425 node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3426 self.cfg.GetHypervisorType())
3427 for name in nodenames:
3428 nodeinfo = node_data[name]
3429 if not nodeinfo.fail_msg and nodeinfo.payload:
3430 nodeinfo = nodeinfo.payload
3431 fn = utils.TryConvert
3433 "mtotal": fn(int, nodeinfo.get('memory_total', None)),
3434 "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
3435 "mfree": fn(int, nodeinfo.get('memory_free', None)),
3436 "dtotal": fn(int, nodeinfo.get('vg_size', None)),
3437 "dfree": fn(int, nodeinfo.get('vg_free', None)),
3438 "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
3439 "bootid": nodeinfo.get('bootid', None),
3440 "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
3441 "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
3442 }
3443 else:
3444 live_data[name] = {}
3445 else:
3446 live_data = dict.fromkeys(nodenames, {})
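# Illustrative shape of live_data (node names and values hypothetical):
#   {"node1.example.com": {"mtotal": 4096, "mfree": 1024, ...},
#    "node2.example.com": {}}  # empty dict when no usable data came back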
3448 node_to_primary = dict([(name, set()) for name in nodenames])
3449 node_to_secondary = dict([(name, set()) for name in nodenames])
3451 inst_fields = frozenset(("pinst_cnt", "pinst_list",
3452 "sinst_cnt", "sinst_list"))
3453 if inst_fields & frozenset(self.op.output_fields):
3454 inst_data = self.cfg.GetAllInstancesInfo()
3456 for inst in inst_data.values():
3457 if inst.primary_node in node_to_primary:
3458 node_to_primary[inst.primary_node].add(inst.name)
3459 for secnode in inst.secondary_nodes:
3460 if secnode in node_to_secondary:
3461 node_to_secondary[secnode].add(inst.name)
3463 master_node = self.cfg.GetMasterNode()
3465 # end data gathering
3467 output = []
3468 for node in nodelist:
3469 node_output = []
3470 for field in self.op.output_fields:
3471 if field in self._SIMPLE_FIELDS:
3472 val = getattr(node, field)
3473 elif field == "pinst_list":
3474 val = list(node_to_primary[node.name])
3475 elif field == "sinst_list":
3476 val = list(node_to_secondary[node.name])
3477 elif field == "pinst_cnt":
3478 val = len(node_to_primary[node.name])
3479 elif field == "sinst_cnt":
3480 val = len(node_to_secondary[node.name])
3481 elif field == "pip":
3482 val = node.primary_ip
3483 elif field == "sip":
3484 val = node.secondary_ip
3485 elif field == "tags":
3486 val = list(node.GetTags())
3487 elif field == "master":
3488 val = node.name == master_node
3489 elif self._FIELDS_DYNAMIC.Matches(field):
3490 val = live_data[node.name].get(field, None)
3491 elif field == "role":
3492 if node.name == master_node:
3493 val = "M"
3494 elif node.master_candidate:
3495 val = "C"
3496 elif node.drained:
3497 val = "D"
3498 elif node.offline:
3499 val = "O"
3500 else:
3501 val = "R"
3502 elif field == "group.uuid":
3503 val = node.group
3504 elif field == "group":
3505 ng = groups.get(node.group, None)
3506 if ng is None:
3507 val = "<unknown>"
3508 else:
3509 val = ng.name
3510 else:
3511 raise errors.ParameterError(field)
3512 node_output.append(val)
3513 output.append(node_output)
3515 return output
3518 class LUQueryNodeVolumes(NoHooksLU):
3519 """Logical unit for getting volumes on node(s).
3524 ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
3527 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3528 _FIELDS_STATIC = utils.FieldSet("node")
3530 def CheckArguments(self):
3531 _CheckOutputFields(static=self._FIELDS_STATIC,
3532 dynamic=self._FIELDS_DYNAMIC,
3533 selected=self.op.output_fields)
3535 def ExpandNames(self):
3536 self.needed_locks = {}
3537 self.share_locks[locking.LEVEL_NODE] = 1
3538 if not self.op.nodes:
3539 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3541 self.needed_locks[locking.LEVEL_NODE] = \
3542 _GetWantedNodes(self, self.op.nodes)
3544 def Exec(self, feedback_fn):
3545 """Computes the list of nodes and their attributes.
3548 nodenames = self.acquired_locks[locking.LEVEL_NODE]
3549 volumes = self.rpc.call_node_volumes(nodenames)
3551 ilist = [self.cfg.GetInstanceInfo(iname) for iname
3552 in self.cfg.GetInstanceList()]
3554 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3556 output = []
3557 for node in nodenames:
3558 nresult = volumes[node]
3559 if nresult.offline:
3560 continue
3561 msg = nresult.fail_msg
3563 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3566 node_vols = nresult.payload[:]
3567 node_vols.sort(key=lambda vol: vol['dev'])
3569 for vol in node_vols:
3570 node_output = []
3571 for field in self.op.output_fields:
3572 if field == "node":
3573 val = node
3574 elif field == "phys":
3575 val = vol['dev']
3576 elif field == "vg":
3577 val = vol['vg']
3578 elif field == "name":
3579 val = vol['name']
3580 elif field == "size":
3581 val = int(float(vol['size']))
3582 elif field == "instance":
3583 for inst in ilist:
3584 if node not in lv_by_node[inst]:
3585 continue
3586 if vol['name'] in lv_by_node[inst][node]:
3587 val = inst.name
3588 break
3589 else:
3590 val = '-'
3591 else:
3592 raise errors.ParameterError(field)
3593 node_output.append(str(val))
3595 output.append(node_output)
3597 return output
3600 class LUQueryNodeStorage(NoHooksLU):
3601 """Logical unit for getting information on storage units on node(s).
3604 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3607 ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
3608 ("storage_type", ht.NoDefault, _CheckStorageType),
3609 ("name", None, ht.TMaybeString),
3613 def CheckArguments(self):
3614 _CheckOutputFields(static=self._FIELDS_STATIC,
3615 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3616 selected=self.op.output_fields)
3618 def ExpandNames(self):
3619 self.needed_locks = {}
3620 self.share_locks[locking.LEVEL_NODE] = 1
3622 if self.op.nodes:
3623 self.needed_locks[locking.LEVEL_NODE] = \
3624 _GetWantedNodes(self, self.op.nodes)
3625 else:
3626 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3628 def Exec(self, feedback_fn):
3629 """Computes the list of nodes and their attributes.
3632 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3634 # Always get name to sort by
3635 if constants.SF_NAME in self.op.output_fields:
3636 fields = self.op.output_fields[:]
3637 else:
3638 fields = [constants.SF_NAME] + self.op.output_fields
3640 # Never ask for node or type as it's only known to the LU
3641 for extra in [constants.SF_NODE, constants.SF_TYPE]:
3642 while extra in fields:
3643 fields.remove(extra)
3645 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3646 name_idx = field_idx[constants.SF_NAME]
3648 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3649 data = self.rpc.call_storage_list(self.nodes,
3650 self.op.storage_type, st_args,
3651 self.op.name, fields)
3653 result = []
3655 for node in utils.NiceSort(self.nodes):
3656 nresult = data[node]
3657 if nresult.offline:
3658 continue
3660 msg = nresult.fail_msg
3662 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3665 rows = dict([(row[name_idx], row) for row in nresult.payload])
3667 for name in utils.NiceSort(rows.keys()):
3668 row = rows[name]
3670 out = []
3672 for field in self.op.output_fields:
3673 if field == constants.SF_NODE:
3674 val = node
3675 elif field == constants.SF_TYPE:
3676 val = self.op.storage_type
3677 elif field in field_idx:
3678 val = row[field_idx[field]]
3679 else:
3680 raise errors.ParameterError(field)
3682 out.append(val)
3684 result.append(out)
3686 return result
3689 class LUModifyNodeStorage(NoHooksLU):
3690 """Logical unit for modifying a storage volume on a node.
3695 ("storage_type", ht.NoDefault, _CheckStorageType),
3696 ("name", ht.NoDefault, ht.TNonEmptyString),
3697 ("changes", ht.NoDefault, ht.TDict),
3701 def CheckArguments(self):
3702 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3704 storage_type = self.op.storage_type
3706 try:
3707 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3708 except KeyError:
3709 raise errors.OpPrereqError("Storage units of type '%s' can not be"
3710 " modified" % storage_type,
3713 diff = set(self.op.changes.keys()) - modifiable
3715 raise errors.OpPrereqError("The following fields can not be modified for"
3716 " storage units of type '%s': %r" %
3717 (storage_type, list(diff)),
3718 errors.ECODE_INVAL)
3720 def ExpandNames(self):
3721 self.needed_locks = {
3722 locking.LEVEL_NODE: self.op.node_name,
3723 }
3725 def Exec(self, feedback_fn):
3726 """Computes the list of nodes and their attributes.
3729 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3730 result = self.rpc.call_storage_modify(self.op.node_name,
3731 self.op.storage_type, st_args,
3732 self.op.name, self.op.changes)
3733 result.Raise("Failed to modify storage unit '%s' on %s" %
3734 (self.op.name, self.op.node_name))
3737 class LUAddNode(LogicalUnit):
3738 """Logical unit for adding node to the cluster.
3741 HPATH = "node-add"
3742 HTYPE = constants.HTYPE_NODE
3743 _OP_PARAMS = [
3744 _PNodeName,
3745 ("primary_ip", None, ht.NoType),
3746 ("secondary_ip", None, ht.TMaybeString),
3747 ("readd", False, ht.TBool),
3748 ("group", None, ht.TMaybeString),
3749 ("master_capable", None, ht.TMaybeBool),
3750 ("vm_capable", None, ht.TMaybeBool),
3751 ("ndparams", None, ht.TOr(ht.TDict, ht.TNone)),
3753 _NFLAGS = ["master_capable", "vm_capable"]
3755 def CheckArguments(self):
3756 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
3757 # validate/normalize the node name
3758 self.hostname = netutils.GetHostname(name=self.op.node_name,
3759 family=self.primary_ip_family)
3760 self.op.node_name = self.hostname.name
3761 if self.op.readd and self.op.group:
3762 raise errors.OpPrereqError("Cannot pass a node group when a node is"
3763 " being readded", errors.ECODE_INVAL)
3765 def BuildHooksEnv(self):
3768 This will run on all nodes before, and on all nodes + the new node after.
3772 "OP_TARGET": self.op.node_name,
3773 "NODE_NAME": self.op.node_name,
3774 "NODE_PIP": self.op.primary_ip,
3775 "NODE_SIP": self.op.secondary_ip,
3776 "MASTER_CAPABLE": str(self.op.master_capable),
3777 "VM_CAPABLE": str(self.op.vm_capable),
3779 nodes_0 = self.cfg.GetNodeList()
3780 nodes_1 = nodes_0 + [self.op.node_name, ]
3781 return env, nodes_0, nodes_1
3783 def CheckPrereq(self):
3784 """Check prerequisites.
3787 - the new node is not already in the config
3789 - its parameters (single/dual homed) matches the cluster
3791 Any errors are signaled by raising errors.OpPrereqError.
3794 cfg = self.cfg
3795 hostname = self.hostname
3796 node = hostname.name
3797 primary_ip = self.op.primary_ip = hostname.ip
3798 if self.op.secondary_ip is None:
3799 if self.primary_ip_family == netutils.IP6Address.family:
3800 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
3801 " IPv4 address must be given as secondary",
3803 self.op.secondary_ip = primary_ip
3805 secondary_ip = self.op.secondary_ip
3806 if not netutils.IP4Address.IsValid(secondary_ip):
3807 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
3808 " address" % secondary_ip, errors.ECODE_INVAL)
3810 node_list = cfg.GetNodeList()
3811 if not self.op.readd and node in node_list:
3812 raise errors.OpPrereqError("Node %s is already in the configuration" %
3813 node, errors.ECODE_EXISTS)
3814 elif self.op.readd and node not in node_list:
3815 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3818 self.changed_primary_ip = False
3820 for existing_node_name in node_list:
3821 existing_node = cfg.GetNodeInfo(existing_node_name)
3823 if self.op.readd and node == existing_node_name:
3824 if existing_node.secondary_ip != secondary_ip:
3825 raise errors.OpPrereqError("Readded node doesn't have the same IP"
3826 " address configuration as before",
3828 if existing_node.primary_ip != primary_ip:
3829 self.changed_primary_ip = True
3833 if (existing_node.primary_ip == primary_ip or
3834 existing_node.secondary_ip == primary_ip or
3835 existing_node.primary_ip == secondary_ip or
3836 existing_node.secondary_ip == secondary_ip):
3837 raise errors.OpPrereqError("New node ip address(es) conflict with"
3838 " existing node %s" % existing_node.name,
3839 errors.ECODE_NOTUNIQUE)
3841 # After this 'if' block, None is no longer a valid value for the
3842 # _capable op attributes
3844 old_node = self.cfg.GetNodeInfo(node)
3845 assert old_node is not None, "Can't retrieve locked node %s" % node
3846 for attr in self._NFLAGS:
3847 if getattr(self.op, attr) is None:
3848 setattr(self.op, attr, getattr(old_node, attr))
3850 for attr in self._NFLAGS:
3851 if getattr(self.op, attr) is None:
3852 setattr(self.op, attr, True)
3854 if self.op.readd and not self.op.vm_capable:
3855 pri, sec = cfg.GetNodeInstances(node)
3857 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
3858 " flag set to false, but it already holds"
3859 " instances" % node,
3862 # check that the type of the node (single versus dual homed) is the
3863 # same as for the master
3864 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3865 master_singlehomed = myself.secondary_ip == myself.primary_ip
3866 newbie_singlehomed = secondary_ip == primary_ip
3867 if master_singlehomed != newbie_singlehomed:
3868 if master_singlehomed:
3869 raise errors.OpPrereqError("The master has no secondary ip but the"
3870 " new node has one",
3873 raise errors.OpPrereqError("The master has a secondary ip but the"
3874 " new node doesn't have one",
3877 # checks reachability
3878 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3879 raise errors.OpPrereqError("Node not reachable by ping",
3880 errors.ECODE_ENVIRON)
3882 if not newbie_singlehomed:
3883 # check reachability from my secondary ip to newbie's secondary ip
3884 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3885 source=myself.secondary_ip):
3886 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3887 " based ping to node daemon port",
3888 errors.ECODE_ENVIRON)
3895 if self.op.master_capable:
3896 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3898 self.master_candidate = False
3901 self.new_node = old_node
3903 node_group = cfg.LookupNodeGroup(self.op.group)
3904 self.new_node = objects.Node(name=node,
3905 primary_ip=primary_ip,
3906 secondary_ip=secondary_ip,
3907 master_candidate=self.master_candidate,
3908 offline=False, drained=False,
3911 if self.op.ndparams:
3912 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
3914 def Exec(self, feedback_fn):
3915 """Adds the new node to the cluster.
3918 new_node = self.new_node
3919 node = new_node.name
3921 # for re-adds, reset the offline/drained/master-candidate flags;
3922 # we need to reset here, otherwise offline would prevent RPC calls
3923 # later in the procedure; this also means that if the re-add
3924 # fails, we are left with a non-offlined, broken node
3926 new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3927 self.LogInfo("Readding a node, the offline/drained flags were reset")
3928 # if we demote the node, we do cleanup later in the procedure
3929 new_node.master_candidate = self.master_candidate
3930 if self.changed_primary_ip:
3931 new_node.primary_ip = self.op.primary_ip
3933 # copy the master/vm_capable flags
3934 for attr in self._NFLAGS:
3935 setattr(new_node, attr, getattr(self.op, attr))
3937 # notify the user about any possible mc promotion
3938 if new_node.master_candidate:
3939 self.LogInfo("Node will be a master candidate")
3941 if self.op.ndparams:
3942 new_node.ndparams = self.op.ndparams
3944 # check connectivity
3945 result = self.rpc.call_version([node])[node]
3946 result.Raise("Can't get version information from node %s" % node)
3947 if constants.PROTOCOL_VERSION == result.payload:
3948 logging.info("Communication to node %s fine, sw version %s match",
3949 node, result.payload)
3951 raise errors.OpExecError("Version mismatch master version %s,"
3952 " node version %s" %
3953 (constants.PROTOCOL_VERSION, result.payload))
3955 # Add node to our /etc/hosts, and add key to known_hosts
3956 if self.cfg.GetClusterInfo().modify_etc_hosts:
3957 master_node = self.cfg.GetMasterNode()
3958 result = self.rpc.call_etc_hosts_modify(master_node,
3959 constants.ETC_HOSTS_ADD,
3962 result.Raise("Can't update hosts file with new host data")
3964 if new_node.secondary_ip != new_node.primary_ip:
3965 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
3968 node_verify_list = [self.cfg.GetMasterNode()]
3969 node_verify_param = {
3970 constants.NV_NODELIST: [node],
3971 # TODO: do a node-net-test as well?
3974 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3975 self.cfg.GetClusterName())
3976 for verifier in node_verify_list:
3977 result[verifier].Raise("Cannot communicate with node %s" % verifier)
3978 nl_payload = result[verifier].payload[constants.NV_NODELIST]
3980 for failed in nl_payload:
3981 feedback_fn("ssh/hostname verification failed"
3982 " (checking from %s): %s" %
3983 (verifier, nl_payload[failed]))
3984 raise errors.OpExecError("ssh/hostname verification failed.")
3987 _RedistributeAncillaryFiles(self)
3988 self.context.ReaddNode(new_node)
3989 # make sure we redistribute the config
3990 self.cfg.Update(new_node, feedback_fn)
3991 # and make sure the new node will not have old files around
3992 if not new_node.master_candidate:
3993 result = self.rpc.call_node_demote_from_mc(new_node.name)
3994 msg = result.fail_msg
3996 self.LogWarning("Node failed to demote itself from master"
3997 " candidate status: %s" % msg)
3999 _RedistributeAncillaryFiles(self, additional_nodes=[node],
4000 additional_vm=self.op.vm_capable)
4001 self.context.AddNode(new_node, self.proc.GetECId())
4004 class LUSetNodeParams(LogicalUnit):
4005 """Modifies the parameters of a node.
4007 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
4008 to the node role (as _ROLE_*)
4009 @cvar _R2F: a dictionary from node role to tuples of flags
4010 @cvar _FLAGS: a list of attribute names corresponding to the flags
4013 HPATH = "node-modify"
4014 HTYPE = constants.HTYPE_NODE
4017 ("master_candidate", None, ht.TMaybeBool),
4018 ("offline", None, ht.TMaybeBool),
4019 ("drained", None, ht.TMaybeBool),
4020 ("auto_promote", False, ht.TBool),
4021 ("master_capable", None, ht.TMaybeBool),
4022 ("vm_capable", None, ht.TMaybeBool),
4023 ("secondary_ip", None, ht.TMaybeString),
4024 ("ndparams", None, ht.TOr(ht.TDict, ht.TNone)),
4028 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
4030 (True, False, False): _ROLE_CANDIDATE,
4031 (False, True, False): _ROLE_DRAINED,
4032 (False, False, True): _ROLE_OFFLINE,
4033 (False, False, False): _ROLE_REGULAR,
4035 _R2F = dict((v, k) for k, v in _F2R.items())
4036 _FLAGS = ["master_candidate", "drained", "offline"]
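# Illustrative sketch (hypothetical node object): converting between flags
# and roles is a pair of dict lookups:
#   flags = (node.master_candidate, node.drained, node.offline)
#   role = LUSetNodeParams._F2R[flags]   # (True, False, False) -> _ROLE_CANDIDATE
#   (mc, drained, offline) = LUSetNodeParams._R2F[role]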
4038 def CheckArguments(self):
4039 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4040 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
4041 self.op.master_capable, self.op.vm_capable,
4042 self.op.secondary_ip]
4043 if all_mods.count(None) == len(all_mods):
4044 raise errors.OpPrereqError("Please pass at least one modification",
4046 if all_mods.count(True) > 1:
4047 raise errors.OpPrereqError("Can't set the node into more than one"
4048 " state at the same time",
4051 # Boolean value that tells us whether we might be demoting from MC
4052 self.might_demote = (self.op.master_candidate == False or
4053 self.op.offline == True or
4054 self.op.drained == True or
4055 self.op.master_capable == False)
4057 if self.op.secondary_ip:
4058 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
4059 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4060 " address" % self.op.secondary_ip,
4063 self.lock_all = self.op.auto_promote and self.might_demote
4064 self.lock_instances = self.op.secondary_ip is not None
4066 def ExpandNames(self):
4068 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
4070 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
4072 if self.lock_instances:
4073 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4075 def DeclareLocks(self, level):
4076 # If we have locked all instances, release the locks of the ones living
4077 # on nodes unrelated to the current operation before waiting to lock nodes.
4078 if level == locking.LEVEL_NODE and self.lock_instances:
4079 instances_keep, instances_release = [], []
4081 self.affected_instances = []
4082 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4083 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
4084 instance = self.context.cfg.GetInstanceInfo(instance_name)
4085 i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
4086 if i_mirrored and self.op.node_name in instance.all_nodes:
4087 instances_keep.append(instance_name)
4088 self.affected_instances.append(instance)
4090 instances_release.append(instance_name)
4091 if instances_release:
4092 self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
4093 self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep
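# Illustrative example (hypothetical setup): when modifying "node2" with two
# instance locks held, a DRBD-mirrored instance spanning node2 and a plain
# instance on node3, the loop above keeps the lock of the first (mirrored
# and touching node2) and releases the lock of the second.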
4095 def BuildHooksEnv(self):
4098 This runs on the master node.
4102 "OP_TARGET": self.op.node_name,
4103 "MASTER_CANDIDATE": str(self.op.master_candidate),
4104 "OFFLINE": str(self.op.offline),
4105 "DRAINED": str(self.op.drained),
4106 "MASTER_CAPABLE": str(self.op.master_capable),
4107 "VM_CAPABLE": str(self.op.vm_capable),
4109 nl = [self.cfg.GetMasterNode(),
4113 def CheckPrereq(self):
4114 """Check prerequisites.
4116 This checks the requested flag and parameter changes against the
4117 current node state.
4119 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
4121 if (self.op.master_candidate is not None or
4122 self.op.drained is not None or
4123 self.op.offline is not None):
4124 # we can't change the master's node flags
4125 if self.op.node_name == self.cfg.GetMasterNode():
4126 raise errors.OpPrereqError("The master role can be changed"
4127 " only via master-failover",
4130 if self.op.master_candidate and not node.master_capable:
4131 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
4132 " it a master candidate" % node.name,
4135 if self.op.vm_capable == False:
4136 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
4138 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
4139 " the vm_capable flag" % node.name,
4142 if node.master_candidate and self.might_demote and not self.lock_all:
4143 assert not self.op.auto_promote, "auto-promote set but lock_all not"
4144 # check if after removing the current node, we're missing master
4146 (mc_remaining, mc_should, _) = \
4147 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
4148 if mc_remaining < mc_should:
4149 raise errors.OpPrereqError("Not enough master candidates, please"
4150 " pass auto_promote to allow promotion",
4153 self.old_flags = old_flags = (node.master_candidate,
4154 node.drained, node.offline)
4155 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
4156 self.old_role = old_role = self._F2R[old_flags]
4158 # Check for ineffective changes
4159 for attr in self._FLAGS:
4160 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
4161 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
4162 setattr(self.op, attr, None)
4164 # Past this point, any flag change to False means a transition
4165 # away from the respective state, as only real changes are kept
4167 # If we're being deofflined/drained, we'll MC ourself if needed
4168 if (self.op.drained == False or self.op.offline == False or
4169 (self.op.master_capable and not node.master_capable)):
4170 if _DecideSelfPromotion(self):
4171 self.op.master_candidate = True
4172 self.LogInfo("Auto-promoting node to master candidate")
4174 # If we're no longer master capable, we'll demote ourselves from MC
4175 if self.op.master_capable == False and node.master_candidate:
4176 self.LogInfo("Demoting from master candidate")
4177 self.op.master_candidate = False
4180 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
4181 if self.op.master_candidate:
4182 new_role = self._ROLE_CANDIDATE
4183 elif self.op.drained:
4184 new_role = self._ROLE_DRAINED
4185 elif self.op.offline:
4186 new_role = self._ROLE_OFFLINE
4187 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
4188 # False is still in new flags, which means we're un-setting (the
4190 new_role = self._ROLE_REGULAR
4191 else: # no new flags, nothing, keep old role
4194 self.new_role = new_role
4196 if old_role == self._ROLE_OFFLINE and new_role != old_role:
4197 # Trying to transition out of offline status
4198 result = self.rpc.call_version([node.name])[node.name]
4200 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
4201 " to report its version: %s" %
4202 (node.name, result.fail_msg),
4205 self.LogWarning("Transitioning node from offline to online state"
4206 " without using re-add. Please make sure the node"
4209 if self.op.secondary_ip:
4210 # Ok even without locking, because this can't be changed by any LU
4211 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
4212 master_singlehomed = master.secondary_ip == master.primary_ip
4213 if master_singlehomed and self.op.secondary_ip:
4214 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
4215 " homed cluster", errors.ECODE_INVAL)
4218 if self.affected_instances:
4219 raise errors.OpPrereqError("Cannot change secondary ip: offline"
4220 " node has instances (%s) configured"
4221 " to use it" % self.affected_instances)
4223 # On online nodes, check that no instances are running, and that
4224 # the node has the new ip and we can reach it.
4225 for instance in self.affected_instances:
4226 _CheckInstanceDown(self, instance, "cannot change secondary ip")
4228 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
4229 if master.name != node.name:
4230 # check reachability from master secondary ip to new secondary ip
4231 if not netutils.TcpPing(self.op.secondary_ip,
4232 constants.DEFAULT_NODED_PORT,
4233 source=master.secondary_ip):
4234 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4235 " based ping to node daemon port",
4236 errors.ECODE_ENVIRON)
4238 if self.op.ndparams:
4239 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
4240 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
4241 self.new_ndparams = new_ndparams
4243 def Exec(self, feedback_fn):
4248 old_role = self.old_role
4249 new_role = self.new_role
4253 if self.op.ndparams:
4254 node.ndparams = self.new_ndparams
4256 for attr in ["master_capable", "vm_capable"]:
4257 val = getattr(self.op, attr)
4259 setattr(node, attr, val)
4260 result.append((attr, str(val)))
4262 if new_role != old_role:
4263 # Tell the node to demote itself, if no longer MC and not offline
4264 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
4265 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
4267 self.LogWarning("Node failed to demote itself: %s", msg)
4269 new_flags = self._R2F[new_role]
4270 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
4272 result.append((desc, str(nf)))
4273 (node.master_candidate, node.drained, node.offline) = new_flags
4275 # we locked all nodes, so adjust the candidate pool before updating this node
4277 _AdjustCandidatePool(self, [node.name])
4279 if self.op.secondary_ip:
4280 node.secondary_ip = self.op.secondary_ip
4281 result.append(("secondary_ip", self.op.secondary_ip))
4283 # this will trigger configuration file update, if needed
4284 self.cfg.Update(node, feedback_fn)
4286 # this will trigger job queue propagation or cleanup if the mc
4288 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
4289 self.context.ReaddNode(node)
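# Illustrative role transition (hypothetical node): draining a master
# candidate moves old_flags (True, False, False) to new_flags
# (False, True, False), so the zip() above records the changes
# ("master_candidate", "False") and ("drained", "True") in result.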
4294 class LUPowercycleNode(NoHooksLU):
4295 """Powercycles a node.
4304 def CheckArguments(self):
4305 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4306 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4307 raise errors.OpPrereqError("The node is the master and the force"
4308 " parameter was not set",
4311 def ExpandNames(self):
4312 """Locking for PowercycleNode.
4314 This is a last-resort option and shouldn't block on other
4315 jobs. Therefore, we grab no locks.
4318 self.needed_locks = {}
4320 def Exec(self, feedback_fn):
4324 result = self.rpc.call_node_powercycle(self.op.node_name,
4325 self.cfg.GetHypervisorType())
4326 result.Raise("Failed to schedule the reboot")
4327 return result.payload
4330 class LUQueryClusterInfo(NoHooksLU):
4331 """Query cluster configuration.
4336 def ExpandNames(self):
4337 self.needed_locks = {}
4339 def Exec(self, feedback_fn):
4340 """Return cluster config.
4343 cluster = self.cfg.GetClusterInfo()
4346 # Filter just for enabled hypervisors
4347 for os_name, hv_dict in cluster.os_hvp.items():
4348 os_hvp[os_name] = {}
4349 for hv_name, hv_params in hv_dict.items():
4350 if hv_name in cluster.enabled_hypervisors:
4351 os_hvp[os_name][hv_name] = hv_params
4353 # Convert ip_family to ip_version
4354 primary_ip_version = constants.IP4_VERSION
4355 if cluster.primary_ip_family == netutils.IP6Address.family:
4356 primary_ip_version = constants.IP6_VERSION
4359 "software_version": constants.RELEASE_VERSION,
4360 "protocol_version": constants.PROTOCOL_VERSION,
4361 "config_version": constants.CONFIG_VERSION,
4362 "os_api_version": max(constants.OS_API_VERSIONS),
4363 "export_version": constants.EXPORT_VERSION,
4364 "architecture": (platform.architecture()[0], platform.machine()),
4365 "name": cluster.cluster_name,
4366 "master": cluster.master_node,
4367 "default_hypervisor": cluster.enabled_hypervisors[0],
4368 "enabled_hypervisors": cluster.enabled_hypervisors,
4369 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4370 for hypervisor_name in cluster.enabled_hypervisors]),
4372 "beparams": cluster.beparams,
4373 "osparams": cluster.osparams,
4374 "nicparams": cluster.nicparams,
4375 "candidate_pool_size": cluster.candidate_pool_size,
4376 "master_netdev": cluster.master_netdev,
4377 "volume_group_name": cluster.volume_group_name,
4378 "drbd_usermode_helper": cluster.drbd_usermode_helper,
4379 "file_storage_dir": cluster.file_storage_dir,
4380 "maintain_node_health": cluster.maintain_node_health,
4381 "ctime": cluster.ctime,
4382 "mtime": cluster.mtime,
4383 "uuid": cluster.uuid,
4384 "tags": list(cluster.GetTags()),
4385 "uid_pool": cluster.uid_pool,
4386 "default_iallocator": cluster.default_iallocator,
4387 "reserved_lvs": cluster.reserved_lvs,
4388 "primary_ip_version": primary_ip_version,
4389 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
4395 class LUQueryConfigValues(NoHooksLU):
4396 """Return configuration values.
4399 _OP_PARAMS = [_POutputFields]
4401 _FIELDS_DYNAMIC = utils.FieldSet()
4402 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4403 "watcher_pause", "volume_group_name")
4405 def CheckArguments(self):
4406 _CheckOutputFields(static=self._FIELDS_STATIC,
4407 dynamic=self._FIELDS_DYNAMIC,
4408 selected=self.op.output_fields)
4410 def ExpandNames(self):
4411 self.needed_locks = {}
4413 def Exec(self, feedback_fn):
4414 """Dump a representation of the cluster config to the standard output.
4418 for field in self.op.output_fields:
4419 if field == "cluster_name":
4420 entry = self.cfg.GetClusterName()
4421 elif field == "master_node":
4422 entry = self.cfg.GetMasterNode()
4423 elif field == "drain_flag":
4424 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4425 elif field == "watcher_pause":
4426 entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4427 elif field == "volume_group_name":
4428 entry = self.cfg.GetVGName()
4430 raise errors.ParameterError(field)
4431 values.append(entry)
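# Illustrative usage (hypothetical values): querying
# output_fields=["cluster_name", "master_node"] returns the values in the
# same order, e.g. ["cluster.example.com", "node1.example.com"].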
4435 class LUActivateInstanceDisks(NoHooksLU):
4436 """Bring up an instance's disks.
4441 ("ignore_size", False, ht.TBool),
4445 def ExpandNames(self):
4446 self._ExpandAndLockInstance()
4447 self.needed_locks[locking.LEVEL_NODE] = []
4448 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4450 def DeclareLocks(self, level):
4451 if level == locking.LEVEL_NODE:
4452 self._LockInstancesNodes()
4454 def CheckPrereq(self):
4455 """Check prerequisites.
4457 This checks that the instance is in the cluster.
4460 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4461 assert self.instance is not None, \
4462 "Cannot retrieve locked instance %s" % self.op.instance_name
4463 _CheckNodeOnline(self, self.instance.primary_node)
4465 def Exec(self, feedback_fn):
4466 """Activate the disks.
4469 disks_ok, disks_info = \
4470 _AssembleInstanceDisks(self, self.instance,
4471 ignore_size=self.op.ignore_size)
4473 raise errors.OpExecError("Cannot activate block devices")
4478 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4480 """Prepare the block devices for an instance.
4482 This sets up the block devices on all nodes.
4484 @type lu: L{LogicalUnit}
4485 @param lu: the logical unit on whose behalf we execute
4486 @type instance: L{objects.Instance}
4487 @param instance: the instance for whose disks we assemble
4488 @type disks: list of L{objects.Disk} or None
4489 @param disks: which disks to assemble (or all, if None)
4490 @type ignore_secondaries: boolean
4491 @param ignore_secondaries: if true, errors on secondary nodes
4492 won't result in an error return from the function
4493 @type ignore_size: boolean
4494 @param ignore_size: if true, the current known size of the disk
4495 will not be used during the disk activation, useful for cases
4496 when the size is wrong
4497 @return: False if the operation failed, otherwise a list of
4498 (host, instance_visible_name, node_visible_device_path)
4499 tuples with the mapping from instance devices to node devices
4504 iname = instance.name
4505 disks = _ExpandCheckDisks(instance, disks)
4507 # With the two-pass mechanism we try to reduce the window of
4508 # opportunity for the race condition of switching DRBD to primary
4509 # before the handshake occurred, but we do not eliminate it
4511 # The proper fix would be to wait (with some limits) until the
4512 # connection has been made and drbd transitions from WFConnection
4513 # into any other network-connected state (Connected, SyncTarget,
4516 # 1st pass, assemble on all nodes in secondary mode
4517 for inst_disk in disks:
4518 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4520 node_disk = node_disk.Copy()
4521 node_disk.UnsetSize()
4522 lu.cfg.SetDiskID(node_disk, node)
4523 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
4524 msg = result.fail_msg
4526 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4527 " (is_primary=False, pass=1): %s",
4528 inst_disk.iv_name, node, msg)
4529 if not ignore_secondaries:
4532 # FIXME: race condition on drbd migration to primary
4534 # 2nd pass, do only the primary node
4535 for inst_disk in disks:
4538 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4539 if node != instance.primary_node:
4542 node_disk = node_disk.Copy()
4543 node_disk.UnsetSize()
4544 lu.cfg.SetDiskID(node_disk, node)
4545 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
4546 msg = result.fail_msg
4548 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4549 " (is_primary=True, pass=2): %s",
4550 inst_disk.iv_name, node, msg)
4553 dev_path = result.payload
4555 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4557 # leave the disks configured for the primary node
4558 # this is a workaround that would be fixed better by
4559 # improving the logical/physical id handling
4561 lu.cfg.SetDiskID(disk, instance.primary_node)
4563 return disks_ok, device_info
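# Illustrative caller sketch (lu and instance assumed to be in scope); each
# device_info entry maps an instance-visible disk name to its node device:
#   disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     _ShutdownInstanceDisks(lu, instance)
#   for node, iv_name, dev_path in device_info:
#     lu.proc.LogInfo("%s: %s assembled as %s" % (node, iv_name, dev_path))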
4566 def _StartInstanceDisks(lu, instance, force):
4567 """Start the disks of an instance.
4570 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4571 ignore_secondaries=force)
4573 _ShutdownInstanceDisks(lu, instance)
4574 if force is not None and not force:
4575 lu.proc.LogWarning("", hint="If the message above refers to a"
4577 " you can retry the operation using '--force'.")
4578 raise errors.OpExecError("Disk consistency error")
4581 class LUDeactivateInstanceDisks(NoHooksLU):
4582 """Shutdown an instance's disks.
4590 def ExpandNames(self):
4591 self._ExpandAndLockInstance()
4592 self.needed_locks[locking.LEVEL_NODE] = []
4593 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4595 def DeclareLocks(self, level):
4596 if level == locking.LEVEL_NODE:
4597 self._LockInstancesNodes()
4599 def CheckPrereq(self):
4600 """Check prerequisites.
4602 This checks that the instance is in the cluster.
4605 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4606 assert self.instance is not None, \
4607 "Cannot retrieve locked instance %s" % self.op.instance_name
4609 def Exec(self, feedback_fn):
4610 """Deactivate the disks
4613 instance = self.instance
4614 _SafeShutdownInstanceDisks(self, instance)
4617 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4618 """Shutdown block devices of an instance.
4620 This function checks that the instance is not running before calling
4621 _ShutdownInstanceDisks.
4624 _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4625 _ShutdownInstanceDisks(lu, instance, disks=disks)
4628 def _ExpandCheckDisks(instance, disks):
4629 """Return the instance disks selected by the disks list
4631 @type disks: list of L{objects.Disk} or None
4632 @param disks: selected disks
4633 @rtype: list of L{objects.Disk}
4634 @return: selected instance disks to act on
4638 return instance.disks
4640 if not set(disks).issubset(instance.disks):
4641 raise errors.ProgrammerError("Can only act on disks belonging to the"
4646 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4647 """Shutdown block devices of an instance.
4649 This does the shutdown on all nodes of the instance.
4651 If ignore_primary is false, errors on the primary node are
4656 disks = _ExpandCheckDisks(instance, disks)
4659 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4660 lu.cfg.SetDiskID(top_disk, node)
4661 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4662 msg = result.fail_msg
4664 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4665 disk.iv_name, node, msg)
4666 if not ignore_primary or node != instance.primary_node:
4671 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4672 """Checks if a node has enough free memory.
4674 This function checks if a given node has the needed amount of free
4675 memory. In case the node has less memory or we cannot get the
4676 information from the node, this function raises an OpPrereqError
4679 @type lu: C{LogicalUnit}
4680 @param lu: a logical unit from which we get configuration data
4682 @param node: the node to check
4683 @type reason: C{str}
4684 @param reason: string to use in the error message
4685 @type requested: C{int}
4686 @param requested: the amount of memory in MiB to check for
4687 @type hypervisor_name: C{str}
4688 @param hypervisor_name: the hypervisor to ask for memory stats
4689 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4690 we cannot check the node
4693 nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
4694 nodeinfo[node].Raise("Can't get data from node %s" % node,
4695 prereq=True, ecode=errors.ECODE_ENVIRON)
4696 free_mem = nodeinfo[node].payload.get('memory_free', None)
4697 if not isinstance(free_mem, int):
4698 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4699 " was '%s'" % (node, free_mem),
4700 errors.ECODE_ENVIRON)
4701 if requested > free_mem:
4702 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4703 " needed %s MiB, available %s MiB" %
4704 (node, reason, requested, free_mem),
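# Worked example (hypothetical numbers): if the node reports a payload of
# {"memory_free": 2048} and requested is 4096, the check above raises
# OpPrereqError("Not enough memory on node ... needed 4096 MiB,
# available 2048 MiB").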
4708 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
4709 """Checks if nodes have enough free disk space in the all VGs.
4711 This function checks if all given nodes have the needed amount of
4712 free disk. In case any node has less disk or we cannot get the
4713 information from the node, this function raises an OpPrereqError
4716 @type lu: C{LogicalUnit}
4717 @param lu: a logical unit from which we get configuration data
4718 @type nodenames: C{list}
4719 @param nodenames: the list of node names to check
4720 @type req_sizes: C{dict}
4721 @param req_sizes: the hash of vg and corresponding amount of disk in
4723 @raise errors.OpPrereqError: if the node doesn't have enough disk,
4724 or we cannot check the node
4727 if req_sizes is not None:
4728 for vg, req_size in req_sizes.iteritems():
4729 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
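# Illustrative example (hypothetical sizes): req_sizes = {"xenvg": 10240,
# "backupvg": 2048} results in one _CheckNodesFreeDiskOnVG() call per VG,
# each checking all the given nodes; req_sizes = None checks nothing.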
4732 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
4733 """Checks if nodes have enough free disk space in the specified VG.
4735 This function checks if all given nodes have the needed amount of
4736 free disk. In case any node has less disk or we cannot get the
4737 information from the node, this function raises an OpPrereqError
4740 @type lu: C{LogicalUnit}
4741 @param lu: a logical unit from which we get configuration data
4742 @type nodenames: C{list}
4743 @param nodenames: the list of node names to check
4745 @param vg: the volume group to check
4746 @type requested: C{int}
4747 @param requested: the amount of disk in MiB to check for
4748 @raise errors.OpPrereqError: if the node doesn't have enough disk,
4749 or we cannot check the node
4752 nodeinfo = lu.rpc.call_node_info(nodenames, vg,
4753 lu.cfg.GetHypervisorType())
4754 for node in nodenames:
4755 info = nodeinfo[node]
4756 info.Raise("Cannot get current information from node %s" % node,
4757 prereq=True, ecode=errors.ECODE_ENVIRON)
4758 vg_free = info.payload.get("vg_free", None)
4759 if not isinstance(vg_free, int):
4760 raise errors.OpPrereqError("Can't compute free disk space on node"
4761 " %s for vg %s, result was '%s'" %
4762 (node, vg, vg_free), errors.ECODE_ENVIRON)
4763 if requested > vg_free:
4764 raise errors.OpPrereqError("Not enough disk space on target node %s"
4765 " vg %s: required %d MiB, available %d MiB" %
4766 (node, vg, requested, vg_free),
4770 def _CheckNodesFreeDisk(lu, nodenames, requested):
4771 """Checks if nodes have enough free disk space in the default VG.
4773 This function checks if all given nodes have the needed amount of
4774 free disk. In case any node has less disk or we cannot get the
4775 information from the node, this function raises an OpPrereqError
4778 @type lu: C{LogicalUnit}
4779 @param lu: a logical unit from which we get configuration data
4780 @type nodenames: C{list}
4781 @param nodenames: the list of node names to check
4782 @type requested: C{int}
4783 @param requested: the amount of disk in MiB to check for
4784 @raise errors.OpPrereqError: if the node doesn't have enough disk, or
4785 we cannot check the node
4788 nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4789 lu.cfg.GetHypervisorType())
4790 for node in nodenames:
4791 info = nodeinfo[node]
4792 info.Raise("Cannot get current information from node %s" % node,
4793 prereq=True, ecode=errors.ECODE_ENVIRON)
4794 vg_free = info.payload.get("vg_free", None)
4795 if not isinstance(vg_free, int):
4796 raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4797 " result was '%s'" % (node, vg_free),
4798 errors.ECODE_ENVIRON)
4799 if requested > vg_free:
4800 raise errors.OpPrereqError("Not enough disk space on target node %s:"
4801 " required %d MiB, available %d MiB" %
4802 (node, requested, vg_free),
4806 class LUStartupInstance(LogicalUnit):
4807 """Starts an instance.
4810 HPATH = "instance-start"
4811 HTYPE = constants.HTYPE_INSTANCE
4815 _PIgnoreOfflineNodes,
4816 ("hvparams", ht.EmptyDict, ht.TDict),
4817 ("beparams", ht.EmptyDict, ht.TDict),
4821 def CheckArguments(self):
4823 if self.op.beparams:
4824 # fill the beparams dict
4825 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4827 def ExpandNames(self):
4828 self._ExpandAndLockInstance()
4830 def BuildHooksEnv(self):
4833 This runs on master, primary and secondary nodes of the instance.
4837 "FORCE": self.op.force,
4839 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4840 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4843 def CheckPrereq(self):
4844 """Check prerequisites.
4846 This checks that the instance is in the cluster.
4849 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4850 assert self.instance is not None, \
4851 "Cannot retrieve locked instance %s" % self.op.instance_name
4854 if self.op.hvparams:
4855 # check hypervisor parameter syntax (locally)
4856 cluster = self.cfg.GetClusterInfo()
4857 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4858 filled_hvp = cluster.FillHV(instance)
4859 filled_hvp.update(self.op.hvparams)
4860 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4861 hv_type.CheckParameterSyntax(filled_hvp)
4862 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
4864 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
4866 if self.primary_offline and self.op.ignore_offline_nodes:
4867 self.proc.LogWarning("Ignoring offline primary node")
4869 if self.op.hvparams or self.op.beparams:
4870 self.proc.LogWarning("Overridden parameters are ignored")
4872 _CheckNodeOnline(self, instance.primary_node)
4874 bep = self.cfg.GetClusterInfo().FillBE(instance)
4876 # check bridges existence
4877 _CheckInstanceBridgesExist(self, instance)
4879 remote_info = self.rpc.call_instance_info(instance.primary_node,
4881 instance.hypervisor)
4882 remote_info.Raise("Error checking node %s" % instance.primary_node,
4883 prereq=True, ecode=errors.ECODE_ENVIRON)
4884 if not remote_info.payload: # not running already
4885 _CheckNodeFreeMemory(self, instance.primary_node,
4886 "starting instance %s" % instance.name,
4887 bep[constants.BE_MEMORY], instance.hypervisor)
4889 def Exec(self, feedback_fn):
4890 """Start the instance.
4893 instance = self.instance
4894 force = self.op.force
4896 self.cfg.MarkInstanceUp(instance.name)
4898 if self.primary_offline:
4899 assert self.op.ignore_offline_nodes
4900 self.proc.LogInfo("Primary node offline, marked instance as started")
4902 node_current = instance.primary_node
4904 _StartInstanceDisks(self, instance, force)
4906 result = self.rpc.call_instance_start(node_current, instance,
4907 self.op.hvparams, self.op.beparams)
4908 msg = result.fail_msg
4910 _ShutdownInstanceDisks(self, instance)
4911 raise errors.OpExecError("Could not start instance: %s" % msg)
4914 class LURebootInstance(LogicalUnit):
4915 """Reboot an instance.
4918 HPATH = "instance-reboot"
4919 HTYPE = constants.HTYPE_INSTANCE
4922 ("ignore_secondaries", False, ht.TBool),
4923 ("reboot_type", ht.NoDefault, ht.TElemOf(constants.REBOOT_TYPES)),
4928 def ExpandNames(self):
4929 self._ExpandAndLockInstance()
4931 def BuildHooksEnv(self):
4934 This runs on master, primary and secondary nodes of the instance.
4938 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4939 "REBOOT_TYPE": self.op.reboot_type,
4940 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
4942 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4943 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4946 def CheckPrereq(self):
4947 """Check prerequisites.
4949 This checks that the instance is in the cluster.
4952 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4953 assert self.instance is not None, \
4954 "Cannot retrieve locked instance %s" % self.op.instance_name
4956 _CheckNodeOnline(self, instance.primary_node)
4958 # check bridges existence
4959 _CheckInstanceBridgesExist(self, instance)
4961 def Exec(self, feedback_fn):
4962 """Reboot the instance.
4965 instance = self.instance
4966 ignore_secondaries = self.op.ignore_secondaries
4967 reboot_type = self.op.reboot_type
4969 node_current = instance.primary_node
4971 if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4972 constants.INSTANCE_REBOOT_HARD]:
4973 for disk in instance.disks:
4974 self.cfg.SetDiskID(disk, node_current)
4975 result = self.rpc.call_instance_reboot(node_current, instance,
4977 self.op.shutdown_timeout)
4978 result.Raise("Could not reboot instance")
4980 result = self.rpc.call_instance_shutdown(node_current, instance,
4981 self.op.shutdown_timeout)
4982 result.Raise("Could not shutdown instance for full reboot")
4983 _ShutdownInstanceDisks(self, instance)
4984 _StartInstanceDisks(self, instance, ignore_secondaries)
4985 result = self.rpc.call_instance_start(node_current, instance, None, None)
4986 msg = result.fail_msg
4988 _ShutdownInstanceDisks(self, instance)
4989 raise errors.OpExecError("Could not start instance for"
4990 " full reboot: %s" % msg)
4992 self.cfg.MarkInstanceUp(instance.name)
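# Note on the reboot paths above: soft and hard reboots are delegated to a
# single call_instance_reboot RPC on the primary node, while a full reboot
# is emulated as shutdown, disk deactivation, disk activation and start.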
4995 class LUShutdownInstance(LogicalUnit):
4996 """Shutdown an instance.
4999 HPATH = "instance-stop"
5000 HTYPE = constants.HTYPE_INSTANCE
5003 _PIgnoreOfflineNodes,
5004 ("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt),
5008 def ExpandNames(self):
5009 self._ExpandAndLockInstance()
5011 def BuildHooksEnv(self):
5014 This runs on master, primary and secondary nodes of the instance.
5017 env = _BuildInstanceHookEnvByObject(self, self.instance)
5018 env["TIMEOUT"] = self.op.timeout
5019 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5022 def CheckPrereq(self):
5023 """Check prerequisites.
5025 This checks that the instance is in the cluster.
5028 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5029 assert self.instance is not None, \
5030 "Cannot retrieve locked instance %s" % self.op.instance_name
5032 self.primary_offline = \
5033 self.cfg.GetNodeInfo(self.instance.primary_node).offline
5035 if self.primary_offline and self.op.ignore_offline_nodes:
5036 self.proc.LogWarning("Ignoring offline primary node")
5038 _CheckNodeOnline(self, self.instance.primary_node)
5040 def Exec(self, feedback_fn):
5041 """Shutdown the instance.
5044 instance = self.instance
5045 node_current = instance.primary_node
5046 timeout = self.op.timeout
5048 self.cfg.MarkInstanceDown(instance.name)
5050 if self.primary_offline:
5051 assert self.op.ignore_offline_nodes
5052 self.proc.LogInfo("Primary node offline, marked instance as stopped")
5054 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5055 msg = result.fail_msg
5057 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5059 _ShutdownInstanceDisks(self, instance)
5062 class LUReinstallInstance(LogicalUnit):
5063 """Reinstall an instance.
5066 HPATH = "instance-reinstall"
5067 HTYPE = constants.HTYPE_INSTANCE
5070 ("os_type", None, ht.TMaybeString),
5071 ("force_variant", False, ht.TBool),
5072 ("osparams", None, ht.TOr(ht.TDict, ht.TNone)),
5076 def ExpandNames(self):
5077 self._ExpandAndLockInstance()
5079 def BuildHooksEnv(self):
5082 This runs on master, primary and secondary nodes of the instance.
5085 env = _BuildInstanceHookEnvByObject(self, self.instance)
5086 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5089 def CheckPrereq(self):
5090 """Check prerequisites.
5092 This checks that the instance is in the cluster and is not running.
5095 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5096 assert instance is not None, \
5097 "Cannot retrieve locked instance %s" % self.op.instance_name
5098 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
5099 " offline, cannot reinstall")
5100 for node in instance.secondary_nodes:
5101 _CheckNodeOnline(self, node, "Instance secondary node offline,"
5102 " cannot reinstall")
5104 if instance.disk_template == constants.DT_DISKLESS:
5105 raise errors.OpPrereqError("Instance '%s' has no disks" %
5106 self.op.instance_name,
5108 _CheckInstanceDown(self, instance, "cannot reinstall")
5110 if self.op.os_type is not None:
5112 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
5113 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
5114 instance_os = self.op.os_type
5116 instance_os = instance.os
5118 nodelist = list(instance.all_nodes)
5120 if self.op.osparams:
5121 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
5122 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
5123 self.os_inst = i_osdict # the new dict (without defaults)
5127 self.instance = instance
5129 def Exec(self, feedback_fn):
5130 """Reinstall the instance.
5133 inst = self.instance
5135 if self.op.os_type is not None:
5136 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
5137 inst.os = self.op.os_type
5138 # Write to configuration
5139 self.cfg.Update(inst, feedback_fn)
5141 _StartInstanceDisks(self, inst, None)
5143 feedback_fn("Running the instance OS create scripts...")
5144 # FIXME: pass debug option from opcode to backend
5145 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
5146 self.op.debug_level,
5147 osparams=self.os_inst)
5148 result.Raise("Could not install OS for instance %s on node %s" %
5149 (inst.name, inst.primary_node))
5151 _ShutdownInstanceDisks(self, inst)
5154 class LURecreateInstanceDisks(LogicalUnit):
5155 """Recreate an instance's missing disks.
5158 HPATH = "instance-recreate-disks"
5159 HTYPE = constants.HTYPE_INSTANCE
5162 ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt)),
5166 def ExpandNames(self):
5167 self._ExpandAndLockInstance()
5169 def BuildHooksEnv(self):
5172 This runs on master, primary and secondary nodes of the instance.
5175 env = _BuildInstanceHookEnvByObject(self, self.instance)
5176 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5179 def CheckPrereq(self):
5180 """Check prerequisites.
5182 This checks that the instance is in the cluster and is not running.
5185 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5186 assert instance is not None, \
5187 "Cannot retrieve locked instance %s" % self.op.instance_name
5188 _CheckNodeOnline(self, instance.primary_node)
5190 if instance.disk_template == constants.DT_DISKLESS:
5191 raise errors.OpPrereqError("Instance '%s' has no disks" %
5192 self.op.instance_name, errors.ECODE_INVAL)
5193 _CheckInstanceDown(self, instance, "cannot recreate disks")
5195 if not self.op.disks:
5196 self.op.disks = range(len(instance.disks))
5198 for idx in self.op.disks:
5199 if idx >= len(instance.disks):
5200 raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
5203 self.instance = instance
5205 def Exec(self, feedback_fn):
5206 """Recreate the disks.
5210 for idx, _ in enumerate(self.instance.disks):
5211 if idx not in self.op.disks: # disk idx has not been passed in
5215 _CreateDisks(self, self.instance, to_skip=to_skip)
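# Worked example (hypothetical instance): for an instance with three disks
# and op.disks = [1], the loop above computes to_skip = [0, 2], so only
# disk 1 is recreated by _CreateDisks.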
5218 class LURenameInstance(LogicalUnit):
5219 """Rename an instance.
5222 HPATH = "instance-rename"
5223 HTYPE = constants.HTYPE_INSTANCE
5226 ("new_name", ht.NoDefault, ht.TNonEmptyString),
5227 ("ip_check", False, ht.TBool),
5228 ("name_check", True, ht.TBool),
5231 def CheckArguments(self):
5235 if self.op.ip_check and not self.op.name_check:
5236 # TODO: make the ip check more flexible and not depend on the name check
5237 raise errors.OpPrereqError("Cannot do ip check without a name check",
5240 def BuildHooksEnv(self):
5243 This runs on master, primary and secondary nodes of the instance.
5246 env = _BuildInstanceHookEnvByObject(self, self.instance)
5247 env["INSTANCE_NEW_NAME"] = self.op.new_name
5248 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5251 def CheckPrereq(self):
5252 """Check prerequisites.
5254 This checks that the instance is in the cluster and is not running.
5257 self.op.instance_name = _ExpandInstanceName(self.cfg,
5258 self.op.instance_name)
5259 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5260 assert instance is not None
5261 _CheckNodeOnline(self, instance.primary_node)
5262 _CheckInstanceDown(self, instance, "cannot rename")
5263 self.instance = instance
5265 new_name = self.op.new_name
5266 if self.op.name_check:
5267 hostname = netutils.GetHostname(name=new_name)
5268 new_name = self.op.new_name = hostname.name
5269 if (self.op.ip_check and
5270 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5271 raise errors.OpPrereqError("IP %s of instance %s already in use" %
5272 (hostname.ip, new_name),
5273 errors.ECODE_NOTUNIQUE)
5275 instance_list = self.cfg.GetInstanceList()
5276 if new_name in instance_list:
5277 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5278 new_name, errors.ECODE_EXISTS)
5280 def Exec(self, feedback_fn):
5281 """Reinstall the instance.
5284 inst = self.instance
5285 old_name = inst.name
5287 if inst.disk_template == constants.DT_FILE:
5288 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5290 self.cfg.RenameInstance(inst.name, self.op.new_name)
5291 # Change the instance lock. This is definitely safe while we hold the BGL
5292 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
5293 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5295 # re-read the instance from the configuration after rename
5296 inst = self.cfg.GetInstanceInfo(self.op.new_name)
5298 if inst.disk_template == constants.DT_FILE:
5299 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5300 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5301 old_file_storage_dir,
5302 new_file_storage_dir)
5303 result.Raise("Could not rename on node %s directory '%s' to '%s'"
5304 " (but the instance has been renamed in Ganeti)" %
5305 (inst.primary_node, old_file_storage_dir,
5306 new_file_storage_dir))
5308 _StartInstanceDisks(self, inst, None)
5310 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
5311 old_name, self.op.debug_level)
5312 msg = result.fail_msg
5314 msg = ("Could not run OS rename script for instance %s on node %s"
5315 " (but the instance has been renamed in Ganeti): %s" %
5316 (inst.name, inst.primary_node, msg))
5317 self.proc.LogWarning(msg)
5319 _ShutdownInstanceDisks(self, inst)
5324 class LURemoveInstance(LogicalUnit):
5325 """Remove an instance.
5328 HPATH = "instance-remove"
5329 HTYPE = constants.HTYPE_INSTANCE
5332 ("ignore_failures", False, ht.TBool),
5337 def ExpandNames(self):
5338 self._ExpandAndLockInstance()
5339 self.needed_locks[locking.LEVEL_NODE] = []
5340 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5342 def DeclareLocks(self, level):
5343 if level == locking.LEVEL_NODE:
5344 self._LockInstancesNodes()
5346 def BuildHooksEnv(self):
5349 This runs on master, primary and secondary nodes of the instance.
5352 env = _BuildInstanceHookEnvByObject(self, self.instance)
5353 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
5354 nl = [self.cfg.GetMasterNode()]
5355 nl_post = list(self.instance.all_nodes) + nl
5356 return env, nl, nl_post
5358 def CheckPrereq(self):
5359 """Check prerequisites.
5361 This checks that the instance is in the cluster.
5364 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5365 assert self.instance is not None, \
5366 "Cannot retrieve locked instance %s" % self.op.instance_name
5368 def Exec(self, feedback_fn):
5369 """Remove the instance.
5372 instance = self.instance
5373 logging.info("Shutting down instance %s on node %s",
5374 instance.name, instance.primary_node)
5376 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5377 self.op.shutdown_timeout)
5378 msg = result.fail_msg
5380 if self.op.ignore_failures:
5381 feedback_fn("Warning: can't shutdown instance: %s" % msg)
5383 raise errors.OpExecError("Could not shutdown instance %s on"
5385 (instance.name, instance.primary_node, msg))
5387 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5390 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5391 """Utility function to remove an instance.
5394 logging.info("Removing block devices for instance %s", instance.name)
5396 if not _RemoveDisks(lu, instance):
5397 if not ignore_failures:
5398 raise errors.OpExecError("Can't remove instance's disks")
5399 feedback_fn("Warning: can't remove instance's disks")
5401 logging.info("Removing instance %s out of cluster config", instance.name)
5403 lu.cfg.RemoveInstance(instance.name)
5405 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5406 "Instance lock removal conflict"
5408 # Remove lock for the instance
5409 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5412 class LUQueryInstances(NoHooksLU):
5413 """Logical unit for querying instances.
5416 # pylint: disable-msg=W0142
5419 ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
5420 ("use_locking", False, ht.TBool),
5423 _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
5424 "serial_no", "ctime", "mtime", "uuid"]
5425 _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
5427 "disk_template", "ip", "mac", "bridge",
5428 "nic_mode", "nic_link",
5429 "sda_size", "sdb_size", "vcpus", "tags",
5430 "network_port", "beparams",
5431 r"(disk)\.(size)/([0-9]+)",
5432 r"(disk)\.(sizes)", "disk_usage",
5433 r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
5434 r"(nic)\.(bridge)/([0-9]+)",
5435 r"(nic)\.(macs|ips|modes|links|bridges)",
5436 r"(disk|nic)\.(count)",
5437 "hvparams", "custom_hvparams",
5438 "custom_beparams", "custom_nicparams",
5439 ] + _SIMPLE_FIELDS +
5441 for name in constants.HVS_PARAMETERS
5442 if name not in constants.HVC_GLOBALS] +
5444 for name in constants.BES_PARAMETERS])
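# Illustrative field-name matches (hypothetical queries): "disk.size/0"
# matches r"(disk)\.(size)/([0-9]+)" with groups ("disk", "size", "0"), and
# "nic.macs" matches r"(nic)\.(macs|ips|modes|links|bridges)"; these groups
# drive the st_groups dispatch in Exec below.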
5445 _FIELDS_DYNAMIC = utils.FieldSet("oper_state",
5451 def CheckArguments(self):
5452 _CheckOutputFields(static=self._FIELDS_STATIC,
5453 dynamic=self._FIELDS_DYNAMIC,
5454 selected=self.op.output_fields)
5456 def ExpandNames(self):
5457 self.needed_locks = {}
5458 self.share_locks[locking.LEVEL_INSTANCE] = 1
5459 self.share_locks[locking.LEVEL_NODE] = 1
5462 self.wanted = _GetWantedInstances(self, self.op.names)
5464 self.wanted = locking.ALL_SET
5466 self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
5467 self.do_locking = self.do_node_query and self.op.use_locking
5469 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5470 self.needed_locks[locking.LEVEL_NODE] = []
5471 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5473 def DeclareLocks(self, level):
5474 if level == locking.LEVEL_NODE and self.do_locking:
5475 self._LockInstancesNodes()
5477 def Exec(self, feedback_fn):
5478 """Computes the list of nodes and their attributes.
5481 # pylint: disable-msg=R0912
5482 # way too many branches here
5483 all_info = self.cfg.GetAllInstancesInfo()
5484 if self.wanted == locking.ALL_SET:
5485 # caller didn't specify instance names, so ordering is not important
5487 instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5489 instance_names = all_info.keys()
5490 instance_names = utils.NiceSort(instance_names)
5492 # caller did specify names, so we must keep the ordering
5494 tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
5496 tgt_set = all_info.keys()
5497 missing = set(self.wanted).difference(tgt_set)
5499 raise errors.OpExecError("Some instances were removed before"
5500 " retrieving their data: %s" % missing)
5501 instance_names = self.wanted
5503 instance_list = [all_info[iname] for iname in instance_names]
5505 # begin data gathering
5507 nodes = frozenset([inst.primary_node for inst in instance_list])
5508 hv_list = list(set([inst.hypervisor for inst in instance_list]))
5512 if self.do_node_query:
5514 node_data = self.rpc.call_all_instances_info(nodes, hv_list)
5516 result = node_data[name]
5518 # offline nodes will be in both lists
5519 off_nodes.append(name)
5521 bad_nodes.append(name)
5524 live_data.update(result.payload)
5525 # else no instance is alive
5527 live_data = dict([(name, {}) for name in instance_names])
5529 # end data gathering
5534 cluster = self.cfg.GetClusterInfo()
5535 for instance in instance_list:
5537 i_hv = cluster.FillHV(instance, skip_globals=True)
5538 i_be = cluster.FillBE(instance)
5539 i_nicp = [cluster.SimpleFillNIC(nic.nicparams) for nic in instance.nics]
5540 for field in self.op.output_fields:
5541 st_match = self._FIELDS_STATIC.Matches(field)
5542 if field in self._SIMPLE_FIELDS:
5543 val = getattr(instance, field)
5544 elif field == "pnode":
5545 val = instance.primary_node
5546 elif field == "snodes":
5547 val = list(instance.secondary_nodes)
5548 elif field == "admin_state":
5549 val = instance.admin_up
5550 elif field == "oper_state":
5551 if instance.primary_node in bad_nodes:
5554 val = bool(live_data.get(instance.name))
5555 elif field == "status":
5556 if instance.primary_node in off_nodes:
5557 val = "ERROR_nodeoffline"
5558 elif instance.primary_node in bad_nodes:
5559 val = "ERROR_nodedown"
5561 running = bool(live_data.get(instance.name))
5563 if instance.admin_up:
5568 if instance.admin_up:
5572 elif field == "oper_ram":
5573 if instance.primary_node in bad_nodes:
5575 elif instance.name in live_data:
5576 val = live_data[instance.name].get("memory", "?")
5579 elif field == "oper_vcpus":
5580 if instance.primary_node in bad_nodes:
5582 elif instance.name in live_data:
5583 val = live_data[instance.name].get("vcpus", "?")
5586 elif field == "vcpus":
5587 val = i_be[constants.BE_VCPUS]
5588 elif field == "disk_template":
5589 val = instance.disk_template
5592 val = instance.nics[0].ip
5595 elif field == "nic_mode":
5597 val = i_nicp[0][constants.NIC_MODE]
5600 elif field == "nic_link":
5602 val = i_nicp[0][constants.NIC_LINK]
5605 elif field == "bridge":
5606 if (instance.nics and
5607 i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
5608 val = i_nicp[0][constants.NIC_LINK]
5611 elif field == "mac":
5613 val = instance.nics[0].mac
5616 elif field == "custom_nicparams":
5617 val = [nic.nicparams for nic in instance.nics]
5618 elif field == "sda_size" or field == "sdb_size":
5619 idx = ord(field[2]) - ord('a')
5621 val = instance.FindDisk(idx).size
5622 except errors.OpPrereqError:
5624 elif field == "disk_usage": # total disk usage per node
5625 disk_sizes = [{'size': disk.size} for disk in instance.disks]
5626 val = _ComputeDiskSize(instance.disk_template, disk_sizes)
5627 elif field == "tags":
5628 val = list(instance.GetTags())
5629 elif field == "custom_hvparams":
5630 val = instance.hvparams # not filled!
5631 elif field == "hvparams":
5633 elif (field.startswith(HVPREFIX) and
5634 field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
5635 field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
5636 val = i_hv.get(field[len(HVPREFIX):], None)
5637 elif field == "custom_beparams":
5638 val = instance.beparams
5639 elif field == "beparams":
5641 elif (field.startswith(BEPREFIX) and
5642 field[len(BEPREFIX):] in constants.BES_PARAMETERS):
5643 val = i_be.get(field[len(BEPREFIX):], None)
5644 elif st_match and st_match.groups():
5645 # matches a variable list
5646 st_groups = st_match.groups()
5647 if st_groups and st_groups[0] == "disk":
5648 if st_groups[1] == "count":
5649 val = len(instance.disks)
5650 elif st_groups[1] == "sizes":
5651 val = [disk.size for disk in instance.disks]
5652 elif st_groups[1] == "size":
5654 val = instance.FindDisk(st_groups[2]).size
5655 except errors.OpPrereqError:
5658 assert False, "Unhandled disk parameter"
5659 elif st_groups[0] == "nic":
5660 if st_groups[1] == "count":
5661 val = len(instance.nics)
5662 elif st_groups[1] == "macs":
5663 val = [nic.mac for nic in instance.nics]
5664 elif st_groups[1] == "ips":
5665 val = [nic.ip for nic in instance.nics]
5666 elif st_groups[1] == "modes":
5667 val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
5668 elif st_groups[1] == "links":
5669 val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
5670 elif st_groups[1] == "bridges":
5673 if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
5674 val.append(nicp[constants.NIC_LINK])
5679 nic_idx = int(st_groups[2])
5680 if nic_idx >= len(instance.nics):
5683 if st_groups[1] == "mac":
5684 val = instance.nics[nic_idx].mac
5685 elif st_groups[1] == "ip":
5686 val = instance.nics[nic_idx].ip
5687 elif st_groups[1] == "mode":
5688 val = i_nicp[nic_idx][constants.NIC_MODE]
5689 elif st_groups[1] == "link":
5690 val = i_nicp[nic_idx][constants.NIC_LINK]
5691 elif st_groups[1] == "bridge":
5692 nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
5693 if nic_mode == constants.NIC_MODE_BRIDGED:
5694 val = i_nicp[nic_idx][constants.NIC_LINK]
5698 assert False, "Unhandled NIC parameter"
5700 assert False, ("Declared but unhandled variable parameter '%s'" %
5703 assert False, "Declared but unhandled parameter '%s'" % field
5710 class LUFailoverInstance(LogicalUnit):
5711 """Failover an instance.
5714 HPATH = "instance-failover"
5715 HTYPE = constants.HTYPE_INSTANCE
5718 ("ignore_consistency", False, ht.TBool),
5723 def ExpandNames(self):
5724 self._ExpandAndLockInstance()
5725 self.needed_locks[locking.LEVEL_NODE] = []
5726 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5728 def DeclareLocks(self, level):
5729 if level == locking.LEVEL_NODE:
5730 self._LockInstancesNodes()
5732 def BuildHooksEnv(self):
5735 This runs on master, primary and secondary nodes of the instance.
5738 instance = self.instance
5739 source_node = instance.primary_node
5740 target_node = instance.secondary_nodes[0]
5742 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5743 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5744 "OLD_PRIMARY": source_node,
5745 "OLD_SECONDARY": target_node,
5746 "NEW_PRIMARY": target_node,
5747 "NEW_SECONDARY": source_node,
5749 env.update(_BuildInstanceHookEnvByObject(self, instance))
5750 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5752 nl_post.append(source_node)
5753 return env, nl, nl_post
5755 def CheckPrereq(self):
5756 """Check prerequisites.
5758 This checks that the instance is in the cluster.
5761 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5762 assert self.instance is not None, \
5763 "Cannot retrieve locked instance %s" % self.op.instance_name
5765 bep = self.cfg.GetClusterInfo().FillBE(instance)
5766 if instance.disk_template not in constants.DTS_NET_MIRROR:
5767 raise errors.OpPrereqError("Instance's disk layout is not"
5768 " network mirrored, cannot failover.",
5771 secondary_nodes = instance.secondary_nodes
5772 if not secondary_nodes:
5773 raise errors.ProgrammerError("no secondary node but using "
5774 "a mirrored disk template")
5776 target_node = secondary_nodes[0]
5777 _CheckNodeOnline(self, target_node)
5778 _CheckNodeNotDrained(self, target_node)
5779 if instance.admin_up:
5780 # check memory requirements on the secondary node
5781 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5782 instance.name, bep[constants.BE_MEMORY],
5783 instance.hypervisor)
5785 self.LogInfo("Not checking memory on the secondary node as"
5786 " instance will not be started")
5788 # check bridge existence
5789 _CheckInstanceBridgesExist(self, instance, node=target_node)
5791 def Exec(self, feedback_fn):
5792 """Failover an instance.
5794 The failover is done by shutting it down on its present node and
5795 starting it on the secondary.
5798 instance = self.instance
5799 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
5801 source_node = instance.primary_node
5802 target_node = instance.secondary_nodes[0]
5804 if instance.admin_up:
5805 feedback_fn("* checking disk consistency between source and target")
5806 for dev in instance.disks:
5807 # for drbd, these are drbd over lvm
5808 if not _CheckDiskConsistency(self, dev, target_node, False):
5809 if not self.op.ignore_consistency:
5810 raise errors.OpExecError("Disk %s is degraded on target node,"
5811 " aborting failover." % dev.iv_name)
5813 feedback_fn("* not checking disk consistency as instance is not running")
5815 feedback_fn("* shutting down instance on source node")
5816 logging.info("Shutting down instance %s on node %s",
5817 instance.name, source_node)
5819 result = self.rpc.call_instance_shutdown(source_node, instance,
5820 self.op.shutdown_timeout)
5821 msg = result.fail_msg
5823 if self.op.ignore_consistency or primary_node.offline:
5824 self.proc.LogWarning("Could not shut down instance %s on node %s."
5825 " Proceeding anyway. Please make sure node"
5826 " %s is down. Error details: %s",
5827 instance.name, source_node, source_node, msg)
5829 raise errors.OpExecError("Could not shut down instance %s on"
5831 (instance.name, source_node, msg))
5833 feedback_fn("* deactivating the instance's disks on source node")
5834 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5835 raise errors.OpExecError("Can't shut down the instance's disks.")
5837 instance.primary_node = target_node
5838 # distribute new instance config to the other nodes
5839 self.cfg.Update(instance, feedback_fn)
5841 # Only start the instance if it's marked as up
5842 if instance.admin_up:
5843 feedback_fn("* activating the instance's disks on target node")
5844 logging.info("Starting instance %s on node %s",
5845 instance.name, target_node)
5847 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5848 ignore_secondaries=True)
5850 _ShutdownInstanceDisks(self, instance)
5851 raise errors.OpExecError("Can't activate the instance's disks")
5853 feedback_fn("* starting the instance on the target node")
5854 result = self.rpc.call_instance_start(target_node, instance, None, None)
5855 msg = result.fail_msg
5857 _ShutdownInstanceDisks(self, instance)
5858 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5859 (instance.name, target_node, msg))
5862 class LUMigrateInstance(LogicalUnit):
5863 """Migrate an instance.
5865 This is migration without shutting down, compared to the failover,
5866 which is done with shutdown.
5869 HPATH = "instance-migrate"
5870 HTYPE = constants.HTYPE_INSTANCE
5875 ("cleanup", False, ht.TBool),
5880 def ExpandNames(self):
5881 self._ExpandAndLockInstance()
5883 self.needed_locks[locking.LEVEL_NODE] = []
5884 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5886 self._migrater = TLMigrateInstance(self, self.op.instance_name,
5888 self.tasklets = [self._migrater]
5890 def DeclareLocks(self, level):
5891 if level == locking.LEVEL_NODE:
5892 self._LockInstancesNodes()
5894 def BuildHooksEnv(self):
5897 This runs on master, primary and secondary nodes of the instance.
5900 instance = self._migrater.instance
5901 source_node = instance.primary_node
5902 target_node = instance.secondary_nodes[0]
5903 env = _BuildInstanceHookEnvByObject(self, instance)
5904 env["MIGRATE_LIVE"] = self._migrater.live
5905 env["MIGRATE_CLEANUP"] = self.op.cleanup
5907 "OLD_PRIMARY": source_node,
5908 "OLD_SECONDARY": target_node,
5909 "NEW_PRIMARY": target_node,
5910 "NEW_SECONDARY": source_node,
5912 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5914 nl_post.append(source_node)
5915 return env, nl, nl_post
5918 class LUMoveInstance(LogicalUnit):
5919 """Move an instance by data-copying.
5922 HPATH = "instance-move"
5923 HTYPE = constants.HTYPE_INSTANCE
5926 ("target_node", ht.NoDefault, ht.TNonEmptyString),
5931 def ExpandNames(self):
5932 self._ExpandAndLockInstance()
5933 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5934 self.op.target_node = target_node
5935 self.needed_locks[locking.LEVEL_NODE] = [target_node]
5936 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5938 def DeclareLocks(self, level):
5939 if level == locking.LEVEL_NODE:
5940 self._LockInstancesNodes(primary_only=True)
5942 def BuildHooksEnv(self):
5945 This runs on master, primary and secondary nodes of the instance.
5949 "TARGET_NODE": self.op.target_node,
5950 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5952 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5953 nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5954 self.op.target_node]
5957 def CheckPrereq(self):
5958 """Check prerequisites.
5960 This checks that the instance is in the cluster.
5963 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5964 assert self.instance is not None, \
5965 "Cannot retrieve locked instance %s" % self.op.instance_name
5967 node = self.cfg.GetNodeInfo(self.op.target_node)
5968 assert node is not None, \
5969 "Cannot retrieve locked node %s" % self.op.target_node
5971 self.target_node = target_node = node.name
5973 if target_node == instance.primary_node:
5974 raise errors.OpPrereqError("Instance %s is already on the node %s" %
5975 (instance.name, target_node),
5978 bep = self.cfg.GetClusterInfo().FillBE(instance)
5980 for idx, dsk in enumerate(instance.disks):
5981 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5982 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5983 " cannot copy" % idx, errors.ECODE_STATE)
5985 _CheckNodeOnline(self, target_node)
5986 _CheckNodeNotDrained(self, target_node)
5987 _CheckNodeVmCapable(self, target_node)
5989 if instance.admin_up:
5990 # check memory requirements on the target node
5991 _CheckNodeFreeMemory(self, target_node, "moving instance %s" %
5992 instance.name, bep[constants.BE_MEMORY],
5993 instance.hypervisor)
5995 self.LogInfo("Not checking memory on the target node as"
5996 " instance will not be started")
5998 # check bridge existence
5999 _CheckInstanceBridgesExist(self, instance, node=target_node)
6001 def Exec(self, feedback_fn):
6002 """Move an instance.
6004 The move is done by shutting it down on its present node, copying
6005 the data over (slow) and starting it on the new node.
6008 instance = self.instance
6010 source_node = instance.primary_node
6011 target_node = self.target_node
6013 self.LogInfo("Shutting down instance %s on source node %s",
6014 instance.name, source_node)
6016 result = self.rpc.call_instance_shutdown(source_node, instance,
6017 self.op.shutdown_timeout)
6018 msg = result.fail_msg
6020 if self.op.ignore_consistency:
6021 self.proc.LogWarning("Could not shut down instance %s on node %s."
6022 " Proceeding anyway. Please make sure node"
6023 " %s is down. Error details: %s",
6024 instance.name, source_node, source_node, msg)
6026 raise errors.OpExecError("Could not shut down instance %s on"
6028 (instance.name, source_node, msg))
6030 # create the target disks
6032 _CreateDisks(self, instance, target_node=target_node)
6033 except errors.OpExecError:
6034 self.LogWarning("Device creation failed, reverting...")
6036 _RemoveDisks(self, instance, target_node=target_node)
6038 self.cfg.ReleaseDRBDMinors(instance.name)
6041 cluster_name = self.cfg.GetClusterInfo().cluster_name
6044 # activate, get path, copy the data over
6045 for idx, disk in enumerate(instance.disks):
6046 self.LogInfo("Copying data for disk %d", idx)
6047 result = self.rpc.call_blockdev_assemble(target_node, disk,
6048 instance.name, True)
6050 self.LogWarning("Can't assemble newly created disk %d: %s",
6051 idx, result.fail_msg)
6052 errs.append(result.fail_msg)
6054 dev_path = result.payload
6055 result = self.rpc.call_blockdev_export(source_node, disk,
6056 target_node, dev_path,
6059 self.LogWarning("Can't copy data over for disk %d: %s",
6060 idx, result.fail_msg)
6061 errs.append(result.fail_msg)
6065 self.LogWarning("Some disks failed to copy, aborting")
6067 _RemoveDisks(self, instance, target_node=target_node)
6069 self.cfg.ReleaseDRBDMinors(instance.name)
6070 raise errors.OpExecError("Errors during disk copy: %s" %
6073 instance.primary_node = target_node
6074 self.cfg.Update(instance, feedback_fn)
6076 self.LogInfo("Removing the disks on the original node")
6077 _RemoveDisks(self, instance, target_node=source_node)
6079 # Only start the instance if it's marked as up
6080 if instance.admin_up:
6081 self.LogInfo("Starting instance %s on node %s",
6082 instance.name, target_node)
6084 disks_ok, _ = _AssembleInstanceDisks(self, instance,
6085 ignore_secondaries=True)
6087 _ShutdownInstanceDisks(self, instance)
6088 raise errors.OpExecError("Can't activate the instance's disks")
6090 result = self.rpc.call_instance_start(target_node, instance, None, None)
6091 msg = result.fail_msg
6093 _ShutdownInstanceDisks(self, instance)
6094 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6095 (instance.name, target_node, msg))
6098 class LUMigrateNode(LogicalUnit):
6099 """Migrate all instances from a node.
6102 HPATH = "node-migrate"
6103 HTYPE = constants.HTYPE_NODE
6111 def ExpandNames(self):
6112 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6114 self.needed_locks = {
6115 locking.LEVEL_NODE: [self.op.node_name],
6118 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6120 # Create tasklets for migrating instances for all instances on this node
6124 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
6125 logging.debug("Migrating instance %s", inst.name)
6126 names.append(inst.name)
6128 tasklets.append(TLMigrateInstance(self, inst.name, False))
6130 self.tasklets = tasklets
6132 # Declare instance locks
6133 self.needed_locks[locking.LEVEL_INSTANCE] = names
6135 def DeclareLocks(self, level):
6136 if level == locking.LEVEL_NODE:
6137 self._LockInstancesNodes()
6139 def BuildHooksEnv(self):
6142 This runs on the master, the primary and all the secondaries.
6146 "NODE_NAME": self.op.node_name,
6149 nl = [self.cfg.GetMasterNode()]
6151 return (env, nl, nl)
6154 class TLMigrateInstance(Tasklet):
6155 """Tasklet class for instance migration.
6158 @ivar live: whether the migration will be done live or non-live;
6159 this variable is initialized only after CheckPrereq has run
6162 def __init__(self, lu, instance_name, cleanup):
6163 """Initializes this class.
6166 Tasklet.__init__(self, lu)
6169 self.instance_name = instance_name
6170 self.cleanup = cleanup
6171 self.live = False # will be overridden later
6173 def CheckPrereq(self):
6174 """Check prerequisites.
6176 This checks that the instance is in the cluster.
6179 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
6180 instance = self.cfg.GetInstanceInfo(instance_name)
6181 assert instance is not None
6183 if instance.disk_template != constants.DT_DRBD8:
6184 raise errors.OpPrereqError("Instance's disk layout is not"
6185 " drbd8, cannot migrate.", errors.ECODE_STATE)
6187 secondary_nodes = instance.secondary_nodes
6188 if not secondary_nodes:
6189 raise errors.ConfigurationError("No secondary node but using"
6190 " drbd8 disk template")
6192 i_be = self.cfg.GetClusterInfo().FillBE(instance)
6194 target_node = secondary_nodes[0]
6195 # check memory requirements on the secondary node
6196 _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
6197 instance.name, i_be[constants.BE_MEMORY],
6198 instance.hypervisor)
6200 # check bridge existence
6201 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
6203 if not self.cleanup:
6204 _CheckNodeNotDrained(self.lu, target_node)
6205 result = self.rpc.call_instance_migratable(instance.primary_node,
6207 result.Raise("Can't migrate, please use failover",
6208 prereq=True, ecode=errors.ECODE_STATE)
6210 self.instance = instance
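# reconcile the obsolete boolean 'live' parameter with the newer 'mode'
# parameter: only one of them may be given, 'live' is translated into the
# equivalent mode, and if neither is given the hypervisor default applies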
6212 if self.lu.op.live is not None and self.lu.op.mode is not None:
6213 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
6214 " parameters are accepted",
6216 if self.lu.op.live is not None:
6218 self.lu.op.mode = constants.HT_MIGRATION_LIVE
6220 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
6221 # reset the 'live' parameter to None so that repeated
6222 # invocations of CheckPrereq do not raise an exception
6223 self.lu.op.live = None
6224 elif self.lu.op.mode is None:
6225 # read the default value from the hypervisor
6226 i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
6227 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
6229 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
6231 def _WaitUntilSync(self):
6232 """Poll with custom rpc for disk sync.
6234 This uses our own step-based rpc call.
6237 self.feedback_fn("* wait until resync is done")
6241 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
6243 self.instance.disks)
6245 for node, nres in result.items():
6246 nres.Raise("Cannot resync disks on node %s" % node)
6247 node_done, node_percent = nres.payload
6248 all_done = all_done and node_done
6249 if node_percent is not None:
6250 min_percent = min(min_percent, node_percent)
6252 if min_percent < 100:
6253 self.feedback_fn(" - progress: %.1f%%" % min_percent)
6256 def _EnsureSecondary(self, node):
6257 """Demote a node to secondary.
6260 self.feedback_fn("* switching node %s to secondary mode" % node)
6262 for dev in self.instance.disks:
6263 self.cfg.SetDiskID(dev, node)
6265 result = self.rpc.call_blockdev_close(node, self.instance.name,
6266 self.instance.disks)
6267 result.Raise("Cannot change disk to secondary on node %s" % node)
6269 def _GoStandalone(self):
6270 """Disconnect from the network.
6273 self.feedback_fn("* changing into standalone mode")
6274 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
6275 self.instance.disks)
6276 for node, nres in result.items():
6277 nres.Raise("Cannot disconnect disks on node %s" % node)
6279 def _GoReconnect(self, multimaster):
6280 """Reconnect to the network.
6286 msg = "single-master"
6287 self.feedback_fn("* changing disks into %s mode" % msg)
6288 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
6289 self.instance.disks,
6290 self.instance.name, multimaster)
6291 for node, nres in result.items():
6292 nres.Raise("Cannot change disks config on node %s" % node)
6294 def _ExecCleanup(self):
6295 """Try to cleanup after a failed migration.
6297 The cleanup is done by:
6298 - check that the instance is running only on one node
6299 (and update the config if needed)
6300 - change disks on its secondary node to secondary
6301 - wait until disks are fully synchronized
6302 - disconnect from the network
6303 - change disks into single-master mode
6304 - wait again until disks are fully synchronized
6307 instance = self.instance
6308 target_node = self.target_node
6309 source_node = self.source_node
6311 # check running on only one node
6312 self.feedback_fn("* checking where the instance actually runs"
6313 " (if this hangs, the hypervisor might be in"
6315 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
6316 for node, result in ins_l.items():
6317 result.Raise("Can't contact node %s" % node)
6319 runningon_source = instance.name in ins_l[source_node].payload
6320 runningon_target = instance.name in ins_l[target_node].payload
6322 if runningon_source and runningon_target:
6323 raise errors.OpExecError("Instance seems to be running on two nodes,"
6324 " or the hypervisor is confused. You will have"
6325 " to ensure manually that it runs only on one"
6326 " and restart this operation.")
6328 if not (runningon_source or runningon_target):
6329 raise errors.OpExecError("Instance does not seem to be running at all."
6330 " In this case, it's safer to repair by"
6331 " running 'gnt-instance stop' to ensure disk"
6332 " shutdown, and then restarting it.")
6334 if runningon_target:
6335 # the migration has actually succeeded; we need to update the config
6336 self.feedback_fn("* instance running on secondary node (%s),"
6337 " updating config" % target_node)
6338 instance.primary_node = target_node
6339 self.cfg.Update(instance, self.feedback_fn)
6340 demoted_node = source_node
6342 self.feedback_fn("* instance confirmed to be running on its"
6343 " primary node (%s)" % source_node)
6344 demoted_node = target_node
6346 self._EnsureSecondary(demoted_node)
6348 self._WaitUntilSync()
6349 except errors.OpExecError:
6350 # we ignore errors here, since if the device is standalone, it
6351 # won't be able to sync
6353 self._GoStandalone()
6354 self._GoReconnect(False)
6355 self._WaitUntilSync()
6357 self.feedback_fn("* done")
6359 def _RevertDiskStatus(self):
6360 """Try to revert the disk status after a failed migration.
6363 target_node = self.target_node
6365 self._EnsureSecondary(target_node)
6366 self._GoStandalone()
6367 self._GoReconnect(False)
6368 self._WaitUntilSync()
6369 except errors.OpExecError, err:
6370 self.lu.LogWarning("Migration failed and I can't reconnect the"
6371 " drives: error '%s'\n"
6372 "Please look and recover the instance status" %
6375 def _AbortMigration(self):
6376 """Call the hypervisor code to abort a started migration.
6379 instance = self.instance
6380 target_node = self.target_node
6381 migration_info = self.migration_info
6383 abort_result = self.rpc.call_finalize_migration(target_node,
6387 abort_msg = abort_result.fail_msg
6389 logging.error("Aborting migration failed on target node %s: %s",
6390 target_node, abort_msg)
6391 # Don't raise an exception here, as we still have to try to revert the
6392 # disk status, even if this step failed.
6394 def _ExecMigration(self):
6395 """Migrate an instance.
6397 The migration is done by:
6398 - change the disks into dual-master mode
6399 - wait until disks are fully synchronized again
6400 - migrate the instance
6401 - change disks on the new secondary node (the old primary) to secondary
6402 - wait until disks are fully synchronized
6403 - change disks into single-master mode
6406 instance = self.instance
6407 target_node = self.target_node
6408 source_node = self.source_node
6410 self.feedback_fn("* checking disk consistency between source and target")
6411 for dev in instance.disks:
6412 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6413 raise errors.OpExecError("Disk %s is degraded or not fully"
6414 " synchronized on target node,"
6415 " aborting migrate." % dev.iv_name)
6417 # First get the migration information from the remote node
6418 result = self.rpc.call_migration_info(source_node, instance)
6419 msg = result.fail_msg
6421 log_err = ("Failed fetching source migration information from %s: %s" %
6423 logging.error(log_err)
6424 raise errors.OpExecError(log_err)
6426 self.migration_info = migration_info = result.payload
6428 # Then switch the disks to master/master mode
6429 self._EnsureSecondary(target_node)
6430 self._GoStandalone()
6431 self._GoReconnect(True)
6432 self._WaitUntilSync()
6434 self.feedback_fn("* preparing %s to accept the instance" % target_node)
6435 result = self.rpc.call_accept_instance(target_node,
6438 self.nodes_ip[target_node])
6440 msg = result.fail_msg
6442 logging.error("Instance pre-migration failed, trying to revert"
6443 " disk status: %s", msg)
6444 self.feedback_fn("Pre-migration failed, aborting")
6445 self._AbortMigration()
6446 self._RevertDiskStatus()
6447 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6448 (instance.name, msg))
6450 self.feedback_fn("* migrating instance to %s" % target_node)
6452 result = self.rpc.call_instance_migrate(source_node, instance,
6453 self.nodes_ip[target_node],
6455 msg = result.fail_msg
6457 logging.error("Instance migration failed, trying to revert"
6458 " disk status: %s", msg)
6459 self.feedback_fn("Migration failed, aborting")
6460 self._AbortMigration()
6461 self._RevertDiskStatus()
6462 raise errors.OpExecError("Could not migrate instance %s: %s" %
6463 (instance.name, msg))
6466 instance.primary_node = target_node
6467 # distribute new instance config to the other nodes
6468 self.cfg.Update(instance, self.feedback_fn)
6470 result = self.rpc.call_finalize_migration(target_node,
6474 msg = result.fail_msg
6476 logging.error("Instance migration succeeded, but finalization failed:"
6478 raise errors.OpExecError("Could not finalize instance migration: %s" %
6481 self._EnsureSecondary(source_node)
6482 self._WaitUntilSync()
6483 self._GoStandalone()
6484 self._GoReconnect(False)
6485 self._WaitUntilSync()
6487 self.feedback_fn("* done")
6489 def Exec(self, feedback_fn):
6490 """Perform the migration.
6493 feedback_fn("Migrating instance %s" % self.instance.name)
6495 self.feedback_fn = feedback_fn
6497 self.source_node = self.instance.primary_node
6498 self.target_node = self.instance.secondary_nodes[0]
6499 self.all_nodes = [self.source_node, self.target_node]
6501 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
6502 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
6506 return self._ExecCleanup()
6508 return self._ExecMigration()
6511 def _CreateBlockDev(lu, node, instance, device, force_create,
6513 """Create a tree of block devices on a given node.
6515 If this device type has to be created on secondaries, create it and
6518 If not, just recurse to its children, keeping the same 'force' value.
6520 @param lu: the lu on whose behalf we execute
6521 @param node: the node on which to create the device
6522 @type instance: L{objects.Instance}
6523 @param instance: the instance which owns the device
6524 @type device: L{objects.Disk}
6525 @param device: the device to create
6526 @type force_create: boolean
6527 @param force_create: whether to force creation of this device; this
6528 will be changed to True whenever we find a device which has the
6529 CreateOnSecondary() attribute set
6530 @param info: the extra 'metadata' we should attach to the device
6531 (this will be represented as a LVM tag)
6532 @type force_open: boolean
6533 @param force_open: this parameter will be passed to the
6534 L{backend.BlockdevCreate} function where it specifies
6535 whether we run on primary or not, and it affects both
6536 the child assembly and the device's own Open() execution
6539 if device.CreateOnSecondary():
6543 for child in device.children:
6544 _CreateBlockDev(lu, node, instance, child, force_create,
6547 if not force_create:
6550 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
6553 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
6554 """Create a single block device on a given node.
6556 This will not recurse over children of the device, so they must be
6559 @param lu: the lu on whose behalf we execute
6560 @param node: the node on which to create the device
6561 @type instance: L{objects.Instance}
6562 @param instance: the instance which owns the device
6563 @type device: L{objects.Disk}
6564 @param device: the device to create
6565 @param info: the extra 'metadata' we should attach to the device
6566 (this will be represented as a LVM tag)
6567 @type force_open: boolean
6568 @param force_open: this parameter will be passed to the
6569 L{backend.BlockdevCreate} function where it specifies
6570 whether we run on primary or not, and it affects both
6571 the child assembly and the device's own Open() execution
6574 lu.cfg.SetDiskID(device, node)
6575 result = lu.rpc.call_blockdev_create(node, device, device.size,
6576 instance.name, force_open, info)
6577 result.Raise("Can't create block device %s on"
6578 " node %s for instance %s" % (device, node, instance.name))
6579 if device.physical_id is None:
6580 device.physical_id = result.payload
6583 def _GenerateUniqueNames(lu, exts):
6584 """Generate a suitable LV name.
6586 This will generate a logical volume name for the given instance.
6591 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
6592 results.append("%s%s" % (new_id, val))
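# e.g. (illustrative): an ext of ".disk0" produces a name of the form
# "<unique-id>.disk0"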
6596 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
6598 """Generate a drbd8 device complete with its children.
6601 port = lu.cfg.AllocatePort()
6602 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
6603 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
6604 logical_id=(vgname, names[0]))
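# DRBD8 needs a small companion LV for its metadata; the fixed 128 MiB
# below is the same per-disk overhead accounted for in _ComputeDiskSize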
6605 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6606 logical_id=(vgname, names[1]))
6607 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
6608 logical_id=(primary, secondary, port,
6611 children=[dev_data, dev_meta],
6616 def _GenerateDiskTemplate(lu, template_name,
6617 instance_name, primary_node,
6618 secondary_nodes, disk_info,
6619 file_storage_dir, file_driver,
6620 base_index, feedback_fn):
6621 """Generate the entire disk layout for a given template type.
6624 # TODO: compute space requirements
6626 vgname = lu.cfg.GetVGName()
6627 disk_count = len(disk_info)
6629 if template_name == constants.DT_DISKLESS:
6631 elif template_name == constants.DT_PLAIN:
6632 if len(secondary_nodes) != 0:
6633 raise errors.ProgrammerError("Wrong template configuration")
6635 names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6636 for i in range(disk_count)])
6637 for idx, disk in enumerate(disk_info):
6638 disk_index = idx + base_index
6639 vg = disk.get("vg", vgname)
6640 feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
6641 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
6642 logical_id=(vg, names[idx]),
6643 iv_name="disk/%d" % disk_index,
6645 disks.append(disk_dev)
6646 elif template_name == constants.DT_DRBD8:
6647 if len(secondary_nodes) != 1:
6648 raise errors.ProgrammerError("Wrong template configuration")
6649 remote_node = secondary_nodes[0]
6650 minors = lu.cfg.AllocateDRBDMinor(
6651 [primary_node, remote_node] * len(disk_info), instance_name)
6654 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6655 for i in range(disk_count)]):
6656 names.append(lv_prefix + "_data")
6657 names.append(lv_prefix + "_meta")
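# each DRBD8 disk is thus backed by a pair of LVs: <prefix>_data for the
# payload and <prefix>_meta for the DRBD metadata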
6658 for idx, disk in enumerate(disk_info):
6659 disk_index = idx + base_index
6660 vg = disk.get("vg", vgname)
6661 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
6662 disk["size"], vg, names[idx*2:idx*2+2],
6663 "disk/%d" % disk_index,
6664 minors[idx*2], minors[idx*2+1])
6665 disk_dev.mode = disk["mode"]
6666 disks.append(disk_dev)
6667 elif template_name == constants.DT_FILE:
6668 if len(secondary_nodes) != 0:
6669 raise errors.ProgrammerError("Wrong template configuration")
6671 _RequireFileStorage()
6673 for idx, disk in enumerate(disk_info):
6674 disk_index = idx + base_index
6675 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6676 iv_name="disk/%d" % disk_index,
6677 logical_id=(file_driver,
6678 "%s/disk%d" % (file_storage_dir,
6681 disks.append(disk_dev)
6683 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
6687 def _GetInstanceInfoText(instance):
6688 """Compute that text that should be added to the disk's metadata.
6691 return "originstname+%s" % instance.name
6694 def _CalcEta(time_taken, written, total_size):
6695 """Calculates the ETA based on size written and total size.
6697 @param time_taken: The time taken so far
6698 @param written: amount written so far
6699 @param total_size: The total size of data to be written
6700 @return: The remaining time in seconds
6703 avg_time = time_taken / float(written)
6704 return (total_size - written) * avg_time
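# worked example (illustrative numbers only): if 250 MiB of a 1000 MiB
# device were written in 50 seconds, avg_time is 0.2 s/MiB and the
# remaining 750 MiB give an ETA of 750 * 0.2 = 150 seconds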
6707 def _WipeDisks(lu, instance):
6708 """Wipes instance disks.
6710 @type lu: L{LogicalUnit}
6711 @param lu: the logical unit on whose behalf we execute
6712 @type instance: L{objects.Instance}
6713 @param instance: the instance whose disks we should create
6714 @return: the success of the wipe
6717 node = instance.primary_node
6718 for idx, device in enumerate(instance.disks):
6719 lu.LogInfo("* Wiping disk %d", idx)
6720 logging.info("Wiping disk %d for instance %s", idx, instance.name)
6722 # The wipe chunk size is MIN_WIPE_CHUNK_PERCENT % of the instance disk
6723 # size, but at most MAX_WIPE_CHUNK
6724 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
6725 constants.MIN_WIPE_CHUNK_PERCENT)
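# e.g. (illustrative, assuming MIN_WIPE_CHUNK_PERCENT were 10): a
# 10240 MiB disk would be wiped in chunks of min(MAX_WIPE_CHUNK, 1024) MiB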
6730 start_time = time.time()
6732 while offset < size:
6733 wipe_size = min(wipe_chunk_size, size - offset)
6734 result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
6735 result.Raise("Could not wipe disk %d at offset %d for size %d" %
6736 (idx, offset, wipe_size))
6739 if now - last_output >= 60:
6740 eta = _CalcEta(now - start_time, offset, size)
6741 lu.LogInfo(" - done: %.1f%% ETA: %s" %
6742 (offset / float(size) * 100, utils.FormatSeconds(eta)))
6746 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
6747 """Create all disks for an instance.
6749 This abstracts away some work from AddInstance.
6751 @type lu: L{LogicalUnit}
6752 @param lu: the logical unit on whose behalf we execute
6753 @type instance: L{objects.Instance}
6754 @param instance: the instance whose disks we should create
6756 @param to_skip: list of indices to skip
6757 @type target_node: string
6758 @param target_node: if passed, overrides the target node for creation
6760 @return: the success of the creation
6763 info = _GetInstanceInfoText(instance)
6764 if target_node is None:
6765 pnode = instance.primary_node
6766 all_nodes = instance.all_nodes
6771 if instance.disk_template == constants.DT_FILE:
6772 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6773 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
6775 result.Raise("Failed to create directory '%s' on"
6776 " node %s" % (file_storage_dir, pnode))
6778 # Note: this needs to be kept in sync with adding of disks in
6779 # LUSetInstanceParams
6780 for idx, device in enumerate(instance.disks):
6781 if to_skip and idx in to_skip:
6783 logging.info("Creating volume %s for instance %s",
6784 device.iv_name, instance.name)
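# force creation and opening only on the primary node; on the other
# nodes _CreateBlockDev decides per device via CreateOnSecondary()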
6786 for node in all_nodes:
6787 f_create = node == pnode
6788 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
6791 def _RemoveDisks(lu, instance, target_node=None):
6792 """Remove all disks for an instance.
6794 This abstracts away some work from `AddInstance()` and
6795 `RemoveInstance()`. Note that in case some of the devices couldn't
6796 be removed, the removal will continue with the other ones (compare
6797 with `_CreateDisks()`).
6799 @type lu: L{LogicalUnit}
6800 @param lu: the logical unit on whose behalf we execute
6801 @type instance: L{objects.Instance}
6802 @param instance: the instance whose disks we should remove
6803 @type target_node: string
6804 @param target_node: used to override the node on which to remove the disks
6806 @return: the success of the removal
6809 logging.info("Removing block devices for instance %s", instance.name)
6812 for device in instance.disks:
6814 edata = [(target_node, device)]
6816 edata = device.ComputeNodeTree(instance.primary_node)
6817 for node, disk in edata:
6818 lu.cfg.SetDiskID(disk, node)
6819 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
6821 lu.LogWarning("Could not remove block device %s on node %s,"
6822 " continuing anyway: %s", device.iv_name, node, msg)
6825 if instance.disk_template == constants.DT_FILE:
6826 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6830 tgt = instance.primary_node
6831 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
6833 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
6834 file_storage_dir, instance.primary_node, result.fail_msg)
6840 def _ComputeDiskSizePerVG(disk_template, disks):
6841 """Compute disk size requirements in the volume group
6844 def _compute(disks, payload):
6845 """Universal algorithm
6850 vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload
6854 # Required free disk space as a function of disk and swap space
6856 constants.DT_DISKLESS: None,
6857 constants.DT_PLAIN: _compute(disks, 0),
6858 # 128 MB are added for drbd metadata for each disk
6859 constants.DT_DRBD8: _compute(disks, 128),
6860 constants.DT_FILE: None,
6863 if disk_template not in req_size_dict:
6864 raise errors.ProgrammerError("Disk template '%s' size requirement"
6865 " is unknown" % disk_template)
6867 return req_size_dict[disk_template]
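# illustrative example: plain disks [{"vg": "xenvg", "size": 1024},
# {"vg": "fastvg", "size": 512}] yield {"xenvg": 1024, "fastvg": 512};
# with drbd each disk would add a further 128 MiB to its volume group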
6869 def _ComputeDiskSize(disk_template, disks):
6870 """Compute disk size requirements in the volume group
6873 # Required free disk space as a function of disk and swap space
6875 constants.DT_DISKLESS: None,
6876 constants.DT_PLAIN: sum(d["size"] for d in disks),
6877 # 128 MB are added for drbd metadata for each disk
6878 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
6879 constants.DT_FILE: None,
6882 if disk_template not in req_size_dict:
6883 raise errors.ProgrammerError("Disk template '%s' size requirement"
6884 " is unknown" % disk_template)
6886 return req_size_dict[disk_template]
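# illustrative example: two drbd disks of 1024 and 2048 MiB require
# 1024 + 128 + 2048 + 128 = 3328 MiB in total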
6889 def _CheckHVParams(lu, nodenames, hvname, hvparams):
6890 """Hypervisor parameter validation.
6892 This function abstracts the hypervisor parameter validation to be
6893 used in both instance create and instance modify.
6895 @type lu: L{LogicalUnit}
6896 @param lu: the logical unit for which we check
6897 @type nodenames: list
6898 @param nodenames: the list of nodes on which we should check
6899 @type hvname: string
6900 @param hvname: the name of the hypervisor we should use
6901 @type hvparams: dict
6902 @param hvparams: the parameters which we need to check
6903 @raise errors.OpPrereqError: if the parameters are not valid
6906 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6909 for node in nodenames:
6913 info.Raise("Hypervisor parameter validation failed on node %s" % node)
6916 def _CheckOSParams(lu, required, nodenames, osname, osparams):
6917 """OS parameters validation.
6919 @type lu: L{LogicalUnit}
6920 @param lu: the logical unit for which we check
6921 @type required: boolean
6922 @param required: whether the validation should fail if the OS is not
6924 @type nodenames: list
6925 @param nodenames: the list of nodes on which we should check
6926 @type osname: string
6927 @param osname: the name of the OS we should use
6928 @type osparams: dict
6929 @param osparams: the parameters which we need to check
6930 @raise errors.OpPrereqError: if the parameters are not valid
6933 result = lu.rpc.call_os_validate(required, nodenames, osname,
6934 [constants.OS_VALIDATE_PARAMETERS],
6936 for node, nres in result.items():
6937 # we don't check for offline cases since this should be run only
6938 # against the master node and/or an instance's nodes
6939 nres.Raise("OS Parameters validation failed on node %s" % node)
6940 if not nres.payload:
6941 lu.LogInfo("OS %s not found on node %s, validation skipped",
6945 class LUCreateInstance(LogicalUnit):
6946 """Create an instance.
6949 HPATH = "instance-add"
6950 HTYPE = constants.HTYPE_INSTANCE
6953 ("mode", ht.NoDefault, ht.TElemOf(constants.INSTANCE_CREATE_MODES)),
6954 ("start", True, ht.TBool),
6955 ("wait_for_sync", True, ht.TBool),
6956 ("ip_check", True, ht.TBool),
6957 ("name_check", True, ht.TBool),
6958 ("disks", ht.NoDefault, ht.TListOf(ht.TDict)),
6959 ("nics", ht.NoDefault, ht.TListOf(ht.TDict)),
6960 ("hvparams", ht.EmptyDict, ht.TDict),
6961 ("beparams", ht.EmptyDict, ht.TDict),
6962 ("osparams", ht.EmptyDict, ht.TDict),
6963 ("no_install", None, ht.TMaybeBool),
6964 ("os_type", None, ht.TMaybeString),
6965 ("force_variant", False, ht.TBool),
6966 ("source_handshake", None, ht.TOr(ht.TList, ht.TNone)),
6967 ("source_x509_ca", None, ht.TMaybeString),
6968 ("source_instance_name", None, ht.TMaybeString),
6969 ("src_node", None, ht.TMaybeString),
6970 ("src_path", None, ht.TMaybeString),
6971 ("pnode", None, ht.TMaybeString),
6972 ("snode", None, ht.TMaybeString),
6973 ("iallocator", None, ht.TMaybeString),
6974 ("hypervisor", None, ht.TMaybeString),
6975 ("disk_template", ht.NoDefault, _CheckDiskTemplate),
6976 ("identify_defaults", False, ht.TBool),
6977 ("file_driver", None, ht.TOr(ht.TNone, ht.TElemOf(constants.FILE_DRIVER))),
6978 ("file_storage_dir", None, ht.TMaybeString),
6982 def CheckArguments(self):
6986 # do not require name_check to ease forward/backward compatibility
6988 if self.op.no_install and self.op.start:
6989 self.LogInfo("No-installation mode selected, disabling startup")
6990 self.op.start = False
6991 # validate/normalize the instance name
6992 self.op.instance_name = \
6993 netutils.Hostname.GetNormalizedName(self.op.instance_name)
6995 if self.op.ip_check and not self.op.name_check:
6996 # TODO: make the ip check more flexible and not depend on the name check
6997 raise errors.OpPrereqError("Cannot do ip check without a name check",
7000 # check nics' parameter names
7001 for nic in self.op.nics:
7002 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
7004 # check disks. parameter names and consistent adopt/no-adopt strategy
7005 has_adopt = has_no_adopt = False
7006 for disk in self.op.disks:
7007 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
7012 if has_adopt and has_no_adopt:
7013 raise errors.OpPrereqError("Either all disks are adopted or none is",
7016 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
7017 raise errors.OpPrereqError("Disk adoption is not supported for the"
7018 " '%s' disk template" %
7019 self.op.disk_template,
7021 if self.op.iallocator is not None:
7022 raise errors.OpPrereqError("Disk adoption not allowed with an"
7023 " iallocator script", errors.ECODE_INVAL)
7024 if self.op.mode == constants.INSTANCE_IMPORT:
7025 raise errors.OpPrereqError("Disk adoption not allowed for"
7026 " instance import", errors.ECODE_INVAL)
7028 self.adopt_disks = has_adopt
7030 # instance name verification
7031 if self.op.name_check:
7032 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
7033 self.op.instance_name = self.hostname1.name
7034 # used in CheckPrereq for ip ping check
7035 self.check_ip = self.hostname1.ip
7037 self.check_ip = None
7039 # file storage checks
7040 if (self.op.file_driver and
7041 not self.op.file_driver in constants.FILE_DRIVER):
7042 raise errors.OpPrereqError("Invalid file driver name '%s'" %
7043 self.op.file_driver, errors.ECODE_INVAL)
7045 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
7046 raise errors.OpPrereqError("File storage directory path not absolute",
7049 ### Node/iallocator related checks
7050 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
7052 if self.op.pnode is not None:
7053 if self.op.disk_template in constants.DTS_NET_MIRROR:
7054 if self.op.snode is None:
7055 raise errors.OpPrereqError("The networked disk templates need"
7056 " a mirror node", errors.ECODE_INVAL)
7058 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
7060 self.op.snode = None
7062 self._cds = _GetClusterDomainSecret()
7064 if self.op.mode == constants.INSTANCE_IMPORT:
7065 # On import force_variant must be True, because if we forced it at
7066 # initial install, our only chance when importing it back is that it
7068 self.op.force_variant = True
7070 if self.op.no_install:
7071 self.LogInfo("No-installation mode has no effect during import")
7073 elif self.op.mode == constants.INSTANCE_CREATE:
7074 if self.op.os_type is None:
7075 raise errors.OpPrereqError("No guest OS specified",
7077 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
7078 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
7079 " installation" % self.op.os_type,
7081 if self.op.disk_template is None:
7082 raise errors.OpPrereqError("No disk template specified",
7085 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7086 # Check handshake to ensure both clusters have the same domain secret
7087 src_handshake = self.op.source_handshake
7088 if not src_handshake:
7089 raise errors.OpPrereqError("Missing source handshake",
7092 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
7095 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
7098 # Load and check source CA
7099 self.source_x509_ca_pem = self.op.source_x509_ca
7100 if not self.source_x509_ca_pem:
7101 raise errors.OpPrereqError("Missing source X509 CA",
7105 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7107 except OpenSSL.crypto.Error, err:
7108 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7109 (err, ), errors.ECODE_INVAL)
7111 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7112 if errcode is not None:
7113 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7116 self.source_x509_ca = cert
7118 src_instance_name = self.op.source_instance_name
7119 if not src_instance_name:
7120 raise errors.OpPrereqError("Missing source instance name",
7123 self.source_instance_name = \
7124 netutils.GetHostname(name=src_instance_name).name
7127 raise errors.OpPrereqError("Invalid instance creation mode %r" %
7128 self.op.mode, errors.ECODE_INVAL)
7130 def ExpandNames(self):
7131 """ExpandNames for CreateInstance.
7133 Figure out the right locks for instance creation.
7136 self.needed_locks = {}
7138 instance_name = self.op.instance_name
7139 # this is just a preventive check, but someone might still add this
7140 # instance in the meantime, and creation will fail at lock-add time
7141 if instance_name in self.cfg.GetInstanceList():
7142 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7143 instance_name, errors.ECODE_EXISTS)
7145 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7147 if self.op.iallocator:
7148 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7150 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7151 nodelist = [self.op.pnode]
7152 if self.op.snode is not None:
7153 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7154 nodelist.append(self.op.snode)
7155 self.needed_locks[locking.LEVEL_NODE] = nodelist
7157 # in case of import lock the source node too
7158 if self.op.mode == constants.INSTANCE_IMPORT:
7159 src_node = self.op.src_node
7160 src_path = self.op.src_path
7162 if src_path is None:
7163 self.op.src_path = src_path = self.op.instance_name
7165 if src_node is None:
7166 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7167 self.op.src_node = None
7168 if os.path.isabs(src_path):
7169 raise errors.OpPrereqError("Importing an instance from an absolute"
7170 " path requires a source node option.",
7173 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7174 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7175 self.needed_locks[locking.LEVEL_NODE].append(src_node)
7176 if not os.path.isabs(src_path):
7177 self.op.src_path = src_path = \
7178 utils.PathJoin(constants.EXPORT_DIR, src_path)
7180 def _RunAllocator(self):
7181 """Run the allocator based on input opcode.
7184 nics = [n.ToDict() for n in self.nics]
7185 ial = IAllocator(self.cfg, self.rpc,
7186 mode=constants.IALLOCATOR_MODE_ALLOC,
7187 name=self.op.instance_name,
7188 disk_template=self.op.disk_template,
7191 vcpus=self.be_full[constants.BE_VCPUS],
7192 mem_size=self.be_full[constants.BE_MEMORY],
7195 hypervisor=self.op.hypervisor,
7198 ial.Run(self.op.iallocator)
7201 raise errors.OpPrereqError("Can't compute nodes using"
7202 " iallocator '%s': %s" %
7203 (self.op.iallocator, ial.info),
7205 if len(ial.result) != ial.required_nodes:
7206 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7207 " of nodes (%s), required %s" %
7208 (self.op.iallocator, len(ial.result),
7209 ial.required_nodes), errors.ECODE_FAULT)
7210 self.op.pnode = ial.result[0]
7211 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7212 self.op.instance_name, self.op.iallocator,
7213 utils.CommaJoin(ial.result))
7214 if ial.required_nodes == 2:
7215 self.op.snode = ial.result[1]
7217 def BuildHooksEnv(self):
7220 This runs on master, primary and secondary nodes of the instance.
7224 "ADD_MODE": self.op.mode,
7226 if self.op.mode == constants.INSTANCE_IMPORT:
7227 env["SRC_NODE"] = self.op.src_node
7228 env["SRC_PATH"] = self.op.src_path
7229 env["SRC_IMAGES"] = self.src_images
7231 env.update(_BuildInstanceHookEnv(
7232 name=self.op.instance_name,
7233 primary_node=self.op.pnode,
7234 secondary_nodes=self.secondaries,
7235 status=self.op.start,
7236 os_type=self.op.os_type,
7237 memory=self.be_full[constants.BE_MEMORY],
7238 vcpus=self.be_full[constants.BE_VCPUS],
7239 nics=_NICListToTuple(self, self.nics),
7240 disk_template=self.op.disk_template,
7241 disks=[(d["size"], d["mode"]) for d in self.disks],
7244 hypervisor_name=self.op.hypervisor,
7247 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
7251 def _ReadExportInfo(self):
7252 """Reads the export information from disk.
7254 It will override the opcode source node and path with the actual
7255 information, if these two were not specified before.
7257 @return: the export information
7260 assert self.op.mode == constants.INSTANCE_IMPORT
7262 src_node = self.op.src_node
7263 src_path = self.op.src_path
7265 if src_node is None:
7266 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7267 exp_list = self.rpc.call_export_list(locked_nodes)
7269 for node in exp_list:
7270 if exp_list[node].fail_msg:
7272 if src_path in exp_list[node].payload:
7274 self.op.src_node = src_node = node
7275 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7279 raise errors.OpPrereqError("No export found for relative path %s" %
7280 src_path, errors.ECODE_INVAL)
7282 _CheckNodeOnline(self, src_node)
7283 result = self.rpc.call_export_info(src_node, src_path)
7284 result.Raise("No export or invalid export found in dir %s" % src_path)
7286 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7287 if not export_info.has_section(constants.INISECT_EXP):
7288 raise errors.ProgrammerError("Corrupted export config",
7289 errors.ECODE_ENVIRON)
7291 ei_version = export_info.get(constants.INISECT_EXP, "version")
7292 if (int(ei_version) != constants.EXPORT_VERSION):
7293 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7294 (ei_version, constants.EXPORT_VERSION),
7295 errors.ECODE_ENVIRON)
7298 def _ReadExportParams(self, einfo):
7299 """Use export parameters as defaults.
7301 In case the opcode doesn't specify (i.e. override) some instance
7302 parameters, try to use them from the export information, if
7306 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7308 if self.op.disk_template is None:
7309 if einfo.has_option(constants.INISECT_INS, "disk_template"):
7310 self.op.disk_template = einfo.get(constants.INISECT_INS,
7313 raise errors.OpPrereqError("No disk template specified and the export"
7314 " is missing the disk_template information",
7317 if not self.op.disks:
7318 if einfo.has_option(constants.INISECT_INS, "disk_count"):
7320 # TODO: import the disk iv_name too
7321 for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7322 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7323 disks.append({"size": disk_sz})
7324 self.op.disks = disks
7326 raise errors.OpPrereqError("No disk info specified and the export"
7327 " is missing the disk information",
7330 if (not self.op.nics and
7331 einfo.has_option(constants.INISECT_INS, "nic_count")):
7333 for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7335 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7336 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7341 if (self.op.hypervisor is None and
7342 einfo.has_option(constants.INISECT_INS, "hypervisor")):
7343 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7344 if einfo.has_section(constants.INISECT_HYP):
7345 # use the export parameters but do not override the ones
7346 # specified by the user
7347 for name, value in einfo.items(constants.INISECT_HYP):
7348 if name not in self.op.hvparams:
7349 self.op.hvparams[name] = value
7351 if einfo.has_section(constants.INISECT_BEP):
7352 # use the parameters, without overriding
7353 for name, value in einfo.items(constants.INISECT_BEP):
7354 if name not in self.op.beparams:
7355 self.op.beparams[name] = value
7357 # try to read the parameters old style, from the main section
7358 for name in constants.BES_PARAMETERS:
7359 if (name not in self.op.beparams and
7360 einfo.has_option(constants.INISECT_INS, name)):
7361 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7363 if einfo.has_section(constants.INISECT_OSP):
7364 # use the parameters, without overriding
7365 for name, value in einfo.items(constants.INISECT_OSP):
7366 if name not in self.op.osparams:
7367 self.op.osparams[name] = value
7369 def _RevertToDefaults(self, cluster):
7370 """Revert the instance parameters to the default values.
7374 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7375 for name in self.op.hvparams.keys():
7376 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7377 del self.op.hvparams[name]
7379 be_defs = cluster.SimpleFillBE({})
7380 for name in self.op.beparams.keys():
7381 if name in be_defs and be_defs[name] == self.op.beparams[name]:
7382 del self.op.beparams[name]
7384 nic_defs = cluster.SimpleFillNIC({})
7385 for nic in self.op.nics:
7386 for name in constants.NICS_PARAMETERS:
7387 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
7390 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7391 for name in self.op.osparams.keys():
7392 if name in os_defs and os_defs[name] == self.op.osparams[name]:
7393 del self.op.osparams[name]
7395 def CheckPrereq(self):
7396 """Check prerequisites.
7399 if self.op.mode == constants.INSTANCE_IMPORT:
7400 export_info = self._ReadExportInfo()
7401 self._ReadExportParams(export_info)
7403 _CheckDiskTemplate(self.op.disk_template)
7405 if (not self.cfg.GetVGName() and
7406 self.op.disk_template not in constants.DTS_NOT_LVM):
7407 raise errors.OpPrereqError("Cluster does not support lvm-based"
7408 " instances", errors.ECODE_STATE)
7410 if self.op.hypervisor is None:
7411 self.op.hypervisor = self.cfg.GetHypervisorType()
7413 cluster = self.cfg.GetClusterInfo()
7414 enabled_hvs = cluster.enabled_hypervisors
7415 if self.op.hypervisor not in enabled_hvs:
7416 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7417 " cluster (%s)" % (self.op.hypervisor,
7418 ",".join(enabled_hvs)),
7421 # check hypervisor parameter syntax (locally)
7422 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7423 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
7425 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7426 hv_type.CheckParameterSyntax(filled_hvp)
7427 self.hv_full = filled_hvp
7428 # check that we don't specify global parameters on an instance
7429 _CheckGlobalHvParams(self.op.hvparams)
7431 # fill and remember the beparams dict
7432 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7433 self.be_full = cluster.SimpleFillBE(self.op.beparams)
7435 # build os parameters
7436 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7438 # now that hvp/bep are in final format, let's reset to defaults,
7440 if self.op.identify_defaults:
7441 self._RevertToDefaults(cluster)
7445 for idx, nic in enumerate(self.op.nics):
7446 nic_mode_req = nic.get("mode", None)
7447 nic_mode = nic_mode_req
7448 if nic_mode is None:
7449 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7451 # in routed mode, for the first nic, the default ip is 'auto'
7452 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7453 default_ip_mode = constants.VALUE_AUTO
7455 default_ip_mode = constants.VALUE_NONE
7457 # ip validity checks
7458 ip = nic.get("ip", default_ip_mode)
7459 if ip is None or ip.lower() == constants.VALUE_NONE:
7461 elif ip.lower() == constants.VALUE_AUTO:
7462 if not self.op.name_check:
7463 raise errors.OpPrereqError("IP address set to auto but name checks"
7464 " have been skipped",
7466 nic_ip = self.hostname1.ip
7468 if not netutils.IPAddress.IsValid(ip):
7469 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
7473 # TODO: check the ip address for uniqueness
7474 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7475 raise errors.OpPrereqError("Routed nic mode requires an ip address",
7478 # MAC address verification
7479 mac = nic.get("mac", constants.VALUE_AUTO)
7480 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7481 mac = utils.NormalizeAndValidateMac(mac)
7484 self.cfg.ReserveMAC(mac, self.proc.GetECId())
7485 except errors.ReservationError:
7486 raise errors.OpPrereqError("MAC address %s already in use"
7487 " in cluster" % mac,
7488 errors.ECODE_NOTUNIQUE)
7490 # bridge verification
7491 bridge = nic.get("bridge", None)
7492 link = nic.get("link", None)
7494 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7495 " at the same time", errors.ECODE_INVAL)
7496 elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
7497 raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
7504 nicparams[constants.NIC_MODE] = nic_mode_req
7506 nicparams[constants.NIC_LINK] = link
7508 check_params = cluster.SimpleFillNIC(nicparams)
7509 objects.NIC.CheckParameterSyntax(check_params)
7510 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7512 # disk checks/pre-build
7514 for disk in self.op.disks:
7515 mode = disk.get("mode", constants.DISK_RDWR)
7516 if mode not in constants.DISK_ACCESS_SET:
7517 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7518 mode, errors.ECODE_INVAL)
7519 size = disk.get("size", None)
7520 if size is None:
7521 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7522 try:
7523 size = int(size)
7524 except (TypeError, ValueError):
7525 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7526 errors.ECODE_INVAL)
7527 vg = disk.get("vg", self.cfg.GetVGName())
7528 new_disk = {"size": size, "mode": mode, "vg": vg}
7529 if "adopt" in disk:
7530 new_disk["adopt"] = disk["adopt"]
7531 self.disks.append(new_disk)
7533 if self.op.mode == constants.INSTANCE_IMPORT:
7535 # Check that the new instance doesn't have less disks than the export
7536 instance_disks = len(self.disks)
7537 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7538 if instance_disks < export_disks:
7539 raise errors.OpPrereqError("Not enough disks to import."
7540 " (instance: %d, export: %d)" %
7541 (instance_disks, export_disks),
7542 errors.ECODE_INVAL)
7544 disk_images = []
7545 for idx in range(export_disks):
7546 option = 'disk%d_dump' % idx
7547 if export_info.has_option(constants.INISECT_INS, option):
7548 # FIXME: are the old os-es, disk sizes, etc. useful?
7549 export_name = export_info.get(constants.INISECT_INS, option)
7550 image = utils.PathJoin(self.op.src_path, export_name)
7551 disk_images.append(image)
7552 else:
7553 disk_images.append(False)
7555 self.src_images = disk_images
7557 old_name = export_info.get(constants.INISECT_INS, 'name')
7558 try:
7559 exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7560 except (TypeError, ValueError), err:
7561 raise errors.OpPrereqError("Invalid export file, nic_count is not"
7562 " an integer: %s" % str(err),
7563 errors.ECODE_INVAL)
7564 if self.op.instance_name == old_name:
7565 for idx, nic in enumerate(self.nics):
7566 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7567 nic_mac_ini = 'nic%d_mac' % idx
7568 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7570 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7572 # ip ping checks (we use the same ip that was resolved in ExpandNames)
7573 if self.op.ip_check:
7574 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7575 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7576 (self.check_ip, self.op.instance_name),
7577 errors.ECODE_NOTUNIQUE)
7579 #### mac address generation
7580 # By generating here the mac address both the allocator and the hooks get
7581 # the real final mac address rather than the 'auto' or 'generate' value.
7582 # There is a race condition between the generation and the instance object
7583 # creation, which means that we know the mac is valid now, but we're not
7584 # sure it will be when we actually add the instance. If things go bad
7585 # adding the instance will abort because of a duplicate mac, and the
7586 # creation job will fail.
7587 for nic in self.nics:
7588 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7589 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
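# A standalone illustration of the generate-early pattern described in the
# comment above (example registry only, not the real ConfigWriter API):
# generated MACs are reserved against an execution-context id, and only
# become permanent when the instance is committed to the configuration.
#
#   reserved = {}                   # mac -> ec_id, pending reservations
#   def generate_mac(ec_id):
#     mac = random_mac()            # hypothetical helper
#     while mac in reserved:
#       mac = random_mac()
#     reserved[mac] = ec_id         # a concurrent job can still win the
#     return mac                    # final commit, hence the race above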
7591 #### allocator run
7593 if self.op.iallocator is not None:
7594 self._RunAllocator()
7596 #### node related checks
7598 # check primary node
7599 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7600 assert self.pnode is not None, \
7601 "Cannot retrieve locked node %s" % self.op.pnode
7602 if pnode.offline:
7603 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7604 pnode.name, errors.ECODE_STATE)
7605 if pnode.drained:
7606 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7607 pnode.name, errors.ECODE_STATE)
7608 if not pnode.vm_capable:
7609 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
7610 " '%s'" % pnode.name, errors.ECODE_STATE)
7612 self.secondaries = []
7614 # mirror node verification
7615 if self.op.disk_template in constants.DTS_NET_MIRROR:
7616 if self.op.snode == pnode.name:
7617 raise errors.OpPrereqError("The secondary node cannot be the"
7618 " primary node.", errors.ECODE_INVAL)
7619 _CheckNodeOnline(self, self.op.snode)
7620 _CheckNodeNotDrained(self, self.op.snode)
7621 _CheckNodeVmCapable(self, self.op.snode)
7622 self.secondaries.append(self.op.snode)
7624 nodenames = [pnode.name] + self.secondaries
7626 req_size = _ComputeDiskSize(self.op.disk_template,
7627 self.disks)
7629 # Check lv size requirements, if not adopting
7630 if req_size is not None and not self.adopt_disks:
7631 _CheckNodesFreeDisk(self, nodenames, req_size)
7633 if self.adopt_disks: # instead, we must check the adoption data
7634 all_lvs = set([i["adopt"] for i in self.disks])
7635 if len(all_lvs) != len(self.disks):
7636 raise errors.OpPrereqError("Duplicate volume names given for adoption",
7637 errors.ECODE_INVAL)
7638 for lv_name in all_lvs:
7639 try:
7640 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7641 except errors.ReservationError:
7642 raise errors.OpPrereqError("LV named %s used by another instance" %
7643 lv_name, errors.ECODE_NOTUNIQUE)
7645 node_lvs = self.rpc.call_lv_list([pnode.name],
7646 self.cfg.GetVGName())[pnode.name]
7647 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7648 node_lvs = node_lvs.payload
7649 delta = all_lvs.difference(node_lvs.keys())
7650 if delta:
7651 raise errors.OpPrereqError("Missing logical volume(s): %s" %
7652 utils.CommaJoin(delta),
7653 errors.ECODE_INVAL)
7654 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7655 if online_lvs:
7656 raise errors.OpPrereqError("Online logical volumes found, cannot"
7657 " adopt: %s" % utils.CommaJoin(online_lvs),
7658 errors.ECODE_STATE)
7659 # update the size of disk based on what is found
7660 for dsk in self.disks:
7661 dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
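# Illustrative summary of the adoption checks above (not part of the LU):
# given a mapping of existing LVs to (size, attr, online) as returned by
# the lv_list RPC, the code rejects duplicates, missing volumes and
# volumes that are already online (values below are invented):
#
#   node_lvs = {"disk0": ("10240.00", "-wi-a", False),
#               "disk1": ("10240.00", "-wi-ao", True)}
#   all_lvs = set(["disk0", "disk1"])        # from the "adopt" entries
#   missing = all_lvs.difference(node_lvs)   # must be empty
#   online = [lv for lv in all_lvs if node_lvs[lv][2]]  # must be empty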
7663 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7665 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7666 # check OS parameters (remotely)
7667 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7669 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7671 # memory check on primary node
7672 if self.op.start:
7673 _CheckNodeFreeMemory(self, self.pnode.name,
7674 "creating instance %s" % self.op.instance_name,
7675 self.be_full[constants.BE_MEMORY],
7676 self.op.hypervisor)
7678 self.dry_run_result = list(nodenames)
7680 def Exec(self, feedback_fn):
7681 """Create and add the instance to the cluster.
7683 """
7684 instance = self.op.instance_name
7685 pnode_name = self.pnode.name
7687 ht_kind = self.op.hypervisor
7688 if ht_kind in constants.HTS_REQ_PORT:
7689 network_port = self.cfg.AllocatePort()
7690 else:
7691 network_port = None
7693 if constants.ENABLE_FILE_STORAGE:
7694 # this is needed because os.path.join does not accept None arguments
7695 if self.op.file_storage_dir is None:
7696 string_file_storage_dir = ""
7698 string_file_storage_dir = self.op.file_storage_dir
7700 # build the full file storage dir path
7701 file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
7702 string_file_storage_dir, instance)
7703 else:
7704 file_storage_dir = ""
7706 disks = _GenerateDiskTemplate(self,
7707 self.op.disk_template,
7708 instance, pnode_name,
7709 self.secondaries,
7710 self.disks,
7711 file_storage_dir,
7712 self.op.file_driver,
7713 0)
7716 iobj = objects.Instance(name=instance, os=self.op.os_type,
7717 primary_node=pnode_name,
7718 nics=self.nics, disks=disks,
7719 disk_template=self.op.disk_template,
7720 admin_up=False,
7721 network_port=network_port,
7722 beparams=self.op.beparams,
7723 hvparams=self.op.hvparams,
7724 hypervisor=self.op.hypervisor,
7725 osparams=self.op.osparams,
7726 )
7728 if self.adopt_disks:
7729 # rename LVs to the newly-generated names; we need to construct
7730 # 'fake' LV disks with the old data, plus the new unique_id
7731 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7732 rename_to = []
7733 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
7734 rename_to.append(t_dsk.logical_id)
7735 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7736 self.cfg.SetDiskID(t_dsk, pnode_name)
7737 result = self.rpc.call_blockdev_rename(pnode_name,
7738 zip(tmp_disks, rename_to))
7739 result.Raise("Failed to rename adopted LVs")
7741 feedback_fn("* creating instance disks...")
7742 try:
7743 _CreateDisks(self, iobj)
7744 except errors.OpExecError:
7745 self.LogWarning("Device creation failed, reverting...")
7746 try:
7747 _RemoveDisks(self, iobj)
7748 finally:
7749 self.cfg.ReleaseDRBDMinors(instance)
7750 raise
7752 if self.cfg.GetClusterInfo().prealloc_wipe_disks:
7753 feedback_fn("* wiping instance disks...")
7754 try:
7755 _WipeDisks(self, iobj)
7756 except errors.OpExecError:
7757 self.LogWarning("Device wiping failed, reverting...")
7758 try:
7759 _RemoveDisks(self, iobj)
7760 finally:
7761 self.cfg.ReleaseDRBDMinors(instance)
7762 raise
7764 feedback_fn("adding instance %s to cluster config" % instance)
7766 self.cfg.AddInstance(iobj, self.proc.GetECId())
7768 # Declare that we don't want to remove the instance lock anymore, as we've
7769 # added the instance to the config
7770 del self.remove_locks[locking.LEVEL_INSTANCE]
7771 # Unlock all the nodes
7772 if self.op.mode == constants.INSTANCE_IMPORT:
7773 nodes_keep = [self.op.src_node]
7774 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7775 if node != self.op.src_node]
7776 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7777 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7778 else:
7779 self.context.glm.release(locking.LEVEL_NODE)
7780 del self.acquired_locks[locking.LEVEL_NODE]
7782 if self.op.wait_for_sync:
7783 disk_abort = not _WaitForSync(self, iobj)
7784 elif iobj.disk_template in constants.DTS_NET_MIRROR:
7785 # make sure the disks are not degraded (still sync-ing is ok)
7786 time.sleep(15)
7787 feedback_fn("* checking mirrors status")
7788 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
7789 else:
7790 disk_abort = False
7792 if disk_abort:
7793 _RemoveDisks(self, iobj)
7794 self.cfg.RemoveInstance(iobj.name)
7795 # Make sure the instance lock gets removed
7796 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
7797 raise errors.OpExecError("There are some degraded disks for"
7798 " this instance")
7800 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
7801 if self.op.mode == constants.INSTANCE_CREATE:
7802 if not self.op.no_install:
7803 feedback_fn("* running the instance OS create scripts...")
7804 # FIXME: pass debug option from opcode to backend
7805 result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
7806 self.op.debug_level)
7807 result.Raise("Could not add os for instance %s"
7808 " on node %s" % (instance, pnode_name))
7810 elif self.op.mode == constants.INSTANCE_IMPORT:
7811 feedback_fn("* running the instance OS import scripts...")
7813 transfers = []
7815 for idx, image in enumerate(self.src_images):
7816 if not image:
7817 continue
7819 # FIXME: pass debug option from opcode to backend
7820 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
7821 constants.IEIO_FILE, (image, ),
7822 constants.IEIO_SCRIPT,
7823 (iobj.disks[idx], idx),
7824 None)
7825 transfers.append(dt)
7827 import_result = \
7828 masterd.instance.TransferInstanceData(self, feedback_fn,
7829 self.op.src_node, pnode_name,
7830 self.pnode.secondary_ip,
7831 iobj, transfers)
7832 if not compat.all(import_result):
7833 self.LogWarning("Some disks for instance %s on node %s were not"
7834 " imported successfully" % (instance, pnode_name))
7836 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7837 feedback_fn("* preparing remote import...")
7838 connect_timeout = constants.RIE_CONNECT_TIMEOUT
7839 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
7841 disk_results = masterd.instance.RemoteImport(self, feedback_fn, iobj,
7842 self.source_x509_ca,
7843 self._cds, timeouts)
7844 if not compat.all(disk_results):
7845 # TODO: Should the instance still be started, even if some disks
7846 # failed to import (valid for local imports, too)?
7847 self.LogWarning("Some disks for instance %s on node %s were not"
7848 " imported successfully" % (instance, pnode_name))
7850 # Run rename script on newly imported instance
7851 assert iobj.name == instance
7852 feedback_fn("Running rename script for %s" % instance)
7853 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
7854 self.source_instance_name,
7855 self.op.debug_level)
7856 if result.fail_msg:
7857 self.LogWarning("Failed to run rename script for %s on node"
7858 " %s: %s" % (instance, pnode_name, result.fail_msg))
7860 else:
7861 # also checked in the prereq part
7862 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
7863 % self.op.mode)
7865 if self.op.start:
7866 iobj.admin_up = True
7867 self.cfg.Update(iobj, feedback_fn)
7868 logging.info("Starting instance %s on node %s", instance, pnode_name)
7869 feedback_fn("* starting instance...")
7870 result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
7871 result.Raise("Could not start instance")
7873 return list(iobj.all_nodes)
7876 class LUConnectConsole(NoHooksLU):
7877 """Connect to an instance's console.
7879 This is somewhat special in that it returns the command line that
7880 you need to run on the master node in order to connect to the
7881 console.
7883 """
7884 _OP_PARAMS = [
7885 _PInstanceName
7886 ]
7887 REQ_BGL = False
7889 def ExpandNames(self):
7890 self._ExpandAndLockInstance()
7892 def CheckPrereq(self):
7893 """Check prerequisites.
7895 This checks that the instance is in the cluster.
7897 """
7898 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7899 assert self.instance is not None, \
7900 "Cannot retrieve locked instance %s" % self.op.instance_name
7901 _CheckNodeOnline(self, self.instance.primary_node)
7903 def Exec(self, feedback_fn):
7904 """Connect to the console of an instance
7906 """
7907 instance = self.instance
7908 node = instance.primary_node
7910 node_insts = self.rpc.call_instance_list([node],
7911 [instance.hypervisor])[node]
7912 node_insts.Raise("Can't get node information from %s" % node)
7914 if instance.name not in node_insts.payload:
7915 if instance.admin_up:
7916 state = "ERROR_down"
7917 else:
7918 state = "ADMIN_down"
7919 raise errors.OpExecError("Instance %s is not running (state %s)" %
7920 (instance.name, state))
7922 logging.debug("Connecting to console of %s on %s", instance.name, node)
7924 hyper = hypervisor.GetHypervisor(instance.hypervisor)
7925 cluster = self.cfg.GetClusterInfo()
7926 # beparams and hvparams are passed separately, to avoid editing the
7927 # instance and then saving the defaults in the instance itself.
7928 hvparams = cluster.FillHV(instance)
7929 beparams = cluster.FillBE(instance)
7930 console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
7933 return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
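# A minimal, standalone sketch of how a caller could use the command line
# returned by Exec() above. It assumes the result is an argv list suitable
# for subprocess; the helper name is an example, not part of this module.
def _ExampleRunConsoleCommand(argv):
  """Run a console command list and return its exit code (illustrative)."""
  import subprocess
  # BuildCmd above requested a tty (tty=True), so inherit our terminal
  return subprocess.call(argv)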
7936 class LUReplaceDisks(LogicalUnit):
7937 """Replace the disks of an instance.
7939 """
7940 HPATH = "mirrors-replace"
7941 HTYPE = constants.HTYPE_INSTANCE
7942 _OP_PARAMS = [
7943 _PInstanceName,
7944 ("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES)),
7945 ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt)),
7946 ("remote_node", None, ht.TMaybeString),
7947 ("iallocator", None, ht.TMaybeString),
7948 ("early_release", False, ht.TBool),
7949 ]
7950 REQ_BGL = False
7952 def CheckArguments(self):
7953 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
7954 self.op.iallocator)
7956 def ExpandNames(self):
7957 self._ExpandAndLockInstance()
7959 if self.op.iallocator is not None:
7960 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7962 elif self.op.remote_node is not None:
7963 remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7964 self.op.remote_node = remote_node
7966 # Warning: do not remove the locking of the new secondary here
7967 # unless DRBD8.AddChildren is changed to work in parallel;
7968 # currently it doesn't since parallel invocations of
7969 # FindUnusedMinor will conflict
7970 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
7971 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7973 else:
7974 self.needed_locks[locking.LEVEL_NODE] = []
7975 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7977 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
7978 self.op.iallocator, self.op.remote_node,
7979 self.op.disks, False, self.op.early_release)
7981 self.tasklets = [self.replacer]
7983 def DeclareLocks(self, level):
7984 # If we're not already locking all nodes in the set we have to declare the
7985 # instance's primary/secondary nodes.
7986 if (level == locking.LEVEL_NODE and
7987 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7988 self._LockInstancesNodes()
7990 def BuildHooksEnv(self):
7991 """Build hooks env.
7993 This runs on the master, the primary and all the secondaries.
7995 """
7996 instance = self.replacer.instance
7997 env = {
7998 "MODE": self.op.mode,
7999 "NEW_SECONDARY": self.op.remote_node,
8000 "OLD_SECONDARY": instance.secondary_nodes[0],
8001 }
8002 env.update(_BuildInstanceHookEnvByObject(self, instance))
8003 nl = [
8004 self.cfg.GetMasterNode(),
8005 instance.primary_node,
8006 ]
8007 if self.op.remote_node is not None:
8008 nl.append(self.op.remote_node)
8010 return env, nl, nl
8012 class TLReplaceDisks(Tasklet):
8013 """Replaces disks for an instance.
8015 Note: Locking is not within the scope of this class.
8017 """
8018 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
8019 disks, delay_iallocator, early_release):
8020 """Initializes this class.
8022 """
8023 Tasklet.__init__(self, lu)
8025 # Parameters
8026 self.instance_name = instance_name
8027 self.mode = mode
8028 self.iallocator_name = iallocator_name
8029 self.remote_node = remote_node
8030 self.disks = disks
8031 self.delay_iallocator = delay_iallocator
8032 self.early_release = early_release
8034 # Runtime data
8035 self.instance = None
8036 self.new_node = None
8037 self.target_node = None
8038 self.other_node = None
8039 self.remote_node_info = None
8040 self.node_secondary_ip = None
8042 @staticmethod
8043 def CheckArguments(mode, remote_node, iallocator):
8044 """Helper function for users of this class.
8046 """
8047 # check for valid parameter combination
8048 if mode == constants.REPLACE_DISK_CHG:
8049 if remote_node is None and iallocator is None:
8050 raise errors.OpPrereqError("When changing the secondary either an"
8051 " iallocator script must be used or the"
8052 " new node given", errors.ECODE_INVAL)
8054 if remote_node is not None and iallocator is not None:
8055 raise errors.OpPrereqError("Give either the iallocator or the new"
8056 " secondary, not both", errors.ECODE_INVAL)
8058 elif remote_node is not None or iallocator is not None:
8059 # Not replacing the secondary
8060 raise errors.OpPrereqError("The iallocator and new node options can"
8061 " only be used when changing the"
8062 " secondary node", errors.ECODE_INVAL)
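# Summary of the combinations validated above (illustrative):
#
#   mode              remote_node  iallocator  outcome
#   REPLACE_DISK_CHG  set          None        ok: explicit new secondary
#   REPLACE_DISK_CHG  None         set         ok: allocator picks the node
#   REPLACE_DISK_CHG  None         None        error: one of the two needed
#   REPLACE_DISK_CHG  set          set         error: not both at once
#   any other mode    None         None        ok
#   any other mode    either set   either set  error: only valid with CHG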
8064 @staticmethod
8065 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
8066 """Compute a new secondary node using an IAllocator.
8068 """
8069 ial = IAllocator(lu.cfg, lu.rpc,
8070 mode=constants.IALLOCATOR_MODE_RELOC,
8071 name=instance_name,
8072 relocate_from=relocate_from)
8074 ial.Run(iallocator_name)
8076 if not ial.success:
8077 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
8078 " %s" % (iallocator_name, ial.info),
8079 errors.ECODE_NORES)
8081 if len(ial.result) != ial.required_nodes:
8082 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8083 " of nodes (%s), required %s" %
8084 (iallocator_name,
8085 len(ial.result), ial.required_nodes),
8086 errors.ECODE_FAULT)
8088 remote_node_name = ial.result[0]
8090 lu.LogInfo("Selected new secondary for instance '%s': %s",
8091 instance_name, remote_node_name)
8093 return remote_node_name
8095 def _FindFaultyDisks(self, node_name):
8096 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
8097 node_name, True)
8099 def CheckPrereq(self):
8100 """Check prerequisites.
8102 This checks that the instance is in the cluster.
8104 """
8105 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
8106 assert instance is not None, \
8107 "Cannot retrieve locked instance %s" % self.instance_name
8109 if instance.disk_template != constants.DT_DRBD8:
8110 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
8111 " instances", errors.ECODE_INVAL)
8113 if len(instance.secondary_nodes) != 1:
8114 raise errors.OpPrereqError("The instance has a strange layout,"
8115 " expected one secondary but found %d" %
8116 len(instance.secondary_nodes),
8117 errors.ECODE_FAULT)
8119 if not self.delay_iallocator:
8120 self._CheckPrereq2()
8122 def _CheckPrereq2(self):
8123 """Check prerequisites, second part.
8125 This function should always be part of CheckPrereq. It was separated and is
8126 now called from Exec because during node evacuation iallocator was only
8127 called with an unmodified cluster model, not taking planned changes into
8128 account.
8130 """
8131 instance = self.instance
8132 secondary_node = instance.secondary_nodes[0]
8134 if self.iallocator_name is None:
8135 remote_node = self.remote_node
8136 else:
8137 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
8138 instance.name, instance.secondary_nodes)
8140 if remote_node is not None:
8141 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
8142 assert self.remote_node_info is not None, \
8143 "Cannot retrieve locked node %s" % remote_node
8144 else:
8145 self.remote_node_info = None
8147 if remote_node == self.instance.primary_node:
8148 raise errors.OpPrereqError("The specified node is the primary node of"
8149 " the instance.", errors.ECODE_INVAL)
8151 if remote_node == secondary_node:
8152 raise errors.OpPrereqError("The specified node is already the"
8153 " secondary node of the instance.",
8154 errors.ECODE_INVAL)
8156 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
8157 constants.REPLACE_DISK_CHG):
8158 raise errors.OpPrereqError("Cannot specify disks to be replaced",
8159 errors.ECODE_INVAL)
8161 if self.mode == constants.REPLACE_DISK_AUTO:
8162 faulty_primary = self._FindFaultyDisks(instance.primary_node)
8163 faulty_secondary = self._FindFaultyDisks(secondary_node)
8165 if faulty_primary and faulty_secondary:
8166 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
8167 " one node and can not be repaired"
8168 " automatically" % self.instance_name,
8169 errors.ECODE_STATE)
8171 if faulty_primary:
8172 self.disks = faulty_primary
8173 self.target_node = instance.primary_node
8174 self.other_node = secondary_node
8175 check_nodes = [self.target_node, self.other_node]
8176 elif faulty_secondary:
8177 self.disks = faulty_secondary
8178 self.target_node = secondary_node
8179 self.other_node = instance.primary_node
8180 check_nodes = [self.target_node, self.other_node]
8181 else:
8182 self.disks = []
8183 check_nodes = []
8185 else:
8186 # Non-automatic modes
8187 if self.mode == constants.REPLACE_DISK_PRI:
8188 self.target_node = instance.primary_node
8189 self.other_node = secondary_node
8190 check_nodes = [self.target_node, self.other_node]
8192 elif self.mode == constants.REPLACE_DISK_SEC:
8193 self.target_node = secondary_node
8194 self.other_node = instance.primary_node
8195 check_nodes = [self.target_node, self.other_node]
8197 elif self.mode == constants.REPLACE_DISK_CHG:
8198 self.new_node = remote_node
8199 self.other_node = instance.primary_node
8200 self.target_node = secondary_node
8201 check_nodes = [self.new_node, self.other_node]
8203 _CheckNodeNotDrained(self.lu, remote_node)
8204 _CheckNodeVmCapable(self.lu, remote_node)
8206 old_node_info = self.cfg.GetNodeInfo(secondary_node)
8207 assert old_node_info is not None
8208 if old_node_info.offline and not self.early_release:
8209 # doesn't make sense to delay the release
8210 self.early_release = True
8211 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
8212 " early-release mode", secondary_node)
8214 else:
8215 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
8216 self.mode)
8218 # If not specified all disks should be replaced
8219 if not self.disks:
8220 self.disks = range(len(self.instance.disks))
8222 for node in check_nodes:
8223 _CheckNodeOnline(self.lu, node)
8225 # Check whether disks are valid
8226 for disk_idx in self.disks:
8227 instance.FindDisk(disk_idx)
8229 # Get secondary node IP addresses
8230 node_2nd_ip = {}
8232 for node_name in [self.target_node, self.other_node, self.new_node]:
8233 if node_name is not None:
8234 node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
8236 self.node_secondary_ip = node_2nd_ip
8238 def Exec(self, feedback_fn):
8239 """Execute disk replacement.
8241 This dispatches the disk replacement to the appropriate handler.
8243 """
8244 if self.delay_iallocator:
8245 self._CheckPrereq2()
8247 if not self.disks:
8248 feedback_fn("No disks need replacement")
8249 return
8251 feedback_fn("Replacing disk(s) %s for %s" %
8252 (utils.CommaJoin(self.disks), self.instance.name))
8254 activate_disks = (not self.instance.admin_up)
8256 # Activate the instance disks if we're replacing them on a down instance
8257 if activate_disks:
8258 _StartInstanceDisks(self.lu, self.instance, True)
8260 try:
8261 # Should we replace the secondary node?
8262 if self.new_node is not None:
8263 fn = self._ExecDrbd8Secondary
8264 else:
8265 fn = self._ExecDrbd8DiskOnly
8267 return fn(feedback_fn)
8269 finally:
8270 # Deactivate the instance disks if we're replacing them on a
8271 # down instance
8272 if activate_disks:
8273 _SafeShutdownInstanceDisks(self.lu, self.instance)
8275 def _CheckVolumeGroup(self, nodes):
8276 self.lu.LogInfo("Checking volume groups")
8278 vgname = self.cfg.GetVGName()
8280 # Make sure volume group exists on all involved nodes
8281 results = self.rpc.call_vg_list(nodes)
8282 if not results:
8283 raise errors.OpExecError("Can't list volume groups on the nodes")
8285 for node in nodes:
8286 res = results[node]
8287 res.Raise("Error checking node %s" % node)
8288 if vgname not in res.payload:
8289 raise errors.OpExecError("Volume group '%s' not found on node %s" %
8290 (vgname, node))
8292 def _CheckDisksExistence(self, nodes):
8293 # Check disk existence
8294 for idx, dev in enumerate(self.instance.disks):
8295 if idx not in self.disks:
8296 continue
8298 for node in nodes:
8299 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
8300 self.cfg.SetDiskID(dev, node)
8302 result = self.rpc.call_blockdev_find(node, dev)
8304 msg = result.fail_msg
8305 if msg or not result.payload:
8306 if not msg:
8307 msg = "disk not found"
8308 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
8309 (idx, node, msg))
8311 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
8312 for idx, dev in enumerate(self.instance.disks):
8313 if idx not in self.disks:
8314 continue
8316 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
8317 (idx, node_name))
8319 if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
8320 ldisk=ldisk):
8321 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
8322 " replace disks for instance %s" %
8323 (node_name, self.instance.name))
8325 def _CreateNewStorage(self, node_name):
8326 vgname = self.cfg.GetVGName()
8327 iv_names = {}
8329 for idx, dev in enumerate(self.instance.disks):
8330 if idx not in self.disks:
8331 continue
8333 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
8335 self.cfg.SetDiskID(dev, node_name)
8337 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
8338 names = _GenerateUniqueNames(self.lu, lv_names)
8340 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
8341 logical_id=(vgname, names[0]))
8342 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
8343 logical_id=(vgname, names[1]))
8345 new_lvs = [lv_data, lv_meta]
8346 old_lvs = dev.children
8347 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
8349 # we pass force_create=True to force the LVM creation
8350 for new_lv in new_lvs:
8351 _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
8352 _GetInstanceInfoText(self.instance), False)
8354 return iv_names
8356 def _CheckDevices(self, node_name, iv_names):
8357 for name, (dev, _, _) in iv_names.iteritems():
8358 self.cfg.SetDiskID(dev, node_name)
8360 result = self.rpc.call_blockdev_find(node_name, dev)
8362 msg = result.fail_msg
8363 if msg or not result.payload:
8364 if not msg:
8365 msg = "disk not found"
8366 raise errors.OpExecError("Can't find DRBD device %s: %s" %
8367 (name, msg))
8369 if result.payload.is_degraded:
8370 raise errors.OpExecError("DRBD device %s is degraded!" % name)
8372 def _RemoveOldStorage(self, node_name, iv_names):
8373 for name, (_, old_lvs, _) in iv_names.iteritems():
8374 self.lu.LogInfo("Remove logical volumes for %s" % name)
8376 for lv in old_lvs:
8377 self.cfg.SetDiskID(lv, node_name)
8379 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
8380 if msg:
8381 self.lu.LogWarning("Can't remove old LV: %s" % msg,
8382 hint="remove unused LVs manually")
8384 def _ReleaseNodeLock(self, node_name):
8385 """Releases the lock for a given node."""
8386 self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
8388 def _ExecDrbd8DiskOnly(self, feedback_fn):
8389 """Replace a disk on the primary or secondary for DRBD 8.
8391 The algorithm for replace is quite complicated:
8393 1. for each disk to be replaced:
8395 1. create new LVs on the target node with unique names
8396 1. detach old LVs from the drbd device
8397 1. rename old LVs to name_replaced.<time_t>
8398 1. rename new LVs to old LVs
8399 1. attach the new LVs (with the old names now) to the drbd device
8401 1. wait for sync across all devices
8403 1. for each modified disk:
8405 1. remove old LVs (which have the name name_replaced.<time_t>)
8407 Failures are not very well handled.
8409 """
8410 steps_total = 6
8412 # Step: check device activation
8413 self.lu.LogStep(1, steps_total, "Check device existence")
8414 self._CheckDisksExistence([self.other_node, self.target_node])
8415 self._CheckVolumeGroup([self.target_node, self.other_node])
8417 # Step: check other node consistency
8418 self.lu.LogStep(2, steps_total, "Check peer consistency")
8419 self._CheckDisksConsistency(self.other_node,
8420 self.other_node == self.instance.primary_node,
8421 False)
8423 # Step: create new storage
8424 self.lu.LogStep(3, steps_total, "Allocate new storage")
8425 iv_names = self._CreateNewStorage(self.target_node)
8427 # Step: for each lv, detach+rename*2+attach
8428 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8429 for dev, old_lvs, new_lvs in iv_names.itervalues():
8430 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
8432 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
8433 old_lvs)
8434 result.Raise("Can't detach drbd from local storage on node"
8435 " %s for device %s" % (self.target_node, dev.iv_name))
8437 #cfg.Update(instance)
8439 # ok, we created the new LVs, so now we know we have the needed
8440 # storage; as such, we proceed on the target node to rename
8441 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
8442 # using the assumption that logical_id == physical_id (which in
8443 # turn is the unique_id on that node)
8445 # FIXME(iustin): use a better name for the replaced LVs
8446 temp_suffix = int(time.time())
8447 ren_fn = lambda d, suff: (d.physical_id[0],
8448 d.physical_id[1] + "_replaced-%s" % suff)
8450 # Build the rename list based on what LVs exist on the node
8451 rename_old_to_new = []
8452 for to_ren in old_lvs:
8453 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
8454 if not result.fail_msg and result.payload:
8455 # device exists
8456 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
8458 self.lu.LogInfo("Renaming the old LVs on the target node")
8459 result = self.rpc.call_blockdev_rename(self.target_node,
8460 rename_old_to_new)
8461 result.Raise("Can't rename old LVs on node %s" % self.target_node)
8463 # Now we rename the new LVs to the old LVs
8464 self.lu.LogInfo("Renaming the new LVs on the target node")
8465 rename_new_to_old = [(new, old.physical_id)
8466 for old, new in zip(old_lvs, new_lvs)]
8467 result = self.rpc.call_blockdev_rename(self.target_node,
8468 rename_new_to_old)
8469 result.Raise("Can't rename new LVs on node %s" % self.target_node)
8471 for old, new in zip(old_lvs, new_lvs):
8472 new.logical_id = old.logical_id
8473 self.cfg.SetDiskID(new, self.target_node)
8475 for disk in old_lvs:
8476 disk.logical_id = ren_fn(disk, temp_suffix)
8477 self.cfg.SetDiskID(disk, self.target_node)
8479 # Now that the new lvs have the old name, we can add them to the device
8480 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
8481 result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
8482 new_lvs)
8483 msg = result.fail_msg
8484 if msg:
8485 for new_lv in new_lvs:
8486 msg2 = self.rpc.call_blockdev_remove(self.target_node,
8487 new_lv).fail_msg
8488 if msg2:
8489 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
8490 hint=("cleanup manually the unused logical"
8491 " volumes"))
8492 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
8494 dev.children = new_lvs
8496 self.cfg.Update(self.instance, feedback_fn)
8498 cstep = 5
8499 if self.early_release:
8500 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8501 cstep += 1
8502 self._RemoveOldStorage(self.target_node, iv_names)
8503 # WARNING: we release both node locks here, do not do other RPCs
8504 # than WaitForSync to the primary node
8505 self._ReleaseNodeLock([self.target_node, self.other_node])
8507 # Wait for sync
8508 # This can fail as the old devices are degraded and _WaitForSync
8509 # does a combined result over all disks, so we don't check its return value
8510 self.lu.LogStep(cstep, steps_total, "Sync devices")
8511 cstep += 1
8512 _WaitForSync(self.lu, self.instance)
8514 # Check all devices manually
8515 self._CheckDevices(self.instance.primary_node, iv_names)
8517 # Step: remove old storage
8518 if not self.early_release:
8519 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8520 cstep += 1
8521 self._RemoveOldStorage(self.target_node, iv_names)
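# The detach/rename/rename/attach dance above, summarised on invented LV
# names (illustrative only; real renames go through call_blockdev_rename):
#
#   old:  xenvg/disk0_data              new:  xenvg/<uuid>.disk0_data
#   1. removechildren: detach the old LVs from the drbd device
#   2. rename old ->   xenvg/disk0_data_replaced-<time_t>
#   3. rename new ->   xenvg/disk0_data  (the name drbd expects)
#   4. addchildren: attach the renamed new LVs; on failure remove them
#   5. after sync, drop the *_replaced-<time_t> volumes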
8522 def _ExecDrbd8Secondary(self, feedback_fn):
8523 """Replace the secondary node for DRBD 8.
8524 """Replace the secondary node for DRBD 8.
8526 The algorithm for replace is quite complicated:
8527 - for all disks of the instance:
8528 - create new LVs on the new node with same names
8529 - shutdown the drbd device on the old secondary
8530 - disconnect the drbd network on the primary
8531 - create the drbd device on the new secondary
8532 - network attach the drbd on the primary, using an artifice:
8533 the drbd code for Attach() will connect to the network if it
8534 finds a device which is connected to the good local disks but
8535 not network enabled
8536 - wait for sync across all devices
8537 - remove all disks from the old secondary
8539 Failures are not very well handled.
8541 """
8542 steps_total = 6
8544 # Step: check device activation
8545 self.lu.LogStep(1, steps_total, "Check device existence")
8546 self._CheckDisksExistence([self.instance.primary_node])
8547 self._CheckVolumeGroup([self.instance.primary_node])
8549 # Step: check other node consistency
8550 self.lu.LogStep(2, steps_total, "Check peer consistency")
8551 self._CheckDisksConsistency(self.instance.primary_node, True, True)
8553 # Step: create new storage
8554 self.lu.LogStep(3, steps_total, "Allocate new storage")
8555 for idx, dev in enumerate(self.instance.disks):
8556 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8557 (self.new_node, idx))
8558 # we pass force_create=True to force LVM creation
8559 for new_lv in dev.children:
8560 _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8561 _GetInstanceInfoText(self.instance), False)
8563 # Step 4: drbd minors and drbd setup changes
8564 # after this, we must manually remove the drbd minors on both the
8565 # error and the success paths
8566 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8567 minors = self.cfg.AllocateDRBDMinor([self.new_node
8568 for dev in self.instance.disks],
8569 self.instance.name)
8570 logging.debug("Allocated minors %r", minors)
8572 iv_names = {}
8573 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8574 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
8575 (self.new_node, idx))
8576 # create new devices on new_node; note that we create two IDs:
8577 # one without port, so the drbd will be activated without
8578 # networking information on the new node at this stage, and one
8579 # with network, for the latter activation in step 4
8580 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8581 if self.instance.primary_node == o_node1:
8582 p_minor = o_minor1
8583 else:
8584 assert self.instance.primary_node == o_node2, "Three-node instance?"
8585 p_minor = o_minor2
8587 new_alone_id = (self.instance.primary_node, self.new_node, None,
8588 p_minor, new_minor, o_secret)
8589 new_net_id = (self.instance.primary_node, self.new_node, o_port,
8590 p_minor, new_minor, o_secret)
8592 iv_names[idx] = (dev, dev.children, new_net_id)
8593 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8594 new_net_id)
8595 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8596 logical_id=new_alone_id,
8597 children=dev.children,
8598 size=dev.size)
8599 try:
8600 _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8601 _GetInstanceInfoText(self.instance), False)
8602 except errors.GenericError:
8603 self.cfg.ReleaseDRBDMinors(self.instance.name)
8604 raise
8606 # We have new devices, shutdown the drbd on the old secondary
8607 for idx, dev in enumerate(self.instance.disks):
8608 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8609 self.cfg.SetDiskID(dev, self.target_node)
8610 msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8611 if msg:
8612 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
8613 " node: %s" % (idx, msg),
8614 hint=("Please cleanup this device manually as"
8615 " soon as possible"))
8617 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8618 result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8619 self.node_secondary_ip,
8620 self.instance.disks)\
8621 [self.instance.primary_node]
8623 msg = result.fail_msg
8624 if msg:
8625 # detaches didn't succeed (unlikely)
8626 self.cfg.ReleaseDRBDMinors(self.instance.name)
8627 raise errors.OpExecError("Can't detach the disks from the network on"
8628 " old node: %s" % (msg,))
8630 # if we managed to detach at least one, we update all the disks of
8631 # the instance to point to the new secondary
8632 self.lu.LogInfo("Updating instance configuration")
8633 for dev, _, new_logical_id in iv_names.itervalues():
8634 dev.logical_id = new_logical_id
8635 self.cfg.SetDiskID(dev, self.instance.primary_node)
8637 self.cfg.Update(self.instance, feedback_fn)
8639 # and now perform the drbd attach
8640 self.lu.LogInfo("Attaching primary drbds to new secondary"
8641 " (standalone => connected)")
8642 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8643 self.new_node],
8644 self.node_secondary_ip,
8645 self.instance.disks,
8646 self.instance.name,
8647 False)
8648 for to_node, to_result in result.items():
8649 msg = to_result.fail_msg
8650 if msg:
8651 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8652 to_node, msg,
8653 hint=("please do a gnt-instance info to see the"
8654 " status of disks"))
8655 cstep = 5
8656 if self.early_release:
8657 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8658 cstep += 1
8659 self._RemoveOldStorage(self.target_node, iv_names)
8660 # WARNING: we release all node locks here, do not do other RPCs
8661 # than WaitForSync to the primary node
8662 self._ReleaseNodeLock([self.instance.primary_node,
8663 self.target_node,
8664 self.new_node])
8666 # Wait for sync
8667 # This can fail as the old devices are degraded and _WaitForSync
8668 # does a combined result over all disks, so we don't check its return value
8669 self.lu.LogStep(cstep, steps_total, "Sync devices")
8670 cstep += 1
8671 _WaitForSync(self.lu, self.instance)
8673 # Check all devices manually
8674 self._CheckDevices(self.instance.primary_node, iv_names)
8676 # Step: remove old storage
8677 if not self.early_release:
8678 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8679 self._RemoveOldStorage(self.target_node, iv_names)
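# Illustrative example of the two logical_id tuples built in step 4 above,
# with invented values ((node_a, node_b, port, minor_a, minor_b, secret)):
#
#   current:  ("node1", "node2", 11000, 0, 3, "s3cr3t")
#   alone id: ("node1", "node3", None,  0, 7, "s3cr3t")  # no port: standalone
#   net id:   ("node1", "node3", 11000, 0, 7, "s3cr3t")  # used when attaching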
8682 class LURepairNodeStorage(NoHooksLU):
8683 """Repairs the volume group on a node.
8685 """
8686 _OP_PARAMS = [
8687 _PNodeName,
8688 ("storage_type", ht.NoDefault, _CheckStorageType),
8689 ("name", ht.NoDefault, ht.TNonEmptyString),
8690 ("ignore_consistency", False, ht.TBool),
8691 ]
8692 REQ_BGL = False
8694 def CheckArguments(self):
8695 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8697 storage_type = self.op.storage_type
8699 if (constants.SO_FIX_CONSISTENCY not in
8700 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
8701 raise errors.OpPrereqError("Storage units of type '%s' can not be"
8702 " repaired" % storage_type,
8703 errors.ECODE_INVAL)
8705 def ExpandNames(self):
8706 self.needed_locks = {
8707 locking.LEVEL_NODE: [self.op.node_name],
8708 }
8710 def _CheckFaultyDisks(self, instance, node_name):
8711 """Ensure faulty disks abort the opcode or at least warn."""
8712 try:
8713 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
8714 node_name, True):
8715 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
8716 " node '%s'" % (instance.name, node_name),
8717 errors.ECODE_STATE)
8718 except errors.OpPrereqError, err:
8719 if self.op.ignore_consistency:
8720 self.proc.LogWarning(str(err.args[0]))
8721 else:
8722 raise
8724 def CheckPrereq(self):
8725 """Check prerequisites.
8727 """
8728 # Check whether any instance on this node has faulty disks
8729 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
8730 if not inst.admin_up:
8731 continue
8732 check_nodes = set(inst.all_nodes)
8733 check_nodes.discard(self.op.node_name)
8734 for inst_node_name in check_nodes:
8735 self._CheckFaultyDisks(inst, inst_node_name)
8737 def Exec(self, feedback_fn):
8738 feedback_fn("Repairing storage unit '%s' on %s ..." %
8739 (self.op.name, self.op.node_name))
8741 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
8742 result = self.rpc.call_storage_execute(self.op.node_name,
8743 self.op.storage_type, st_args,
8744 self.op.name,
8745 constants.SO_FIX_CONSISTENCY)
8746 result.Raise("Failed to repair storage unit '%s' on %s" %
8747 (self.op.name, self.op.node_name))
8750 class LUNodeEvacuationStrategy(NoHooksLU):
8751 """Computes the node evacuation strategy.
8753 """
8754 _OP_PARAMS = [
8755 ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
8756 ("remote_node", None, ht.TMaybeString),
8757 ("iallocator", None, ht.TMaybeString),
8758 ]
8759 REQ_BGL = False
8761 def CheckArguments(self):
8762 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
8764 def ExpandNames(self):
8765 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
8766 self.needed_locks = locks = {}
8767 if self.op.remote_node is None:
8768 locks[locking.LEVEL_NODE] = locking.ALL_SET
8769 else:
8770 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8771 locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
8773 def Exec(self, feedback_fn):
8774 if self.op.remote_node is not None:
8775 instances = []
8776 for node in self.op.nodes:
8777 instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
8778 result = []
8779 for i in instances:
8780 if i.primary_node == self.op.remote_node:
8781 raise errors.OpPrereqError("Node %s is the primary node of"
8782 " instance %s, cannot use it as"
8783 " secondary" %
8784 (self.op.remote_node, i.name),
8785 errors.ECODE_INVAL)
8786 result.append([i.name, self.op.remote_node])
8787 else:
8788 ial = IAllocator(self.cfg, self.rpc,
8789 mode=constants.IALLOCATOR_MODE_MEVAC,
8790 evac_nodes=self.op.nodes)
8791 ial.Run(self.op.iallocator, validate=True)
8792 if not ial.success:
8793 raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
8794 errors.ECODE_NORES)
8795 result = ial.result
8796 return result
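# Illustrative only: the strategy computed above is a list of
# [instance_name, target] entries; with an iallocator the targets come from
# IALLOCATOR_MODE_MEVAC and their exact shape is assumed here. The helper
# below just pretty-prints such a result and is not part of this module.
def _ExamplePrintEvacStrategy(strategy):
  """Print an evacuation strategy as instance -> target lines."""
  for entry in strategy:
    instance_name, target = entry[0], entry[1:]
    print "%s -> %s" % (instance_name, target)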
8799 class LUGrowDisk(LogicalUnit):
8800 """Grow a disk of an instance.
8802 """
8803 HPATH = "disk-grow"
8804 HTYPE = constants.HTYPE_INSTANCE
8805 _OP_PARAMS = [
8806 _PInstanceName,
8807 ("disk", ht.NoDefault, ht.TInt),
8808 ("amount", ht.NoDefault, ht.TInt),
8809 ("wait_for_sync", True, ht.TBool),
8810 ]
8811 REQ_BGL = False
8813 def ExpandNames(self):
8814 self._ExpandAndLockInstance()
8815 self.needed_locks[locking.LEVEL_NODE] = []
8816 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8818 def DeclareLocks(self, level):
8819 if level == locking.LEVEL_NODE:
8820 self._LockInstancesNodes()
8822 def BuildHooksEnv(self):
8823 """Build hooks env.
8825 This runs on the master, the primary and all the secondaries.
8827 """
8828 env = {
8829 "DISK": self.op.disk,
8830 "AMOUNT": self.op.amount,
8831 }
8832 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8833 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8834 return env, nl, nl
8836 def CheckPrereq(self):
8837 """Check prerequisites.
8839 This checks that the instance is in the cluster.
8841 """
8842 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8843 assert instance is not None, \
8844 "Cannot retrieve locked instance %s" % self.op.instance_name
8845 nodenames = list(instance.all_nodes)
8846 for node in nodenames:
8847 _CheckNodeOnline(self, node)
8849 self.instance = instance
8851 if instance.disk_template not in constants.DTS_GROWABLE:
8852 raise errors.OpPrereqError("Instance's disk layout does not support"
8853 " growing.", errors.ECODE_INVAL)
8855 self.disk = instance.FindDisk(self.op.disk)
8857 if instance.disk_template != constants.DT_FILE:
8858 # TODO: check the free disk space for file, when that feature will be
8859 # supported
8860 _CheckNodesFreeDisk(self, nodenames, self.op.amount)
8862 def Exec(self, feedback_fn):
8863 """Execute disk grow.
8865 """
8866 instance = self.instance
8867 disk = self.disk
8869 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
8870 if not disks_ok:
8871 raise errors.OpExecError("Cannot activate block device to grow")
8873 for node in instance.all_nodes:
8874 self.cfg.SetDiskID(disk, node)
8875 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
8876 result.Raise("Grow request failed to node %s" % node)
8878 # TODO: Rewrite code to work properly
8879 # DRBD goes into sync mode for a short amount of time after executing the
8880 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
8881 # calling "resize" in sync mode fails. Sleeping for a short amount of
8882 # time is a work-around.
8884 time.sleep(5)
8885 disk.RecordGrow(self.op.amount)
8886 self.cfg.Update(instance, feedback_fn)
8887 if self.op.wait_for_sync:
8888 disk_abort = not _WaitForSync(self, instance, disks=[disk])
8889 if disk_abort:
8890 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
8891 " status.\nPlease check the instance.")
8892 if not instance.admin_up:
8893 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
8894 elif not instance.admin_up:
8895 self.proc.LogWarning("Not shutting down the disk even if the instance is"
8896 " not supposed to be running because no wait for"
8897 " sync mode was requested.")
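# The sleep above works around DRBD releases below 8.0.13, where "resize"
# fails while the device is still re-syncing. A generic form of that
# work-around, assuming nothing about Ganeti internals, is sketched below.
def _ExampleRetryAfterSettle(fn, settle=5, attempts=3):
  """Call fn(), sleeping between attempts to let the device settle."""
  import time
  for attempt in range(attempts):
    try:
      return fn()
    except EnvironmentError:
      if attempt == attempts - 1:
        raise
      time.sleep(settle)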
8900 class LUQueryInstanceData(NoHooksLU):
8901 """Query runtime instance data.
8903 """
8904 _OP_PARAMS = [
8905 ("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
8906 ("static", False, ht.TBool),
8907 ]
8908 REQ_BGL = False
8910 def ExpandNames(self):
8911 self.needed_locks = {}
8912 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
8914 if self.op.instances:
8915 self.wanted_names = []
8916 for name in self.op.instances:
8917 full_name = _ExpandInstanceName(self.cfg, name)
8918 self.wanted_names.append(full_name)
8919 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
8920 else:
8921 self.wanted_names = None
8922 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
8924 self.needed_locks[locking.LEVEL_NODE] = []
8925 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8927 def DeclareLocks(self, level):
8928 if level == locking.LEVEL_NODE:
8929 self._LockInstancesNodes()
8931 def CheckPrereq(self):
8932 """Check prerequisites.
8934 This only checks the optional instance list against the existing names.
8936 """
8937 if self.wanted_names is None:
8938 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
8940 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
8941 in self.wanted_names]
8943 def _ComputeBlockdevStatus(self, node, instance_name, dev):
8944 """Returns the status of a block device
8946 """
8947 if self.op.static or not node:
8948 return None
8950 self.cfg.SetDiskID(dev, node)
8952 result = self.rpc.call_blockdev_find(node, dev)
8953 if result.offline:
8954 return None
8956 result.Raise("Can't compute disk status for %s" % instance_name)
8958 status = result.payload
8959 if status is None:
8960 return None
8962 return (status.dev_path, status.major, status.minor,
8963 status.sync_percent, status.estimated_time,
8964 status.is_degraded, status.ldisk_status)
8966 def _ComputeDiskStatus(self, instance, snode, dev):
8967 """Compute block device status.
8969 """
8970 if dev.dev_type in constants.LDS_DRBD:
8971 # we change the snode then (otherwise we use the one passed in)
8972 if dev.logical_id[0] == instance.primary_node:
8973 snode = dev.logical_id[1]
8974 else:
8975 snode = dev.logical_id[0]
8977 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
8978 instance.name, dev)
8979 dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
8981 if dev.children:
8982 dev_children = [self._ComputeDiskStatus(instance, snode, child)
8983 for child in dev.children]
8984 else:
8985 dev_children = []
8987 data = {
8988 "iv_name": dev.iv_name,
8989 "dev_type": dev.dev_type,
8990 "logical_id": dev.logical_id,
8991 "physical_id": dev.physical_id,
8992 "pstatus": dev_pstatus,
8993 "sstatus": dev_sstatus,
8994 "children": dev_children,
8995 "mode": dev.mode,
8996 "size": dev.size,
8997 }
8999 return data
9001 def Exec(self, feedback_fn):
9002 """Gather and return data"""
9003 result = {}
9005 cluster = self.cfg.GetClusterInfo()
9007 for instance in self.wanted_instances:
9008 if not self.op.static:
9009 remote_info = self.rpc.call_instance_info(instance.primary_node,
9010 instance.name,
9011 instance.hypervisor)
9012 remote_info.Raise("Error checking node %s" % instance.primary_node)
9013 remote_info = remote_info.payload
9014 if remote_info and "state" in remote_info:
9015 remote_state = "up"
9016 else:
9017 remote_state = "down"
9018 else:
9019 remote_state = None
9020 if instance.admin_up:
9021 config_state = "up"
9022 else:
9023 config_state = "down"
9025 disks = [self._ComputeDiskStatus(instance, None, device)
9026 for device in instance.disks]
9028 idict = {
9029 "name": instance.name,
9030 "config_state": config_state,
9031 "run_state": remote_state,
9032 "pnode": instance.primary_node,
9033 "snodes": instance.secondary_nodes,
9034 "os": instance.os,
9035 # this happens to be the same format used for hooks
9036 "nics": _NICListToTuple(self, instance.nics),
9037 "disk_template": instance.disk_template,
9038 "disks": disks,
9039 "hypervisor": instance.hypervisor,
9040 "network_port": instance.network_port,
9041 "hv_instance": instance.hvparams,
9042 "hv_actual": cluster.FillHV(instance, skip_globals=True),
9043 "be_instance": instance.beparams,
9044 "be_actual": cluster.FillBE(instance),
9045 "os_instance": instance.osparams,
9046 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
9047 "serial_no": instance.serial_no,
9048 "mtime": instance.mtime,
9049 "ctime": instance.ctime,
9050 "uuid": instance.uuid,
9051 }
9053 result[instance.name] = idict
9055 return result
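# Illustrative shape of the mapping returned above (one entry per wanted
# instance; values invented):
#
#   {"inst1.example.com": {"name": "inst1.example.com",
#                          "config_state": "up", "run_state": "up",
#                          "pnode": "node1.example.com", "snodes": [],
#                          "disks": [...], "nics": [...], "uuid": "..."}}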
9058 class LUSetInstanceParams(LogicalUnit):
9059 """Modifies an instance's parameters.
9061 """
9062 HPATH = "instance-modify"
9063 HTYPE = constants.HTYPE_INSTANCE
9064 _OP_PARAMS = [
9065 _PInstanceName,
9066 ("nics", ht.EmptyList, ht.TList),
9067 ("disks", ht.EmptyList, ht.TList),
9068 ("beparams", ht.EmptyDict, ht.TDict),
9069 ("hvparams", ht.EmptyDict, ht.TDict),
9070 ("disk_template", None, ht.TMaybeString),
9071 ("remote_node", None, ht.TMaybeString),
9072 ("os_name", None, ht.TMaybeString),
9073 ("force_variant", False, ht.TBool),
9074 ("osparams", None, ht.TOr(ht.TDict, ht.TNone)),
9075 _PForce,
9076 ]
9077 REQ_BGL = False
9079 def CheckArguments(self):
9080 if not (self.op.nics or self.op.disks or self.op.disk_template or
9081 self.op.hvparams or self.op.beparams or self.op.os_name):
9082 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
9084 if self.op.hvparams:
9085 _CheckGlobalHvParams(self.op.hvparams)
9087 # Disk validation
9088 disk_addremove = 0
9089 for disk_op, disk_dict in self.op.disks:
9090 utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
9091 if disk_op == constants.DDM_REMOVE:
9092 disk_addremove += 1
9093 continue
9094 elif disk_op == constants.DDM_ADD:
9095 disk_addremove += 1
9096 else:
9097 if not isinstance(disk_op, int):
9098 raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
9099 if not isinstance(disk_dict, dict):
9100 msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
9101 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9103 if disk_op == constants.DDM_ADD:
9104 mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
9105 if mode not in constants.DISK_ACCESS_SET:
9106 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
9108 size = disk_dict.get('size', None)
9109 if size is None:
9110 raise errors.OpPrereqError("Required disk parameter size missing",
9111 errors.ECODE_INVAL)
9112 try:
9113 size = int(size)
9114 except (TypeError, ValueError), err:
9115 raise errors.OpPrereqError("Invalid disk size parameter: %s" %
9116 str(err), errors.ECODE_INVAL)
9117 disk_dict['size'] = size
9118 else:
9119 # modification of disk
9120 if 'size' in disk_dict:
9121 raise errors.OpPrereqError("Disk size change not possible, use"
9122 " grow-disk", errors.ECODE_INVAL)
9124 if disk_addremove > 1:
9125 raise errors.OpPrereqError("Only one disk add or remove operation"
9126 " supported at a time", errors.ECODE_INVAL)
9128 if self.op.disks and self.op.disk_template is not None:
9129 raise errors.OpPrereqError("Disk template conversion and other disk"
9130 " changes not supported at the same time",
9131 errors.ECODE_INVAL)
9133 if self.op.disk_template:
9134 _CheckDiskTemplate(self.op.disk_template)
9135 if (self.op.disk_template in constants.DTS_NET_MIRROR and
9136 self.op.remote_node is None):
9137 raise errors.OpPrereqError("Changing the disk template to a mirrored"
9138 " one requires specifying a secondary node",
9139 errors.ECODE_INVAL)
9141 # NIC validation
9142 nic_addremove = 0
9143 for nic_op, nic_dict in self.op.nics:
9144 utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9145 if nic_op == constants.DDM_REMOVE:
9146 nic_addremove += 1
9147 continue
9148 elif nic_op == constants.DDM_ADD:
9149 nic_addremove += 1
9150 else:
9151 if not isinstance(nic_op, int):
9152 raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9153 if not isinstance(nic_dict, dict):
9154 msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9155 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9157 # nic_dict should be a dict
9158 nic_ip = nic_dict.get('ip', None)
9159 if nic_ip is not None:
9160 if nic_ip.lower() == constants.VALUE_NONE:
9161 nic_dict['ip'] = None
9162 else:
9163 if not netutils.IPAddress.IsValid(nic_ip):
9164 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9165 errors.ECODE_INVAL)
9167 nic_bridge = nic_dict.get('bridge', None)
9168 nic_link = nic_dict.get('link', None)
9169 if nic_bridge and nic_link:
9170 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9171 " at the same time", errors.ECODE_INVAL)
9172 elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9173 nic_dict['bridge'] = None
9174 elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9175 nic_dict['link'] = None
9177 if nic_op == constants.DDM_ADD:
9178 nic_mac = nic_dict.get('mac', None)
9179 if nic_mac is None:
9180 nic_dict['mac'] = constants.VALUE_AUTO
9182 if 'mac' in nic_dict:
9183 nic_mac = nic_dict['mac']
9184 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9185 nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9187 if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9188 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9189 " modifying an existing nic",
9190 errors.ECODE_INVAL)
9192 if nic_addremove > 1:
9193 raise errors.OpPrereqError("Only one NIC add or remove operation"
9194 " supported at a time", errors.ECODE_INVAL)
9196 def ExpandNames(self):
9197 self._ExpandAndLockInstance()
9198 self.needed_locks[locking.LEVEL_NODE] = []
9199 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9201 def DeclareLocks(self, level):
9202 if level == locking.LEVEL_NODE:
9203 self._LockInstancesNodes()
9204 if self.op.disk_template and self.op.remote_node:
9205 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9206 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9208 def BuildHooksEnv(self):
9209 """Build hooks env.
9211 This runs on the master, primary and secondaries.
9213 """
9214 args = dict()
9215 if constants.BE_MEMORY in self.be_new:
9216 args['memory'] = self.be_new[constants.BE_MEMORY]
9217 if constants.BE_VCPUS in self.be_new:
9218 args['vcpus'] = self.be_new[constants.BE_VCPUS]
9219 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9220 # information at all.
9222 args['nics'] = []
9223 nic_override = dict(self.op.nics)
9224 for idx, nic in enumerate(self.instance.nics):
9225 if idx in nic_override:
9226 this_nic_override = nic_override[idx]
9228 this_nic_override = {}
9229 if 'ip' in this_nic_override:
9230 ip = this_nic_override['ip']
9231 else:
9232 ip = nic.ip
9233 if 'mac' in this_nic_override:
9234 mac = this_nic_override['mac']
9235 else:
9236 mac = nic.mac
9237 if idx in self.nic_pnew:
9238 nicparams = self.nic_pnew[idx]
9239 else:
9240 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
9241 mode = nicparams[constants.NIC_MODE]
9242 link = nicparams[constants.NIC_LINK]
9243 args['nics'].append((ip, mac, mode, link))
9244 if constants.DDM_ADD in nic_override:
9245 ip = nic_override[constants.DDM_ADD].get('ip', None)
9246 mac = nic_override[constants.DDM_ADD]['mac']
9247 nicparams = self.nic_pnew[constants.DDM_ADD]
9248 mode = nicparams[constants.NIC_MODE]
9249 link = nicparams[constants.NIC_LINK]
9250 args['nics'].append((ip, mac, mode, link))
9251 elif constants.DDM_REMOVE in nic_override:
9252 del args['nics'][-1]
9254 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
9255 if self.op.disk_template:
9256 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
9257 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9258 return env, nl, nl
9260 def CheckPrereq(self):
9261 """Check prerequisites.
9263 This only checks the instance list against the existing names.
9265 """
9266 # checking the new params on the primary/secondary nodes
9268 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9269 cluster = self.cluster = self.cfg.GetClusterInfo()
9270 assert self.instance is not None, \
9271 "Cannot retrieve locked instance %s" % self.op.instance_name
9272 pnode = instance.primary_node
9273 nodelist = list(instance.all_nodes)
9275 # OS change
9276 if self.op.os_name and not self.op.force:
9277 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
9278 self.op.force_variant)
9279 instance_os = self.op.os_name
9280 else:
9281 instance_os = instance.os
9283 if self.op.disk_template:
9284 if instance.disk_template == self.op.disk_template:
9285 raise errors.OpPrereqError("Instance already has disk template %s" %
9286 instance.disk_template, errors.ECODE_INVAL)
9288 if (instance.disk_template,
9289 self.op.disk_template) not in self._DISK_CONVERSIONS:
9290 raise errors.OpPrereqError("Unsupported disk template conversion from"
9291 " %s to %s" % (instance.disk_template,
9292 self.op.disk_template),
9293 errors.ECODE_INVAL)
9294 _CheckInstanceDown(self, instance, "cannot change disk template")
9295 if self.op.disk_template in constants.DTS_NET_MIRROR:
9296 if self.op.remote_node == pnode:
9297 raise errors.OpPrereqError("Given new secondary node %s is the same"
9298 " as the primary node of the instance" %
9299 self.op.remote_node, errors.ECODE_STATE)
9300 _CheckNodeOnline(self, self.op.remote_node)
9301 _CheckNodeNotDrained(self, self.op.remote_node)
9302 disks = [{"size": d.size} for d in instance.disks]
9303 required = _ComputeDiskSize(self.op.disk_template, disks)
9304 _CheckNodesFreeDisk(self, [self.op.remote_node], required)
9306 # hvparams processing
9307 if self.op.hvparams:
9308 hv_type = instance.hypervisor
9309 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
9310 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
9311 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
9313 # local check
9314 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
9315 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
9316 self.hv_new = hv_new # the new actual values
9317 self.hv_inst = i_hvdict # the new dict (without defaults)
9318 else:
9319 self.hv_new = self.hv_inst = {}
9321 # beparams processing
9322 if self.op.beparams:
9323 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
9324 use_none=True)
9325 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
9326 be_new = cluster.SimpleFillBE(i_bedict)
9327 self.be_new = be_new # the new actual values
9328 self.be_inst = i_bedict # the new dict (without defaults)
9329 else:
9330 self.be_new = self.be_inst = {}
9332 # osparams processing
9333 if self.op.osparams:
9334 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
9335 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
9336 self.os_inst = i_osdict # the new dict (without defaults)
9337 else:
9338 self.os_inst = {}
9340 self.warn = []
9342 if constants.BE_MEMORY in self.op.beparams and not self.op.force:
9343 mem_check_list = [pnode]
9344 if be_new[constants.BE_AUTO_BALANCE]:
9345 # either we changed auto_balance to yes or it was from before
9346 mem_check_list.extend(instance.secondary_nodes)
9347 instance_info = self.rpc.call_instance_info(pnode, instance.name,
9348 instance.hypervisor)
9349 nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
9350 instance.hypervisor)
9351 pninfo = nodeinfo[pnode]
9352 msg = pninfo.fail_msg
9353 if msg:
9354 # Assume the primary node is unreachable and go ahead
9355 self.warn.append("Can't get info from primary node %s: %s" %
9356 (pnode, msg))
9357 elif not isinstance(pninfo.payload.get('memory_free', None), int):
9358 self.warn.append("Node data from primary node %s doesn't contain"
9359 " free memory information" % pnode)
9360 elif instance_info.fail_msg:
9361 self.warn.append("Can't get instance runtime information: %s" %
9362 instance_info.fail_msg)
9363 else:
9364 if instance_info.payload:
9365 current_mem = int(instance_info.payload['memory'])
9366 else:
9367 # Assume instance not running
9368 # (there is a slight race condition here, but it's not very probable,
9369 # and we have no other way to check)
9370 current_mem = 0
9371 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
9372 pninfo.payload['memory_free'])
9373 if miss_mem > 0:
9374 raise errors.OpPrereqError("This change will prevent the instance"
9375 " from starting, due to %d MB of memory"
9376 " missing on its primary node" % miss_mem,
9377 errors.ECODE_NORES)
9379 if be_new[constants.BE_AUTO_BALANCE]:
9380 for node, nres in nodeinfo.items():
9381 if node not in instance.secondary_nodes:
9382 continue
9383 msg = nres.fail_msg
9384 if msg:
9385 self.warn.append("Can't get info from secondary node %s: %s" %
9386 (node, msg))
9387 elif not isinstance(nres.payload.get('memory_free', None), int):
9388 self.warn.append("Secondary node %s didn't return free"
9389 " memory information" % node)
9390 elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
9391 self.warn.append("Not enough memory to failover instance to"
9392 " secondary node %s" % node)
9394 # NIC processing
9395 self.nic_pnew = {}
9396 self.nic_pinst = {}
9397 for nic_op, nic_dict in self.op.nics:
9398 if nic_op == constants.DDM_REMOVE:
9399 if not instance.nics:
9400 raise errors.OpPrereqError("Instance has no NICs, cannot remove",
9401 errors.ECODE_INVAL)
9402 continue
9403 if nic_op != constants.DDM_ADD:
9404 # an existing nic
9405 if not instance.nics:
9406 raise errors.OpPrereqError("Invalid NIC index %s, instance has"
9407 " no NICs" % nic_op,
9409 if nic_op < 0 or nic_op >= len(instance.nics):
9410 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
9411 " are 0 to %s" %
9412 (nic_op, len(instance.nics) - 1),
9413 errors.ECODE_INVAL)
9414 old_nic_params = instance.nics[nic_op].nicparams
9415 old_nic_ip = instance.nics[nic_op].ip
9416 else:
9417 old_nic_params = {}
9418 old_nic_ip = None
9420 update_params_dict = dict([(key, nic_dict[key])
9421 for key in constants.NICS_PARAMETERS
9422 if key in nic_dict])
9424 if 'bridge' in nic_dict:
9425 update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
9427 new_nic_params = _GetUpdatedParams(old_nic_params,
9428 update_params_dict)
9429 utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
9430 new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
9431 objects.NIC.CheckParameterSyntax(new_filled_nic_params)
9432 self.nic_pinst[nic_op] = new_nic_params
9433 self.nic_pnew[nic_op] = new_filled_nic_params
9434 new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
9436 if new_nic_mode == constants.NIC_MODE_BRIDGED:
9437 nic_bridge = new_filled_nic_params[constants.NIC_LINK]
9438 msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
9439 if msg:
9440 msg = "Error checking bridges on node %s: %s" % (pnode, msg)
9441 if self.op.force:
9442 self.warn.append(msg)
9443 else:
9444 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
9445 if new_nic_mode == constants.NIC_MODE_ROUTED:
9446 if 'ip' in nic_dict:
9447 nic_ip = nic_dict['ip']
9448 else:
9449 nic_ip = old_nic_ip
9450 if nic_ip is None:
9451 raise errors.OpPrereqError('Cannot set the nic ip to None'
9452 ' on a routed nic', errors.ECODE_INVAL)
9453 if 'mac' in nic_dict:
9454 nic_mac = nic_dict['mac']
9455 if nic_mac is None:
9456 raise errors.OpPrereqError('Cannot set the nic mac to None',
9457 errors.ECODE_INVAL)
9458 elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9459 # otherwise generate the mac
9460 nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
9461 else:
9462 # or validate/reserve the current one
9463 try:
9464 self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
9465 except errors.ReservationError:
9466 raise errors.OpPrereqError("MAC address %s already in use"
9467 " in cluster" % nic_mac,
9468 errors.ECODE_NOTUNIQUE)
9471 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
9472 raise errors.OpPrereqError("Disk operations not supported for"
9473 " diskless instances",
9475 for disk_op, _ in self.op.disks:
9476 if disk_op == constants.DDM_REMOVE:
9477 if len(instance.disks) == 1:
9478 raise errors.OpPrereqError("Cannot remove the last disk of"
9479 " an instance", errors.ECODE_INVAL)
9480 _CheckInstanceDown(self, instance, "cannot remove disks")
9482 if (disk_op == constants.DDM_ADD and
9483 len(instance.disks) >= constants.MAX_DISKS):
9484 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
9485 " add more" % constants.MAX_DISKS,
9486 errors.ECODE_STATE)
9487 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
9488 # an existing disk
9489 if disk_op < 0 or disk_op >= len(instance.disks):
9490 raise errors.OpPrereqError("Invalid disk index %s, valid values"
9491 " are 0 to %s" %
9492 (disk_op, len(instance.disks)),
9493 errors.ECODE_INVAL)
9497 def _ConvertPlainToDrbd(self, feedback_fn):
9498 """Converts an instance from plain to drbd.
9500 """
9501 feedback_fn("Converting template to drbd")
9502 instance = self.instance
9503 pnode = instance.primary_node
9504 snode = self.op.remote_node
9506 # create a fake disk info for _GenerateDiskTemplate
9507 disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
9508 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9509 instance.name, pnode, [snode],
9510 disk_info, None, None, 0, feedback_fn)
9511 info = _GetInstanceInfoText(instance)
9512 feedback_fn("Creating aditional volumes...")
9513 # first, create the missing data and meta devices
9514 for disk in new_disks:
9515 # unfortunately this is... not too nice
9516 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9517 info, True)
9518 for child in disk.children:
9519 _CreateSingleBlockDev(self, snode, instance, child, info, True)
9520 # at this stage, all new LVs have been created, we can rename the
9521 # old ones
9522 feedback_fn("Renaming original volumes...")
9523 rename_list = [(o, n.children[0].logical_id)
9524 for (o, n) in zip(instance.disks, new_disks)]
9525 result = self.rpc.call_blockdev_rename(pnode, rename_list)
9526 result.Raise("Failed to rename original LVs")
9528 feedback_fn("Initializing DRBD devices...")
9529 # all child devices are in place, we can now create the DRBD devices
9530 for disk in new_disks:
9531 for node in [pnode, snode]:
9532 f_create = node == pnode
9533 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9535 # at this point, the instance has been modified
9536 instance.disk_template = constants.DT_DRBD8
9537 instance.disks = new_disks
9538 self.cfg.Update(instance, feedback_fn)
9540 # disks are created, waiting for sync
9541 disk_abort = not _WaitForSync(self, instance)
9542 if disk_abort:
9543 raise errors.OpExecError("There are some degraded disks for"
9544 " this instance, please cleanup manually")
9546 def _ConvertDrbdToPlain(self, feedback_fn):
9547 """Converts an instance from drbd to plain.
9549 """
9550 instance = self.instance
9551 assert len(instance.secondary_nodes) == 1
9552 pnode = instance.primary_node
9553 snode = instance.secondary_nodes[0]
9554 feedback_fn("Converting template to plain")
9556 old_disks = instance.disks
9557 new_disks = [d.children[0] for d in old_disks]
9559 # copy over size and mode
9560 for parent, child in zip(old_disks, new_disks):
9561 child.size = parent.size
9562 child.mode = parent.mode
9564 # update instance structure
9565 instance.disks = new_disks
9566 instance.disk_template = constants.DT_PLAIN
9567 self.cfg.Update(instance, feedback_fn)
9569 feedback_fn("Removing volumes on the secondary node...")
9570 for disk in old_disks:
9571 self.cfg.SetDiskID(disk, snode)
9572 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9573 if msg:
9574 self.LogWarning("Could not remove block device %s on node %s,"
9575 " continuing anyway: %s", disk.iv_name, snode, msg)
9577 feedback_fn("Removing unneeded volumes on the primary node...")
9578 for idx, disk in enumerate(old_disks):
9579 meta = disk.children[1]
9580 self.cfg.SetDiskID(meta, pnode)
9581 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9582 if msg:
9583 self.LogWarning("Could not remove metadata for disk %d on node %s,"
9584 " continuing anyway: %s", idx, pnode, msg)
9587 def Exec(self, feedback_fn):
9588 """Modifies an instance.
9590 All parameters take effect only at the next restart of the instance.
9592 """
9593 # Process here the warnings from CheckPrereq, as we don't have a
9594 # feedback_fn there.
9595 for warn in self.warn:
9596 feedback_fn("WARNING: %s" % warn)
9598 result = []
9599 instance = self.instance
9600 # disk changes
9601 for disk_op, disk_dict in self.op.disks:
9602 if disk_op == constants.DDM_REMOVE:
9603 # remove the last disk
9604 device = instance.disks.pop()
9605 device_idx = len(instance.disks)
9606 for node, disk in device.ComputeNodeTree(instance.primary_node):
9607 self.cfg.SetDiskID(disk, node)
9608 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9609 if msg:
9610 self.LogWarning("Could not remove disk/%d on node %s: %s,"
9611 " continuing anyway", device_idx, node, msg)
9612 result.append(("disk/%d" % device_idx, "remove"))
9613 elif disk_op == constants.DDM_ADD:
9614 # add a new disk
9615 if instance.disk_template == constants.DT_FILE:
9616 file_driver, file_path = instance.disks[0].logical_id
9617 file_path = os.path.dirname(file_path)
9618 else:
9619 file_driver = file_path = None
9620 disk_idx_base = len(instance.disks)
9621 new_disk = _GenerateDiskTemplate(self,
9622 instance.disk_template,
9623 instance.name, instance.primary_node,
9624 instance.secondary_nodes,
9625 [disk_dict],
9626 file_path,
9627 file_driver,
9628 disk_idx_base, feedback_fn)[0]
9629 instance.disks.append(new_disk)
9630 info = _GetInstanceInfoText(instance)
9632 logging.info("Creating volume %s for instance %s",
9633 new_disk.iv_name, instance.name)
9634 # Note: this needs to be kept in sync with _CreateDisks
9636 for node in instance.all_nodes:
9637 f_create = node == instance.primary_node
9638 try:
9639 _CreateBlockDev(self, node, instance, new_disk,
9640 f_create, info, f_create)
9641 except errors.OpExecError, err:
9642 self.LogWarning("Failed to create volume %s (%s) on"
9643 " node %s: %s",
9644 new_disk.iv_name, new_disk, node, err)
9645 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9646 (new_disk.size, new_disk.mode)))
9647 else:
9648 # change a given disk
9649 instance.disks[disk_op].mode = disk_dict['mode']
9650 result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9652 if self.op.disk_template:
9653 r_shut = _ShutdownInstanceDisks(self, instance)
9654 if not r_shut:
9655 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
9656 " proceed with disk template conversion")
9657 mode = (instance.disk_template, self.op.disk_template)
9658 try:
9659 self._DISK_CONVERSIONS[mode](self, feedback_fn)
9660 except:
9661 self.cfg.ReleaseDRBDMinors(instance.name)
9662 raise
9663 result.append(("disk_template", self.op.disk_template))
9666 for nic_op, nic_dict in self.op.nics:
9667 if nic_op == constants.DDM_REMOVE:
9668 # remove the last nic
9669 del instance.nics[-1]
9670 result.append(("nic.%d" % len(instance.nics), "remove"))
9671 elif nic_op == constants.DDM_ADD:
9672 # mac and bridge should be set, by now
9673 mac = nic_dict['mac']
9674 ip = nic_dict.get('ip', None)
9675 nicparams = self.nic_pinst[constants.DDM_ADD]
9676 new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9677 instance.nics.append(new_nic)
9678 result.append(("nic.%d" % (len(instance.nics) - 1),
9679 "add:mac=%s,ip=%s,mode=%s,link=%s" %
9680 (new_nic.mac, new_nic.ip,
9681 self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9682 self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9683 )))
9684 else:
9685 for key in 'mac', 'ip':
9686 if key in nic_dict:
9687 setattr(instance.nics[nic_op], key, nic_dict[key])
9688 if nic_op in self.nic_pinst:
9689 instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9690 for key, val in nic_dict.iteritems():
9691 result.append(("nic.%s/%d" % (key, nic_op), val))
9694 if self.op.hvparams:
9695 instance.hvparams = self.hv_inst
9696 for key, val in self.op.hvparams.iteritems():
9697 result.append(("hv/%s" % key, val))
9700 if self.op.beparams:
9701 instance.beparams = self.be_inst
9702 for key, val in self.op.beparams.iteritems():
9703 result.append(("be/%s" % key, val))
9705 # OS change
9706 if self.op.os_name:
9707 instance.os = self.op.os_name
9710 if self.op.osparams:
9711 instance.osparams = self.os_inst
9712 for key, val in self.op.osparams.iteritems():
9713 result.append(("os/%s" % key, val))
9715 self.cfg.Update(instance, feedback_fn)
9717 return result
9719 _DISK_CONVERSIONS = {
9720 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
9721 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
9722 }
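# Illustrative sketch of how Exec uses this dispatch table; the key is the
# (current, requested) template pair and the value is called as an unbound
# method:
#   mode = (constants.DT_PLAIN, constants.DT_DRBD8)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)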
9725 class LUQueryExports(NoHooksLU):
9726 """Query the exports list
9730 ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
9731 ("use_locking", False, ht.TBool),
9735 def ExpandNames(self):
9736 self.needed_locks = {}
9737 self.share_locks[locking.LEVEL_NODE] = 1
9738 if not self.op.nodes:
9739 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9740 else:
9741 self.needed_locks[locking.LEVEL_NODE] = \
9742 _GetWantedNodes(self, self.op.nodes)
9744 def Exec(self, feedback_fn):
9745 """Compute the list of all the exported system images.
9747 @rtype: dict
9748 @return: a dictionary with the structure node->(export-list)
9749 where export-list is a list of the instances exported on
9750 that node.
9752 """
9753 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9754 rpcresult = self.rpc.call_export_list(self.nodes)
9755 result = {}
9756 for node in rpcresult:
9757 if rpcresult[node].fail_msg:
9758 result[node] = False
9759 else:
9760 result[node] = rpcresult[node].payload
9762 return result
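# Example of the structure returned above (hypothetical names): a node maps
# to False when its export list could not be retrieved, otherwise to the list
# of exports found on it:
#   {"node1.example.com": ["instance1.example.com"],
#    "node2.example.com": False}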
9765 class LUPrepareExport(NoHooksLU):
9766 """Prepares an instance for an export and returns useful information.
9771 ("mode", ht.NoDefault, ht.TElemOf(constants.EXPORT_MODES)),
9775 def ExpandNames(self):
9776 self._ExpandAndLockInstance()
9778 def CheckPrereq(self):
9779 """Check prerequisites.
9781 """
9782 instance_name = self.op.instance_name
9784 self.instance = self.cfg.GetInstanceInfo(instance_name)
9785 assert self.instance is not None, \
9786 "Cannot retrieve locked instance %s" % self.op.instance_name
9787 _CheckNodeOnline(self, self.instance.primary_node)
9789 self._cds = _GetClusterDomainSecret()
9791 def Exec(self, feedback_fn):
9792 """Prepares an instance for an export.
9794 """
9795 instance = self.instance
9797 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9798 salt = utils.GenerateSecret(8)
9800 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
9801 result = self.rpc.call_x509_cert_create(instance.primary_node,
9802 constants.RIE_CERT_VALIDITY)
9803 result.Raise("Can't create X509 key and certificate on %s" % result.node)
9805 (name, cert_pem) = result.payload
9807 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
9808 cert_pem)
9810 return {
9811 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
9812 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
9813 salt),
9814 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
9815 }
9817 return None
9820 class LUExportInstance(LogicalUnit):
9821 """Export an instance to an image in the cluster.
9824 HPATH = "instance-export"
9825 HTYPE = constants.HTYPE_INSTANCE
9828 ("target_node", ht.NoDefault, ht.TOr(ht.TNonEmptyString, ht.TList)),
9829 ("shutdown", True, ht.TBool),
9831 ("remove_instance", False, ht.TBool),
9832 ("ignore_remove_failures", False, ht.TBool),
9833 ("mode", constants.EXPORT_MODE_LOCAL, ht.TElemOf(constants.EXPORT_MODES)),
9834 ("x509_key_name", None, ht.TOr(ht.TList, ht.TNone)),
9835 ("destination_x509_ca", None, ht.TMaybeString),
9839 def CheckArguments(self):
9840 """Check the arguments.
9842 """
9843 self.x509_key_name = self.op.x509_key_name
9844 self.dest_x509_ca_pem = self.op.destination_x509_ca
9846 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9847 if not self.x509_key_name:
9848 raise errors.OpPrereqError("Missing X509 key name for encryption",
9849 errors.ECODE_INVAL)
9851 if not self.dest_x509_ca_pem:
9852 raise errors.OpPrereqError("Missing destination X509 CA",
9853 errors.ECODE_INVAL)
9855 def ExpandNames(self):
9856 self._ExpandAndLockInstance()
9858 # Lock all nodes for local exports
9859 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9860 # FIXME: lock only instance primary and destination node
9862 # Sad but true, for now we have to lock all nodes, as we don't know where
9863 # the previous export might be, and in this LU we search for it and
9864 # remove it from its current node. In the future we could fix this by:
9865 # - making a tasklet to search (share-lock all), then create the
9866 # new one, then one to remove, after
9867 # - removing the removal operation altogether
9868 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9870 def DeclareLocks(self, level):
9871 """Last minute lock declaration."""
9872 # All nodes are locked anyway, so nothing to do here.
9874 def BuildHooksEnv(self):
9875 """Build hooks env.
9877 This will run on the master, primary node and target node.
9879 """
9880 env = {
9881 "EXPORT_MODE": self.op.mode,
9882 "EXPORT_NODE": self.op.target_node,
9883 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
9884 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
9885 # TODO: Generic function for boolean env variables
9886 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
9889 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9891 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
9893 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9894 nl.append(self.op.target_node)
9896 return env, nl, nl
9898 def CheckPrereq(self):
9899 """Check prerequisites.
9901 This checks that the instance and node names are valid.
9903 """
9904 instance_name = self.op.instance_name
9906 self.instance = self.cfg.GetInstanceInfo(instance_name)
9907 assert self.instance is not None, \
9908 "Cannot retrieve locked instance %s" % self.op.instance_name
9909 _CheckNodeOnline(self, self.instance.primary_node)
9911 if (self.op.remove_instance and self.instance.admin_up and
9912 not self.op.shutdown):
9913 raise errors.OpPrereqError("Can not remove instance without shutting it"
9914 " down before")
9916 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9917 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
9918 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
9919 assert self.dst_node is not None
9921 _CheckNodeOnline(self, self.dst_node.name)
9922 _CheckNodeNotDrained(self, self.dst_node.name)
9924 self._cds = None
9925 self.dest_disk_info = None
9926 self.dest_x509_ca = None
9928 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9929 self.dst_node = None
9931 if len(self.op.target_node) != len(self.instance.disks):
9932 raise errors.OpPrereqError(("Received destination information for %s"
9933 " disks, but instance %s has %s disks") %
9934 (len(self.op.target_node), instance_name,
9935 len(self.instance.disks)),
9936 errors.ECODE_INVAL)
9938 cds = _GetClusterDomainSecret()
9940 # Check X509 key name
9941 try:
9942 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
9943 except (TypeError, ValueError), err:
9944 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
9946 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
9947 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
9948 errors.ECODE_INVAL)
9950 # Load and verify CA
9951 try:
9952 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
9953 except OpenSSL.crypto.Error, err:
9954 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
9955 (err, ), errors.ECODE_INVAL)
9957 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9958 if errcode is not None:
9959 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
9960 (msg, ), errors.ECODE_INVAL)
9962 self.dest_x509_ca = cert
9964 # Verify target information
9965 disk_info = []
9966 for idx, disk_data in enumerate(self.op.target_node):
9967 try:
9968 (host, port, magic) = \
9969 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
9970 except errors.GenericError, err:
9971 raise errors.OpPrereqError("Target info for disk %s: %s" %
9972 (idx, err), errors.ECODE_INVAL)
9974 disk_info.append((host, port, magic))
9976 assert len(disk_info) == len(self.op.target_node)
9977 self.dest_disk_info = disk_info
9979 else:
9980 raise errors.ProgrammerError("Unhandled export mode %r" %
9981 self.op.mode)
9983 # instance disk type verification
9984 # TODO: Implement export support for file-based disks
9985 for disk in self.instance.disks:
9986 if disk.dev_type == constants.LD_FILE:
9987 raise errors.OpPrereqError("Export not supported for instances with"
9988 " file-based disks", errors.ECODE_INVAL)
9990 def _CleanupExports(self, feedback_fn):
9991 """Removes exports of current instance from all other nodes.
9993 If an instance in a cluster with nodes A..D was exported to node C, its
9994 exports will be removed from the nodes A, B and D.
9996 """
9997 assert self.op.mode != constants.EXPORT_MODE_REMOTE
9999 nodelist = self.cfg.GetNodeList()
10000 nodelist.remove(self.dst_node.name)
10002 # on one-node clusters nodelist will be empty after the removal
10003 # if we proceed the backup would be removed because OpQueryExports
10004 # substitutes an empty list with the full cluster node list.
10005 iname = self.instance.name
10006 if nodelist:
10007 feedback_fn("Removing old exports for instance %s" % iname)
10008 exportlist = self.rpc.call_export_list(nodelist)
10009 for node in exportlist:
10010 if exportlist[node].fail_msg:
10011 continue
10012 if iname in exportlist[node].payload:
10013 msg = self.rpc.call_export_remove(node, iname).fail_msg
10014 if msg:
10015 self.LogWarning("Could not remove older export for instance %s"
10016 " on node %s: %s", iname, node, msg)
10018 def Exec(self, feedback_fn):
10019 """Export an instance to an image in the cluster.
10021 """
10022 assert self.op.mode in constants.EXPORT_MODES
10024 instance = self.instance
10025 src_node = instance.primary_node
10027 if self.op.shutdown:
10028 # shutdown the instance, but not the disks
10029 feedback_fn("Shutting down instance %s" % instance.name)
10030 result = self.rpc.call_instance_shutdown(src_node, instance,
10031 self.op.shutdown_timeout)
10032 # TODO: Maybe ignore failures if ignore_remove_failures is set
10033 result.Raise("Could not shutdown instance %s on"
10034 " node %s" % (instance.name, src_node))
10036 # set the disks ID correctly since call_instance_start needs the
10037 # correct drbd minor to create the symlinks
10038 for disk in instance.disks:
10039 self.cfg.SetDiskID(disk, src_node)
10041 activate_disks = (not instance.admin_up)
10043 if activate_disks:
10044 # Activate the instance disks if we're exporting a stopped instance
10045 feedback_fn("Activating disks for %s" % instance.name)
10046 _StartInstanceDisks(self, instance, None)
10048 try:
10049 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
10050 instance)
10052 helper.CreateSnapshots()
10053 try:
10054 if (self.op.shutdown and instance.admin_up and
10055 not self.op.remove_instance):
10056 assert not activate_disks
10057 feedback_fn("Starting instance %s" % instance.name)
10058 result = self.rpc.call_instance_start(src_node, instance, None, None)
10059 msg = result.fail_msg
10060 if msg:
10061 feedback_fn("Failed to start instance: %s" % msg)
10062 _ShutdownInstanceDisks(self, instance)
10063 raise errors.OpExecError("Could not start instance: %s" % msg)
10065 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10066 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
10067 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10068 connect_timeout = constants.RIE_CONNECT_TIMEOUT
10069 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10071 (key_name, _, _) = self.x509_key_name
10073 dest_ca_pem = \
10074 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
10075 self.dest_x509_ca)
10077 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
10078 key_name, dest_ca_pem,
10079 timeouts)
10080 finally:
10081 helper.Cleanup()
10083 # Check for backwards compatibility
10084 assert len(dresults) == len(instance.disks)
10085 assert compat.all(isinstance(i, bool) for i in dresults), \
10086 "Not all results are boolean: %r" % dresults
10088 finally:
10089 if activate_disks:
10090 feedback_fn("Deactivating disks for %s" % instance.name)
10091 _ShutdownInstanceDisks(self, instance)
10093 if not (compat.all(dresults) and fin_resu):
10094 failures = []
10095 if not fin_resu:
10096 failures.append("export finalization")
10097 if not compat.all(dresults):
10098 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
10099 if not dsk)
10100 failures.append("disk export: disk(s) %s" % fdsk)
10102 raise errors.OpExecError("Export failed, errors in %s" %
10103 utils.CommaJoin(failures))
10105 # At this point, the export was successful, we can cleanup/finish
10107 # Remove instance if requested
10108 if self.op.remove_instance:
10109 feedback_fn("Removing instance %s" % instance.name)
10110 _RemoveInstance(self, feedback_fn, instance,
10111 self.op.ignore_remove_failures)
10113 if self.op.mode == constants.EXPORT_MODE_LOCAL:
10114 self._CleanupExports(feedback_fn)
10116 return fin_resu, dresults
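# Note on the return value: fin_resu is the overall finalization status and
# dresults contains one boolean per instance disk (see the asserts above), so
# a fully successful two-disk export returns something like (True, [True, True]).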
10119 class LURemoveExport(NoHooksLU):
10120 """Remove exports related to the named instance.
10128 def ExpandNames(self):
10129 self.needed_locks = {}
10130 # We need all nodes to be locked in order for RemoveExport to work, but we
10131 # don't need to lock the instance itself, as nothing will happen to it (and
10132 # we can remove exports also for a removed instance)
10133 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10135 def Exec(self, feedback_fn):
10136 """Remove any export.
10138 """
10139 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
10140 # If the instance was not found we'll try with the name that was passed in.
10141 # This will only work if it was an FQDN, though.
10142 fqdn_warn = False
10143 if not instance_name:
10144 fqdn_warn = True
10145 instance_name = self.op.instance_name
10147 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
10148 exportlist = self.rpc.call_export_list(locked_nodes)
10149 found = False
10150 for node in exportlist:
10151 msg = exportlist[node].fail_msg
10152 if msg:
10153 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
10154 continue
10155 if instance_name in exportlist[node].payload:
10156 found = True
10157 result = self.rpc.call_export_remove(node, instance_name)
10158 msg = result.fail_msg
10159 if msg:
10160 logging.error("Could not remove export for instance %s"
10161 " on node %s: %s", instance_name, node, msg)
10163 if fqdn_warn and not found:
10164 feedback_fn("Export not found. If trying to remove an export belonging"
10165 " to a deleted instance please use its Fully Qualified"
10169 class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
10170 """Generic tags LU.
10172 This is an abstract class which is the parent of all the other tags LUs.
10174 """
10176 def ExpandNames(self):
10177 self.needed_locks = {}
10178 if self.op.kind == constants.TAG_NODE:
10179 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
10180 self.needed_locks[locking.LEVEL_NODE] = self.op.name
10181 elif self.op.kind == constants.TAG_INSTANCE:
10182 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
10183 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
10185 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
10186 # not possible to acquire the BGL based on opcode parameters)
10188 def CheckPrereq(self):
10189 """Check prerequisites.
10191 """
10192 if self.op.kind == constants.TAG_CLUSTER:
10193 self.target = self.cfg.GetClusterInfo()
10194 elif self.op.kind == constants.TAG_NODE:
10195 self.target = self.cfg.GetNodeInfo(self.op.name)
10196 elif self.op.kind == constants.TAG_INSTANCE:
10197 self.target = self.cfg.GetInstanceInfo(self.op.name)
10198 else:
10199 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
10200 str(self.op.kind), errors.ECODE_INVAL)
10203 class LUGetTags(TagsLU):
10204 """Returns the tags of a given object.
10208 ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES)),
10209 # Name is only meaningful for nodes and instances
10210 ("name", ht.NoDefault, ht.TMaybeString),
10214 def ExpandNames(self):
10215 TagsLU.ExpandNames(self)
10217 # Share locks as this is only a read operation
10218 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
10220 def Exec(self, feedback_fn):
10221 """Returns the tag list.
10223 """
10224 return list(self.target.GetTags())
10227 class LUSearchTags(NoHooksLU):
10228 """Searches the tags for a given pattern.
10232 ("pattern", ht.NoDefault, ht.TNonEmptyString),
10236 def ExpandNames(self):
10237 self.needed_locks = {}
10239 def CheckPrereq(self):
10240 """Check prerequisites.
10242 This checks the pattern passed for validity by compiling it.
10245 try:
10246 self.re = re.compile(self.op.pattern)
10247 except re.error, err:
10248 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
10249 (self.op.pattern, err), errors.ECODE_INVAL)
10251 def Exec(self, feedback_fn):
10252 """Returns the tag list.
10255 cfg = self.cfg
10256 tgts = [("/cluster", cfg.GetClusterInfo())]
10257 ilist = cfg.GetAllInstancesInfo().values()
10258 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
10259 nlist = cfg.GetAllNodesInfo().values()
10260 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
10261 results = []
10262 for path, target in tgts:
10263 for tag in target.GetTags():
10264 if self.re.search(tag):
10265 results.append((path, tag))
10266 return results
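# Example of the list returned above (hypothetical names): every match is a
# (path, tag) pair, e.g.:
#   [("/cluster", "production"), ("/instances/inst1.example.com", "production")]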
10269 class LUAddTags(TagsLU):
10270 """Sets a tag on a given object.
10274 ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES)),
10275 # Name is only meaningful for nodes and instances
10276 ("name", ht.NoDefault, ht.TMaybeString),
10277 ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
10281 def CheckPrereq(self):
10282 """Check prerequisites.
10284 This checks the type and length of the tag name and value.
10286 """
10287 TagsLU.CheckPrereq(self)
10288 for tag in self.op.tags:
10289 objects.TaggableObject.ValidateTag(tag)
10291 def Exec(self, feedback_fn):
10292 """Sets the tag.
10294 """
10295 try:
10296 for tag in self.op.tags:
10297 self.target.AddTag(tag)
10298 except errors.TagError, err:
10299 raise errors.OpExecError("Error while setting tag: %s" % str(err))
10300 self.cfg.Update(self.target, feedback_fn)
10303 class LUDelTags(TagsLU):
10304 """Delete a list of tags from a given object.
10308 ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES)),
10309 # Name is only meaningful for nodes and instances
10310 ("name", ht.NoDefault, ht.TMaybeString),
10311 ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
10315 def CheckPrereq(self):
10316 """Check prerequisites.
10318 This checks that we have the given tag.
10320 """
10321 TagsLU.CheckPrereq(self)
10322 for tag in self.op.tags:
10323 objects.TaggableObject.ValidateTag(tag)
10324 del_tags = frozenset(self.op.tags)
10325 cur_tags = self.target.GetTags()
10327 diff_tags = del_tags - cur_tags
10328 if diff_tags:
10329 diff_names = ("'%s'" % i for i in sorted(diff_tags))
10330 raise errors.OpPrereqError("Tag(s) %s not found" %
10331 (utils.CommaJoin(diff_names), ),
10332 errors.ECODE_NOENT)
10334 def Exec(self, feedback_fn):
10335 """Remove the tag from the object.
10337 """
10338 for tag in self.op.tags:
10339 self.target.RemoveTag(tag)
10340 self.cfg.Update(self.target, feedback_fn)
10343 class LUTestDelay(NoHooksLU):
10344 """Sleep for a specified amount of time.
10346 This LU sleeps on the master and/or nodes for a specified amount of
10347 time.
10349 """
10350 _OP_PARAMS = [
10351 ("duration", ht.NoDefault, ht.TFloat),
10352 ("on_master", True, ht.TBool),
10353 ("on_nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
10354 ("repeat", 0, ht.TPositiveInt)
10358 def ExpandNames(self):
10359 """Expand names and set required locks.
10361 This expands the node list, if any.
10363 """
10364 self.needed_locks = {}
10365 if self.op.on_nodes:
10366 # _GetWantedNodes can be used here, but is not always appropriate to use
10367 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
10368 # more information.
10369 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
10370 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
10372 def _TestDelay(self):
10373 """Do the actual sleep.
10375 """
10376 if self.op.on_master:
10377 if not utils.TestDelay(self.op.duration):
10378 raise errors.OpExecError("Error during master delay test")
10379 if self.op.on_nodes:
10380 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
10381 for node, node_result in result.items():
10382 node_result.Raise("Failure during rpc call to node %s" % node)
10384 def Exec(self, feedback_fn):
10385 """Execute the test delay opcode, with the wanted repetitions.
10387 """
10388 if self.op.repeat == 0:
10389 self._TestDelay()
10390 else:
10391 top_value = self.op.repeat - 1
10392 for i in range(self.op.repeat):
10393 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
10394 self._TestDelay()
10397 class LUTestJobqueue(NoHooksLU):
10398 """Utility LU to test some aspects of the job queue.
10402 ("notify_waitlock", False, ht.TBool),
10403 ("notify_exec", False, ht.TBool),
10404 ("log_messages", ht.EmptyList, ht.TListOf(ht.TString)),
10405 ("fail", False, ht.TBool),
10409 # Must be lower than default timeout for WaitForJobChange to see whether it
10410 # notices changed jobs
10411 _CLIENT_CONNECT_TIMEOUT = 20.0
10412 _CLIENT_CONFIRM_TIMEOUT = 60.0
10414 @classmethod
10415 def _NotifyUsingSocket(cls, cb, errcls):
10416 """Opens a Unix socket and waits for another program to connect.
10419 @param cb: Callback to send socket name to client
10420 @type errcls: class
10421 @param errcls: Exception class to use for errors
10423 """
10424 # Using a temporary directory as there's no easy way to create temporary
10425 # sockets without writing a custom loop around tempfile.mktemp and
10426 # socket.bind
10427 tmpdir = tempfile.mkdtemp()
10428 try:
10429 tmpsock = utils.PathJoin(tmpdir, "sock")
10431 logging.debug("Creating temporary socket at %s", tmpsock)
10432 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
10433 try:
10434 sock.bind(tmpsock)
10435 sock.listen(1)
10437 # Send details to client
10438 cb(tmpsock)
10440 # Wait for client to connect before continuing
10441 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
10442 try:
10443 (conn, _) = sock.accept()
10444 except socket.error, err:
10445 raise errcls("Client didn't connect in time (%s)" % err)
10446 finally:
10447 sock.close()
10448 finally:
10449 # Remove as soon as client is connected
10450 shutil.rmtree(tmpdir)
10452 # Wait for client to close
10453 try:
10454 try:
10455 # pylint: disable-msg=E1101
10456 # Instance of '_socketobject' has no ... member
10457 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
10458 conn.recv(1)
10459 except socket.error, err:
10460 raise errcls("Client failed to confirm notification (%s)" % err)
10461 finally:
10462 conn.close()
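# Summary of the notification protocol implemented above: a throw-away Unix
# socket is created, its path is handed to the client via the callback, the
# code waits (with a timeout) for the client to connect and then for the
# client to confirm the notification; either timeout is reported via errcls.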
10464 def _SendNotification(self, test, arg, sockname):
10465 """Sends a notification to the client.
10468 @param test: Test name
10469 @param arg: Test argument (depends on test)
10470 @type sockname: string
10471 @param sockname: Socket path
10473 """
10474 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
10476 def _Notify(self, prereq, test, arg):
10477 """Notifies the client of a test.
10480 @param prereq: Whether this is a prereq-phase test
10482 @param test: Test name
10483 @param arg: Test argument (depends on test)
10485 """
10486 if prereq:
10487 errcls = errors.OpPrereqError
10488 else:
10489 errcls = errors.OpExecError
10491 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
10492 test, arg),
10493 errcls)
10495 def CheckArguments(self):
10496 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
10497 self.expandnames_calls = 0
10499 def ExpandNames(self):
10500 checkargs_calls = getattr(self, "checkargs_calls", 0)
10501 if checkargs_calls < 1:
10502 raise errors.ProgrammerError("CheckArguments was not called")
10504 self.expandnames_calls += 1
10506 if self.op.notify_waitlock:
10507 self._Notify(True, constants.JQT_EXPANDNAMES, None)
10509 self.LogInfo("Expanding names")
10511 # Get lock on master node (just to get a lock, not for a particular reason)
10512 self.needed_locks = {
10513 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
10516 def Exec(self, feedback_fn):
10517 if self.expandnames_calls < 1:
10518 raise errors.ProgrammerError("ExpandNames was not called")
10520 if self.op.notify_exec:
10521 self._Notify(False, constants.JQT_EXEC, None)
10523 self.LogInfo("Executing")
10525 if self.op.log_messages:
10526 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
10527 for idx, msg in enumerate(self.op.log_messages):
10528 self.LogInfo("Sending log message %s", idx + 1)
10529 feedback_fn(constants.JQT_MSGPREFIX + msg)
10530 # Report how many test messages have been sent
10531 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
10533 if self.op.fail:
10534 raise errors.OpExecError("Opcode failure was requested")
10536 return True
10539 class IAllocator(object):
10540 """IAllocator framework.
10542 An IAllocator instance has three sets of attributes:
10543 - cfg that is needed to query the cluster
10544 - input data (all members of the _KEYS class attribute are required)
10545 - four buffer attributes (in|out_data|text), that represent the
10546 input (to the external script) in text and data structure format,
10547 and the output from it, again in two formats
10548 - the result variables from the script (success, info, nodes) for
10549 easy usage
10551 """
10552 # pylint: disable-msg=R0902
10553 # lots of instance attributes
10555 "name", "mem_size", "disks", "disk_template",
10556 "os", "tags", "nics", "vcpus", "hypervisor",
10559 "name", "relocate_from",
10565 def __init__(self, cfg, rpc, mode, **kwargs):
10566 self.cfg = cfg
10567 self.rpc = rpc
10568 # init buffer variables
10569 self.in_text = self.out_text = self.in_data = self.out_data = None
10570 # init all input fields so that pylint is happy
10571 self.mode = mode
10572 self.mem_size = self.disks = self.disk_template = None
10573 self.os = self.tags = self.nics = self.vcpus = None
10574 self.hypervisor = None
10575 self.relocate_from = None
10576 self.name = None
10577 self.evac_nodes = None
10579 self.required_nodes = None
10580 # init result fields
10581 self.success = self.info = self.result = None
10582 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10583 keyset = self._ALLO_KEYS
10584 fn = self._AddNewInstance
10585 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10586 keyset = self._RELO_KEYS
10587 fn = self._AddRelocateInstance
10588 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10589 keyset = self._EVAC_KEYS
10590 fn = self._AddEvacuateNodes
10592 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
10593 " IAllocator" % self.mode)
10594 for key in kwargs:
10595 if key not in keyset:
10596 raise errors.ProgrammerError("Invalid input parameter '%s' to"
10597 " IAllocator" % key)
10598 setattr(self, key, kwargs[key])
10600 for key in keyset:
10601 if key not in kwargs:
10602 raise errors.ProgrammerError("Missing input parameter '%s' to"
10603 " IAllocator" % key)
10604 self._BuildInputData(fn)
10606 def _ComputeClusterData(self):
10607 """Compute the generic allocator input data.
10609 This is the data that is independent of the actual operation.
10611 """
10612 cfg = self.cfg
10613 cluster_info = cfg.GetClusterInfo()
10614 # cluster data
10615 data = {
10616 "version": constants.IALLOCATOR_VERSION,
10617 "cluster_name": cfg.GetClusterName(),
10618 "cluster_tags": list(cluster_info.GetTags()),
10619 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
10620 # we don't have job IDs
10621 }
10622 iinfo = cfg.GetAllInstancesInfo().values()
10623 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
10626 node_list = cfg.GetNodeList()
10628 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10629 hypervisor_name = self.hypervisor
10630 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10631 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
10632 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10633 hypervisor_name = cluster_info.enabled_hypervisors[0]
10635 node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
10636 hypervisor_name)
10637 node_iinfo = \
10638 self.rpc.call_all_instances_info(node_list,
10639 cluster_info.enabled_hypervisors)
10641 data["nodegroups"] = self._ComputeNodeGroupData(cfg)
10643 data["nodes"] = self._ComputeNodeData(cfg, node_data, node_iinfo, i_list)
10645 data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
10647 self.in_data = data
10649 @staticmethod
10650 def _ComputeNodeGroupData(cfg):
10651 """Compute node groups data.
10653 """
10654 ng = {}
10655 for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
10656 ng[guuid] = { "name": gdata.name }
10658 return ng
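# Example of the mapping returned above (hypothetical UUID): group UUIDs map
# to a dictionary which currently only holds the group name:
#   {"3fa85f64-5717-4562-b3fc-2c963f66afa6": {"name": "default"}}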
10659 @staticmethod
10660 def _ComputeNodeData(cfg, node_data, node_iinfo, i_list):
10661 """Compute global node data.
10663 """
10664 node_results = {}
10665 for nname, nresult in node_data.items():
10666 # first fill in static (config-based) values
10667 ninfo = cfg.GetNodeInfo(nname)
10669 "tags": list(ninfo.GetTags()),
10670 "primary_ip": ninfo.primary_ip,
10671 "secondary_ip": ninfo.secondary_ip,
10672 "offline": ninfo.offline,
10673 "drained": ninfo.drained,
10674 "master_candidate": ninfo.master_candidate,
10675 "group": ninfo.group,
10676 "master_capable": ninfo.master_capable,
10677 "vm_capable": ninfo.vm_capable,
10678 }
10680 if not (ninfo.offline or ninfo.drained):
10681 nresult.Raise("Can't get data for node %s" % nname)
10682 node_iinfo[nname].Raise("Can't get node instance info from node %s" %
10684 remote_info = nresult.payload
10686 for attr in ['memory_total', 'memory_free', 'memory_dom0',
10687 'vg_size', 'vg_free', 'cpu_total']:
10688 if attr not in remote_info:
10689 raise errors.OpExecError("Node '%s' didn't return attribute"
10690 " '%s'" % (nname, attr))
10691 if not isinstance(remote_info[attr], int):
10692 raise errors.OpExecError("Node '%s' returned invalid value"
10693 " for '%s': %s" %
10694 (nname, attr, remote_info[attr]))
10695 # compute memory used by primary instances
10696 i_p_mem = i_p_up_mem = 0
10697 for iinfo, beinfo in i_list:
10698 if iinfo.primary_node == nname:
10699 i_p_mem += beinfo[constants.BE_MEMORY]
10700 if iinfo.name not in node_iinfo[nname].payload:
10701 i_used_mem = 0
10702 else:
10703 i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
10704 i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
10705 remote_info['memory_free'] -= max(0, i_mem_diff)
10707 if iinfo.admin_up:
10708 i_p_up_mem += beinfo[constants.BE_MEMORY]
10710 # compute memory used by instances
10712 "total_memory": remote_info['memory_total'],
10713 "reserved_memory": remote_info['memory_dom0'],
10714 "free_memory": remote_info['memory_free'],
10715 "total_disk": remote_info['vg_size'],
10716 "free_disk": remote_info['vg_free'],
10717 "total_cpus": remote_info['cpu_total'],
10718 "i_pri_memory": i_p_mem,
10719 "i_pri_up_memory": i_p_up_mem,
10720 }
10721 pnr.update(pnr_dyn)
10723 node_results[nname] = pnr
10725 return node_results
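# Illustrative shape of one node_results entry (hypothetical values), merging
# the static config fields with the dynamic pnr_dyn ones for online nodes:
#   node_results["node1.example.com"] = {
#     "tags": [], "primary_ip": "192.0.2.1", "offline": False,
#     "total_memory": 4096, "free_memory": 2048, "total_cpus": 4, ...}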
10727 @staticmethod
10728 def _ComputeInstanceData(cluster_info, i_list):
10729 """Compute global instance data.
10731 """
10732 instance_data = {}
10733 for iinfo, beinfo in i_list:
10734 nic_data = []
10735 for nic in iinfo.nics:
10736 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
10737 nic_dict = {"mac": nic.mac,
10739 "mode": filled_params[constants.NIC_MODE],
10740 "link": filled_params[constants.NIC_LINK],
10741 }
10742 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
10743 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
10744 nic_data.append(nic_dict)
10746 "tags": list(iinfo.GetTags()),
10747 "admin_up": iinfo.admin_up,
10748 "vcpus": beinfo[constants.BE_VCPUS],
10749 "memory": beinfo[constants.BE_MEMORY],
10751 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
10753 "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
10754 "disk_template": iinfo.disk_template,
10755 "hypervisor": iinfo.hypervisor,
10757 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
10759 instance_data[iinfo.name] = pir
10761 return instance_data
10763 def _AddNewInstance(self):
10764 """Add new instance data to allocator structure.
10766 This in combination with _AllocatorGetClusterData will create the
10767 correct structure needed as input for the allocator.
10769 The checks for the completeness of the opcode must have already been
10770 done.
10772 """
10773 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
10775 if self.disk_template in constants.DTS_NET_MIRROR:
10776 self.required_nodes = 2
10777 else:
10778 self.required_nodes = 1
10781 "disk_template": self.disk_template,
10784 "vcpus": self.vcpus,
10785 "memory": self.mem_size,
10786 "disks": self.disks,
10787 "disk_space_total": disk_space,
10789 "required_nodes": self.required_nodes,
10793 def _AddRelocateInstance(self):
10794 """Add relocate instance data to allocator structure.
10796 This in combination with _IAllocatorGetClusterData will create the
10797 correct structure needed as input for the allocator.
10799 The checks for the completeness of the opcode must have already been
10800 done.
10802 """
10803 instance = self.cfg.GetInstanceInfo(self.name)
10804 if instance is None:
10805 raise errors.ProgrammerError("Unknown instance '%s' passed to"
10806 " IAllocator" % self.name)
10808 if instance.disk_template not in constants.DTS_NET_MIRROR:
10809 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
10810 errors.ECODE_INVAL)
10812 if len(instance.secondary_nodes) != 1:
10813 raise errors.OpPrereqError("Instance has not exactly one secondary node",
10814 errors.ECODE_STATE)
10816 self.required_nodes = 1
10817 disk_sizes = [{'size': disk.size} for disk in instance.disks]
10818 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
10822 "disk_space_total": disk_space,
10823 "required_nodes": self.required_nodes,
10824 "relocate_from": self.relocate_from,
10828 def _AddEvacuateNodes(self):
10829 """Add evacuate nodes data to allocator structure.
10833 "evac_nodes": self.evac_nodes
10837 def _BuildInputData(self, fn):
10838 """Build input data structures.
10840 """
10841 self._ComputeClusterData()
10844 request["type"] = self.mode
10845 self.in_data["request"] = request
10847 self.in_text = serializer.Dump(self.in_data)
10849 def Run(self, name, validate=True, call_fn=None):
10850 """Run an instance allocator and return the results.
10852 """
10853 if call_fn is None:
10854 call_fn = self.rpc.call_iallocator_runner
10856 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
10857 result.Raise("Failure while running the iallocator script")
10859 self.out_text = result.payload
10860 if validate:
10861 self._ValidateResult()
10863 def _ValidateResult(self):
10864 """Process the allocator results.
10866 This will process and if successful save the result in
10867 self.out_data and the other parameters.
10869 """
10870 try:
10871 rdict = serializer.Load(self.out_text)
10872 except Exception, err:
10873 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
10875 if not isinstance(rdict, dict):
10876 raise errors.OpExecError("Can't parse iallocator results: not a dict")
10878 # TODO: remove backwards compatibility in later versions
10879 if "nodes" in rdict and "result" not in rdict:
10880 rdict["result"] = rdict["nodes"]
10881 del rdict["nodes"]
10883 for key in "success", "info", "result":
10884 if key not in rdict:
10885 raise errors.OpExecError("Can't parse iallocator results:"
10886 " missing key '%s'" % key)
10887 setattr(self, key, rdict[key])
10889 if not isinstance(rdict["result"], list):
10890 raise errors.OpExecError("Can't parse iallocator results: 'result' key"
10891 " is not a list")
10892 self.out_data = rdict
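# A reply that passes the checks above therefore looks like this on the wire
# (hypothetical node names, allocation mode):
#   {"success": true, "info": "allocation successful",
#    "result": ["node1.example.com", "node2.example.com"]}
# serializer.Load turns the JSON booleans into Python True/False.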
10895 class LUTestAllocator(NoHooksLU):
10896 """Run allocator tests.
10898 This LU runs the allocator tests
10902 ("direction", ht.NoDefault,
10903 ht.TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS)),
10904 ("mode", ht.NoDefault, ht.TElemOf(constants.VALID_IALLOCATOR_MODES)),
10905 ("name", ht.NoDefault, ht.TNonEmptyString),
10906 ("nics", ht.NoDefault, ht.TOr(ht.TNone, ht.TListOf(
10907 ht.TDictOf(ht.TElemOf(["mac", "ip", "bridge"]),
10908 ht.TOr(ht.TNone, ht.TNonEmptyString))))),
10909 ("disks", ht.NoDefault, ht.TOr(ht.TNone, ht.TList)),
10910 ("hypervisor", None, ht.TMaybeString),
10911 ("allocator", None, ht.TMaybeString),
10912 ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
10913 ("mem_size", None, ht.TOr(ht.TNone, ht.TPositiveInt)),
10914 ("vcpus", None, ht.TOr(ht.TNone, ht.TPositiveInt)),
10915 ("os", None, ht.TMaybeString),
10916 ("disk_template", None, ht.TMaybeString),
10917 ("evac_nodes", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString))),
10920 def CheckPrereq(self):
10921 """Check prerequisites.
10923 This checks the opcode parameters depending on the direction and mode test.
10925 """
10926 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
10927 for attr in ["mem_size", "disks", "disk_template",
10928 "os", "tags", "nics", "vcpus"]:
10929 if not hasattr(self.op, attr):
10930 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
10931 attr, errors.ECODE_INVAL)
10932 iname = self.cfg.ExpandInstanceName(self.op.name)
10933 if iname is not None:
10934 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
10935 iname, errors.ECODE_EXISTS)
10936 if not isinstance(self.op.nics, list):
10937 raise errors.OpPrereqError("Invalid parameter 'nics'",
10938 errors.ECODE_INVAL)
10939 if not isinstance(self.op.disks, list):
10940 raise errors.OpPrereqError("Invalid parameter 'disks'",
10941 errors.ECODE_INVAL)
10942 for row in self.op.disks:
10943 if (not isinstance(row, dict) or
10944 "size" not in row or
10945 not isinstance(row["size"], int) or
10946 "mode" not in row or
10947 row["mode"] not in ['r', 'w']):
10948 raise errors.OpPrereqError("Invalid contents of the 'disks'"
10949 " parameter", errors.ECODE_INVAL)
10950 if self.op.hypervisor is None:
10951 self.op.hypervisor = self.cfg.GetHypervisorType()
10952 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
10953 fname = _ExpandInstanceName(self.cfg, self.op.name)
10954 self.op.name = fname
10955 self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
10956 elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
10957 if not hasattr(self.op, "evac_nodes"):
10958 raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
10959 " opcode input", errors.ECODE_INVAL)
10960 else:
10961 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
10962 self.op.mode, errors.ECODE_INVAL)
10964 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
10965 if self.op.allocator is None:
10966 raise errors.OpPrereqError("Missing allocator name",
10967 errors.ECODE_INVAL)
10968 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
10969 raise errors.OpPrereqError("Wrong allocator test '%s'" %
10970 self.op.direction, errors.ECODE_INVAL)
10972 def Exec(self, feedback_fn):
10973 """Run the allocator test.
10975 """
10976 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
10977 ial = IAllocator(self.cfg, self.rpc,
10978 mode=self.op.mode,
10979 name=self.op.name,
10980 mem_size=self.op.mem_size,
10981 disks=self.op.disks,
10982 disk_template=self.op.disk_template,
10983 os=self.op.os,
10984 tags=self.op.tags,
10985 nics=self.op.nics,
10986 vcpus=self.op.vcpus,
10987 hypervisor=self.op.hypervisor,
10988 )
10989 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
10990 ial = IAllocator(self.cfg, self.rpc,
10993 relocate_from=list(self.relocate_from),
10995 elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
10996 ial = IAllocator(self.cfg, self.rpc,
10997 mode=self.op.mode,
10998 evac_nodes=self.op.evac_nodes)
11000 raise errors.ProgrammerError("Uncaught mode %s in"
11001 " LUTestAllocator.Exec", self.op.mode)
11003 if self.op.direction == constants.IALLOCATOR_DIR_IN:
11004 result = ial.in_text
11005 else:
11006 ial.Run(self.op.allocator, validate=False)
11007 result = ial.out_text
11009 return result