4 # Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
43 from ganeti import ssh
44 from ganeti import utils
45 from ganeti import errors
46 from ganeti import hypervisor
47 from ganeti import locking
48 from ganeti import constants
49 from ganeti import objects
50 from ganeti import serializer
51 from ganeti import ssconf
52 from ganeti import uidpool
53 from ganeti import compat
54 from ganeti import masterd
55 from ganeti import netutils
57 import ganeti.masterd.instance # pylint: disable-msg=W0611
60 # Modifiable default values; need to define these here before the
64 """Returns an empty list.
71 """Returns an empty dict.
77 #: The without-default default value
81 #: The no-type (value too complex to check in the type system)
87 """Checks if the given value is not None.
90 return val is not None
94 """Checks if the given value is None.
101 """Checks if the given value is a boolean.
104 return isinstance(val, bool)
108 """Checks if the given value is an integer.
111 return isinstance(val, int)
115 """Checks if the given value is a float.
118 return isinstance(val, float)
122 """Checks if the given value is a string.
125 return isinstance(val, basestring)
129 """Checks if a given value evaluates to a boolean True value.
135 def _TElemOf(target_list):
136 """Builds a function that checks if a given value is a member of a list.
139 return lambda val: val in target_list
144 """Checks if the given value is a list.
147 return isinstance(val, list)
151 """Checks if the given value is a dictionary.
154 return isinstance(val, dict)
159 """Combine multiple functions using an AND operation.
163 return compat.all(t(val) for t in args)
168 """Combine multiple functions using an AND operation.
172 return compat.any(t(val) for t in args)
178 #: a non-empty string
179 _TNonEmptyString = _TAnd(_TString, _TTrue)
182 #: a maybe non-empty string
183 _TMaybeString = _TOr(_TNonEmptyString, _TNone)
186 #: a maybe boolean (bool or none)
187 _TMaybeBool = _TOr(_TBool, _TNone)
190 #: a positive integer
191 _TPositiveInt = _TAnd(_TInt, lambda v: v >= 0)
193 #: a strictly positive integer
194 _TStrictPositiveInt = _TAnd(_TInt, lambda v: v > 0)
197 def _TListOf(my_type):
198 """Checks if a given value is a list with all elements of the same type.
202 lambda lst: compat.all(my_type(v) for v in lst))
205 def _TDictOf(key_type, val_type):
206 """Checks a dict type for the type of its key/values.
210 lambda my_dict: (compat.all(key_type(v) for v in my_dict.keys())
211 and compat.all(val_type(v)
212 for v in my_dict.values())))
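# Illustrative sketch (not part of the original module; the names below are
# made up): the small checks above compose into reusable validators, e.g.:
#
#   _TNameList = _TListOf(_TNonEmptyString)
#   _TNameList(["node1.example.com", "node2.example.com"])   # -> True
#   _TNameList(["node1.example.com", 5])                      # -> False
#   _TTagMap = _TDictOf(_TNonEmptyString, _TMaybeString)
#   _TTagMap({"rack": "r1", "owner": None})                   # -> True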
215 # Common opcode attributes
217 #: output fields for a query operation
218 _POutputFields = ("output_fields", _NoDefault, _TListOf(_TNonEmptyString))
221 #: the shutdown timeout
222 _PShutdownTimeout = ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
225 #: the force parameter
226 _PForce = ("force", False, _TBool)
228 #: a required instance name (for single-instance LUs)
229 _PInstanceName = ("instance_name", _NoDefault, _TNonEmptyString)
232 #: a required node name (for single-node LUs)
233 _PNodeName = ("node_name", _NoDefault, _TNonEmptyString)
235 #: the migration type (live/non-live)
236 _PMigrationMode = ("mode", None, _TOr(_TNone,
237 _TElemOf(constants.HT_MIGRATION_MODES)))
239 #: the obsolete 'live' mode (boolean)
240 _PMigrationLive = ("live", None, _TMaybeBool)
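# Illustrative sketch (hypothetical LU, not part of this module): an LU reuses
# the shared parameter tuples above in its _OP_PARAMS list, where each entry
# is (attribute name, default value or _NoDefault, type-check function):
#
#   class LUExampleOperation(LogicalUnit):
#     _OP_PARAMS = [
#       _PInstanceName,                    # required instance name
#       _PForce,                           # optional, defaults to False
#       ("timeout", 60, _TPositiveInt),    # LU-specific optional parameter
#       ]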
244 class LogicalUnit(object):
245 """Logical Unit base class.
247 Subclasses must follow these rules:
248 - implement ExpandNames
249 - implement CheckPrereq (except when tasklets are used)
250 - implement Exec (except when tasklets are used)
251 - implement BuildHooksEnv
252 - redefine HPATH and HTYPE
253 - optionally redefine their run requirements:
254 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
256 Note that all commands require root permissions.
258 @ivar dry_run_result: the value (if any) that will be returned to the caller
259 in dry-run mode (signalled by opcode dry_run parameter)
260 @cvar _OP_PARAMS: a list of opcode attributes, their default values
261 they should get if not already defined, and types they must match
269 def __init__(self, processor, op, context, rpc):
270 """Constructor for LogicalUnit.
272 This needs to be overridden in derived classes in order to check op
276 self.proc = processor
278 self.cfg = context.cfg
279 self.context = context
281 # Dicts used to declare locking needs to mcpu
282 self.needed_locks = None
283 self.acquired_locks = {}
284 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
286 self.remove_locks = {}
287 # Used to force good behavior when calling helper functions
288 self.recalculate_locks = {}
291 self.Log = processor.Log # pylint: disable-msg=C0103
292 self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
293 self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
294 self.LogStep = processor.LogStep # pylint: disable-msg=C0103
295 # support for dry-run
296 self.dry_run_result = None
297 # support for generic debug attribute
298 if (not hasattr(self.op, "debug_level") or
299 not isinstance(self.op.debug_level, int)):
300 self.op.debug_level = 0
305 # The new kind-of-type-system
306 op_id = self.op.OP_ID
307 for attr_name, aval, test in self._OP_PARAMS:
308 if not hasattr(op, attr_name):
309 if aval == _NoDefault:
310 raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
311 (op_id, attr_name), errors.ECODE_INVAL)
317 setattr(self.op, attr_name, dval)
318 attr_val = getattr(op, attr_name)
322 if not callable(test):
323 raise errors.ProgrammerError("Validation for parameter '%s.%s' failed,"
324 " given type is not a proper type (%s)" %
325 (op_id, attr_name, test))
326 if not test(attr_val):
327 logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
328 self.op.OP_ID, attr_name, type(attr_val), attr_val)
329 raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
330 (op_id, attr_name), errors.ECODE_INVAL)
332 self.CheckArguments()
335 """Returns the SshRunner object
339 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
342 ssh = property(fget=__GetSSH)
344 def CheckArguments(self):
345 """Check syntactic validity for the opcode arguments.
347 This method is for doing a simple syntactic check and ensuring the
348 validity of opcode parameters, without any cluster-related
349 checks. While the same can be accomplished in ExpandNames and/or
350 CheckPrereq, doing these separately is better because:
352 - ExpandNames is left as purely a lock-related function
353 - CheckPrereq is run after we have acquired locks (and possible
356 The function is allowed to change the self.op attribute so that
357 later methods need not worry about missing parameters.
362 def ExpandNames(self):
363 """Expand names for this LU.
365 This method is called before starting to execute the opcode, and it should
366 update all the parameters of the opcode to their canonical form (e.g. a
367 short node name must be fully expanded after this method has successfully
368 completed). This way locking, hooks, logging, etc. can work correctly.
370 LUs which implement this method must also populate the self.needed_locks
371 member, as a dict with lock levels as keys, and a list of needed lock names
374 - use an empty dict if you don't need any lock
375 - if you don't need any lock at a particular level omit that level
376 - don't put anything for the BGL level
377 - if you want all locks at a level use locking.ALL_SET as a value
379 If you need to share locks (rather than acquire them exclusively) at one
380 level you can modify self.share_locks, setting a true value (usually 1) for
381 that level. By default locks are not shared.
383 This function can also define a list of tasklets, which then will be
384 executed in order instead of the usual LU-level CheckPrereq and Exec
385 functions, if those are not defined by the LU.
389 # Acquire all nodes and one instance
390 self.needed_locks = {
391 locking.LEVEL_NODE: locking.ALL_SET,
392 locking.LEVEL_INSTANCE: ['instance1.example.com'],
394 # Acquire just two nodes
395 self.needed_locks = {
396 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
399 self.needed_locks = {} # No, you can't leave it to the default value None
402 # The implementation of this method is mandatory only if the new LU is
403 # concurrent, so that old LUs don't need to be changed all at the same time.
406 self.needed_locks = {} # Exclusive LUs don't need locks.
408 raise NotImplementedError
410 def DeclareLocks(self, level):
411 """Declare LU locking needs for a level
413 While most LUs can just declare their locking needs at ExpandNames time,
414 sometimes there's the need to calculate some locks after having acquired
415 the ones before. This function is called just before acquiring locks at a
416 particular level, but after acquiring the ones at lower levels, and permits
417 such calculations. It can be used to modify self.needed_locks, and by
418 default it does nothing.
420 This function is only called if you have something already set in
421 self.needed_locks for the level.
423 @param level: Locking level which is going to be locked
424 @type level: member of ganeti.locking.LEVELS
428 def CheckPrereq(self):
429 """Check prerequisites for this LU.
431 This method should check that the prerequisites for the execution
432 of this LU are fulfilled. It can do internode communication, but
433 it should be idempotent - no cluster or system changes are
436 The method should raise errors.OpPrereqError in case something is
437 not fulfilled. Its return value is ignored.
439 This method should also update all the parameters of the opcode to
440 their canonical form if it hasn't been done by ExpandNames before.
443 if self.tasklets is not None:
444 for (idx, tl) in enumerate(self.tasklets):
445 logging.debug("Checking prerequisites for tasklet %s/%s",
446 idx + 1, len(self.tasklets))
451 def Exec(self, feedback_fn):
454 This method should implement the actual work. It should raise
455 errors.OpExecError for failures that are somewhat dealt with in
459 if self.tasklets is not None:
460 for (idx, tl) in enumerate(self.tasklets):
461 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
464 raise NotImplementedError
466 def BuildHooksEnv(self):
467 """Build hooks environment for this LU.
469 This method should return a three-element tuple consisting of: a dict
470 containing the environment that will be used for running the
471 specific hook for this LU, a list of node names on which the hook
472 should run before the execution, and a list of node names on which
473 the hook should run after the execution.
475 The keys of the dict must not have the 'GANETI_' prefix, as this will
476 be handled in the hooks runner. Also note additional keys will be
477 added by the hooks runner. If the LU doesn't define any
478 environment, an empty dict (and not None) should be returned.
480 If no nodes are needed, an empty list (and not None) should be returned.
482 Note that if the HPATH for a LU class is None, this function will
486 raise NotImplementedError
488 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
489 """Notify the LU about the results of its hooks.
491 This method is called every time a hooks phase is executed, and notifies
492 the Logical Unit about the hooks' result. The LU can then use it to alter
493 its result based on the hooks. By default the method does nothing and the
494 previous result is passed back unchanged but any LU can define it if it
495 wants to use the local cluster hook-scripts somehow.
497 @param phase: one of L{constants.HOOKS_PHASE_POST} or
498 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
499 @param hook_results: the results of the multi-node hooks rpc call
500 @param feedback_fn: function used to send feedback back to the caller
501 @param lu_result: the previous Exec result this LU had, or None
503 @return: the new Exec result, based on the previous result
507 # API must be kept, thus we ignore the "unused argument" and "could
508 # be a function" warnings
509 # pylint: disable-msg=W0613,R0201
512 def _ExpandAndLockInstance(self):
513 """Helper function to expand and lock an instance.
515 Many LUs that work on an instance take its name in self.op.instance_name
516 and need to expand it and then declare the expanded name for locking. This
517 function does it, and then updates self.op.instance_name to the expanded
518 name. It also initializes needed_locks as a dict, if this hasn't been done
522 if self.needed_locks is None:
523 self.needed_locks = {}
525 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
526 "_ExpandAndLockInstance called with instance-level locks set"
527 self.op.instance_name = _ExpandInstanceName(self.cfg,
528 self.op.instance_name)
529 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
531 def _LockInstancesNodes(self, primary_only=False):
532 """Helper function to declare instances' nodes for locking.
534 This function should be called after locking one or more instances to lock
535 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
536 with all primary or secondary nodes for instances already locked and
537 present in self.needed_locks[locking.LEVEL_INSTANCE].
539 It should be called from DeclareLocks, and for safety only works if
540 self.recalculate_locks[locking.LEVEL_NODE] is set.
542 In the future it may grow parameters to just lock some instance's nodes, or
543 to just lock primaries or secondary nodes, if needed.
545 It should be called in DeclareLocks in a way similar to::
547 if level == locking.LEVEL_NODE:
548 self._LockInstancesNodes()
550 @type primary_only: boolean
551 @param primary_only: only lock primary nodes of locked instances
554 assert locking.LEVEL_NODE in self.recalculate_locks, \
555 "_LockInstancesNodes helper function called with no nodes to recalculate"
557 # TODO: check if we've really been called with the instance locks held
559 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
560 # future we might want to have different behaviors depending on the value
561 # of self.recalculate_locks[locking.LEVEL_NODE]
563 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
564 instance = self.context.cfg.GetInstanceInfo(instance_name)
565 wanted_nodes.append(instance.primary_node)
567 wanted_nodes.extend(instance.secondary_nodes)
569 if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
570 self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
571 elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
572 self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
574 del self.recalculate_locks[locking.LEVEL_NODE]
577 class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
578 """Simple LU which runs no hooks.
580 This LU is intended as a parent for other LogicalUnits which will
581 run no hooks, in order to reduce duplicate code.
587 def BuildHooksEnv(self):
588 """Empty BuildHooksEnv for NoHooksLu.
590 This just raises an error.
593 assert False, "BuildHooksEnv called for NoHooksLUs"
597 """Tasklet base class.
599 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
600 they can mix legacy code with tasklets. Locking needs to be done in the LU,
601 tasklets know nothing about locks.
603 Subclasses must follow these rules:
604 - Implement CheckPrereq
608 def __init__(self, lu):
615 def CheckPrereq(self):
616 """Check prerequisites for this tasklets.
618 This method should check whether the prerequisites for the execution of
619 this tasklet are fulfilled. It can do internode communication, but it
620 should be idempotent - no cluster or system changes are allowed.
622 The method should raise errors.OpPrereqError in case something is not
623 fulfilled. Its return value is ignored.
625 This method should also update all parameters to their canonical form if it
626 hasn't been done before.
631 def Exec(self, feedback_fn):
632 """Execute the tasklet.
634 This method should implement the actual work. It should raise
635 errors.OpExecError for failures that are somewhat dealt with in code, or
639 raise NotImplementedError
642 def _GetWantedNodes(lu, nodes):
643 """Returns list of checked and expanded node names.
645 @type lu: L{LogicalUnit}
646 @param lu: the logical unit on whose behalf we execute
648 @param nodes: list of node names or None for all nodes
650 @return: the list of nodes, sorted
651 @raise errors.ProgrammerError: if the nodes parameter is wrong type
655 raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
656 " non-empty list of nodes whose name is to be expanded.")
658 wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
659 return utils.NiceSort(wanted)
662 def _GetWantedInstances(lu, instances):
663 """Returns list of checked and expanded instance names.
665 @type lu: L{LogicalUnit}
666 @param lu: the logical unit on whose behalf we execute
667 @type instances: list
668 @param instances: list of instance names or None for all instances
670 @return: the list of instances, sorted
671 @raise errors.OpPrereqError: if the instances parameter is wrong type
672 @raise errors.OpPrereqError: if any of the passed instances is not found
676 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
678 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
682 def _GetUpdatedParams(old_params, update_dict,
683 use_default=True, use_none=False):
684 """Return the new version of a parameter dictionary.
686 @type old_params: dict
687 @param old_params: old parameters
688 @type update_dict: dict
689 @param update_dict: dict containing new parameter values, or
690 constants.VALUE_DEFAULT to reset the parameter to its default
692 @type use_default: boolean
693 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
694 values as 'to be deleted' values
695 @type use_none: boolean
696 @param use_none: whether to recognise C{None} values as 'to be
699 @return: the new parameter dictionary
702 params_copy = copy.deepcopy(old_params)
703 for key, val in update_dict.iteritems():
704 if ((use_default and val == constants.VALUE_DEFAULT) or
705 (use_none and val is None)):
711 params_copy[key] = val
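# Illustrative sketch of the update semantics above (keys/values are made up):
#
#   old = {"memory": 512, "vcpus": 2}
#   _GetUpdatedParams(old, {"memory": constants.VALUE_DEFAULT, "vcpus": 4})
#   # -> {"vcpus": 4}: "memory" is removed so that it falls back to the
#   #    default defined at a lower (e.g. cluster) level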
715 def _CheckOutputFields(static, dynamic, selected):
716 """Checks whether all selected fields are valid.
718 @type static: L{utils.FieldSet}
719 @param static: static fields set
720 @type dynamic: L{utils.FieldSet}
721 @param dynamic: dynamic fields set
728 delta = f.NonMatching(selected)
730 raise errors.OpPrereqError("Unknown output fields selected: %s"
731 % ",".join(delta), errors.ECODE_INVAL)
734 def _CheckGlobalHvParams(params):
735 """Validates that given hypervisor params are not global ones.
737 This will ensure that instances don't get customised versions of
741 used_globals = constants.HVC_GLOBALS.intersection(params)
743 msg = ("The following hypervisor parameters are global and cannot"
744 " be customized at instance level, please modify them at"
745 " cluster level: %s" % utils.CommaJoin(used_globals))
746 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
749 def _CheckNodeOnline(lu, node):
750 """Ensure that a given node is online.
752 @param lu: the LU on behalf of which we make the check
753 @param node: the node to check
754 @raise errors.OpPrereqError: if the node is offline
757 if lu.cfg.GetNodeInfo(node).offline:
758 raise errors.OpPrereqError("Can't use offline node %s" % node,
762 def _CheckNodeNotDrained(lu, node):
763 """Ensure that a given node is not drained.
765 @param lu: the LU on behalf of which we make the check
766 @param node: the node to check
767 @raise errors.OpPrereqError: if the node is drained
770 if lu.cfg.GetNodeInfo(node).drained:
771 raise errors.OpPrereqError("Can't use drained node %s" % node,
775 def _CheckNodeHasOS(lu, node, os_name, force_variant):
776 """Ensure that a node supports a given OS.
778 @param lu: the LU on behalf of which we make the check
779 @param node: the node to check
780 @param os_name: the OS to query about
781 @param force_variant: whether to ignore variant errors
782 @raise errors.OpPrereqError: if the node does not support the OS
785 result = lu.rpc.call_os_get(node, os_name)
786 result.Raise("OS '%s' not in supported OS list for node %s" %
788 prereq=True, ecode=errors.ECODE_INVAL)
789 if not force_variant:
790 _CheckOSVariant(result.payload, os_name)
793 def _RequireFileStorage():
794 """Checks that file storage is enabled.
796 @raise errors.OpPrereqError: when file storage is disabled
799 if not constants.ENABLE_FILE_STORAGE:
800 raise errors.OpPrereqError("File storage disabled at configure time",
804 def _CheckDiskTemplate(template):
805 """Ensure a given disk template is valid.
808 if template not in constants.DISK_TEMPLATES:
809 msg = ("Invalid disk template name '%s', valid templates are: %s" %
810 (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
811 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
812 if template == constants.DT_FILE:
813 _RequireFileStorage()
817 def _CheckStorageType(storage_type):
818 """Ensure a given storage type is valid.
821 if storage_type not in constants.VALID_STORAGE_TYPES:
822 raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
824 if storage_type == constants.ST_FILE:
825 _RequireFileStorage()
829 def _GetClusterDomainSecret():
830 """Reads the cluster domain secret.
833 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
837 def _CheckInstanceDown(lu, instance, reason):
838 """Ensure that an instance is not running."""
839 if instance.admin_up:
840 raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
841 (instance.name, reason), errors.ECODE_STATE)
843 pnode = instance.primary_node
844 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
845 ins_l.Raise("Can't contact node %s for instance information" % pnode,
846 prereq=True, ecode=errors.ECODE_ENVIRON)
848 if instance.name in ins_l.payload:
849 raise errors.OpPrereqError("Instance %s is running, %s" %
850 (instance.name, reason), errors.ECODE_STATE)
853 def _ExpandItemName(fn, name, kind):
854 """Expand an item name.
856 @param fn: the function to use for expansion
857 @param name: requested item name
858 @param kind: text description ('Node' or 'Instance')
859 @return: the resolved (full) name
860 @raise errors.OpPrereqError: if the item is not found
864 if full_name is None:
865 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
870 def _ExpandNodeName(cfg, name):
871 """Wrapper over L{_ExpandItemName} for nodes."""
872 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
875 def _ExpandInstanceName(cfg, name):
876 """Wrapper over L{_ExpandItemName} for instance."""
877 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
880 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
881 memory, vcpus, nics, disk_template, disks,
882 bep, hvp, hypervisor_name):
883 """Builds instance related env variables for hooks
885 This builds the hook environment from individual variables.
888 @param name: the name of the instance
889 @type primary_node: string
890 @param primary_node: the name of the instance's primary node
891 @type secondary_nodes: list
892 @param secondary_nodes: list of secondary nodes as strings
893 @type os_type: string
894 @param os_type: the name of the instance's OS
895 @type status: boolean
896 @param status: the should_run status of the instance
898 @param memory: the memory size of the instance
900 @param vcpus: the count of VCPUs the instance has
902 @param nics: list of tuples (ip, mac, mode, link) representing
903 the NICs the instance has
904 @type disk_template: string
905 @param disk_template: the disk template of the instance
907 @param disks: the list of (size, mode) pairs
909 @param bep: the backend parameters for the instance
911 @param hvp: the hypervisor parameters for the instance
912 @type hypervisor_name: string
913 @param hypervisor_name: the hypervisor for the instance
915 @return: the hook environment for this instance
924 "INSTANCE_NAME": name,
925 "INSTANCE_PRIMARY": primary_node,
926 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
927 "INSTANCE_OS_TYPE": os_type,
928 "INSTANCE_STATUS": str_status,
929 "INSTANCE_MEMORY": memory,
930 "INSTANCE_VCPUS": vcpus,
931 "INSTANCE_DISK_TEMPLATE": disk_template,
932 "INSTANCE_HYPERVISOR": hypervisor_name,
936 nic_count = len(nics)
937 for idx, (ip, mac, mode, link) in enumerate(nics):
940 env["INSTANCE_NIC%d_IP" % idx] = ip
941 env["INSTANCE_NIC%d_MAC" % idx] = mac
942 env["INSTANCE_NIC%d_MODE" % idx] = mode
943 env["INSTANCE_NIC%d_LINK" % idx] = link
944 if mode == constants.NIC_MODE_BRIDGED:
945 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
949 env["INSTANCE_NIC_COUNT"] = nic_count
952 disk_count = len(disks)
953 for idx, (size, mode) in enumerate(disks):
954 env["INSTANCE_DISK%d_SIZE" % idx] = size
955 env["INSTANCE_DISK%d_MODE" % idx] = mode
959 env["INSTANCE_DISK_COUNT"] = disk_count
961 for source, kind in [(bep, "BE"), (hvp, "HV")]:
962 for key, value in source.items():
963 env["INSTANCE_%s_%s" % (kind, key)] = value
968 def _NICListToTuple(lu, nics):
969 """Build a list of nic information tuples.
971 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
972 value in LUQueryInstanceData.
974 @type lu: L{LogicalUnit}
975 @param lu: the logical unit on whose behalf we execute
976 @type nics: list of L{objects.NIC}
977 @param nics: list of nics to convert to hooks tuples
981 cluster = lu.cfg.GetClusterInfo()
985 filled_params = cluster.SimpleFillNIC(nic.nicparams)
986 mode = filled_params[constants.NIC_MODE]
987 link = filled_params[constants.NIC_LINK]
988 hooks_nics.append((ip, mac, mode, link))
992 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
993 """Builds instance related env variables for hooks from an object.
995 @type lu: L{LogicalUnit}
996 @param lu: the logical unit on whose behalf we execute
997 @type instance: L{objects.Instance}
998 @param instance: the instance for which we should build the
1000 @type override: dict
1001 @param override: dictionary with key/values that will override
1004 @return: the hook environment dictionary
1007 cluster = lu.cfg.GetClusterInfo()
1008 bep = cluster.FillBE(instance)
1009 hvp = cluster.FillHV(instance)
1011 'name': instance.name,
1012 'primary_node': instance.primary_node,
1013 'secondary_nodes': instance.secondary_nodes,
1014 'os_type': instance.os,
1015 'status': instance.admin_up,
1016 'memory': bep[constants.BE_MEMORY],
1017 'vcpus': bep[constants.BE_VCPUS],
1018 'nics': _NICListToTuple(lu, instance.nics),
1019 'disk_template': instance.disk_template,
1020 'disks': [(disk.size, disk.mode) for disk in instance.disks],
1023 'hypervisor_name': instance.hypervisor,
1026 args.update(override)
1027 return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
1030 def _AdjustCandidatePool(lu, exceptions):
1031 """Adjust the candidate pool after node operations.
1034 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
1036 lu.LogInfo("Promoted nodes to master candidate role: %s",
1037 utils.CommaJoin(node.name for node in mod_list))
1038 for name in mod_list:
1039 lu.context.ReaddNode(name)
1040 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1042 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
1046 def _DecideSelfPromotion(lu, exceptions=None):
1047 """Decide whether I should promote myself as a master candidate.
1050 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
1051 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1052 # the new node will increase mc_max by one, so:
1053 mc_should = min(mc_should + 1, cp_size)
1054 return mc_now < mc_should
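# Illustrative example of the check above (numbers are made up): with
# candidate_pool_size = 10, mc_now = 3 and mc_should = 3, adding this node
# gives mc_should = min(3 + 1, 10) = 4, so mc_now < mc_should and the node
# promotes itself.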
1057 def _CheckNicsBridgesExist(lu, target_nics, target_node):
1058 """Check that the brigdes needed by a list of nics exist.
1061 cluster = lu.cfg.GetClusterInfo()
1062 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
1063 brlist = [params[constants.NIC_LINK] for params in paramslist
1064 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
1066 result = lu.rpc.call_bridges_exist(target_node, brlist)
1067 result.Raise("Error checking bridges on destination node '%s'" %
1068 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
1071 def _CheckInstanceBridgesExist(lu, instance, node=None):
1072 """Check that the brigdes needed by an instance exist.
1076 node = instance.primary_node
1077 _CheckNicsBridgesExist(lu, instance.nics, node)
1080 def _CheckOSVariant(os_obj, name):
1081 """Check whether an OS name conforms to the os variants specification.
1083 @type os_obj: L{objects.OS}
1084 @param os_obj: OS object to check
1086 @param name: OS name passed by the user, to check for validity
1089 if not os_obj.supported_variants:
1092 variant = name.split("+", 1)[1]
1094 raise errors.OpPrereqError("OS name must include a variant",
1097 if variant not in os_obj.supported_variants:
1098 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
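# Illustrative sketch (hypothetical OS name): _CheckOSVariant splits the user
# supplied name on the first '+':
#
#   name = "myos+default"
#   variant = name.split("+", 1)[1]   # -> "default", which is then checked
#                                     #    against os_obj.supported_variants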
1101 def _GetNodeInstancesInner(cfg, fn):
1102 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1105 def _GetNodeInstances(cfg, node_name):
1106 """Returns a list of all primary and secondary instances on a node.
1110 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1113 def _GetNodePrimaryInstances(cfg, node_name):
1114 """Returns primary instances on a node.
1117 return _GetNodeInstancesInner(cfg,
1118 lambda inst: node_name == inst.primary_node)
1121 def _GetNodeSecondaryInstances(cfg, node_name):
1122 """Returns secondary instances on a node.
1125 return _GetNodeInstancesInner(cfg,
1126 lambda inst: node_name in inst.secondary_nodes)
1129 def _GetStorageTypeArgs(cfg, storage_type):
1130 """Returns the arguments for a storage type.
1133 # Special case for file storage
1134 if storage_type == constants.ST_FILE:
1135 # storage.FileStorage wants a list of storage directories
1136 return [[cfg.GetFileStorageDir()]]
1141 def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1144 for dev in instance.disks:
1145 cfg.SetDiskID(dev, node_name)
1147 result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1148 result.Raise("Failed to get disk status from node %s" % node_name,
1149 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1151 for idx, bdev_status in enumerate(result.payload):
1152 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1158 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1159 """Check the sanity of iallocator and node arguments and use the
1160 cluster-wide iallocator if appropriate.
1162 Check that at most one of (iallocator, node) is specified. If none is
1163 specified, then the LU's opcode's iallocator slot is filled with the
1164 cluster-wide default iallocator.
1166 @type iallocator_slot: string
1167 @param iallocator_slot: the name of the opcode iallocator slot
1168 @type node_slot: string
1169 @param node_slot: the name of the opcode target node slot
1172 node = getattr(lu.op, node_slot, None)
1173 iallocator = getattr(lu.op, iallocator_slot, None)
1175 if node is not None and iallocator is not None:
1176 raise errors.OpPrereqError("Do not specify both, iallocator and node.",
1178 elif node is None and iallocator is None:
1179 default_iallocator = lu.cfg.GetDefaultIAllocator()
1180 if default_iallocator:
1181 setattr(lu.op, iallocator_slot, default_iallocator)
1183 raise errors.OpPrereqError("No iallocator or node given and no"
1184 " cluster-wide default iallocator found."
1185 " Please specify either an iallocator or a"
1186 " node, or set a cluster-wide default"
1190 class LUPostInitCluster(LogicalUnit):
1191 """Logical unit for running hooks after cluster initialization.
1194 HPATH = "cluster-init"
1195 HTYPE = constants.HTYPE_CLUSTER
1197 def BuildHooksEnv(self):
1201 env = {"OP_TARGET": self.cfg.GetClusterName()}
1202 mn = self.cfg.GetMasterNode()
1203 return env, [], [mn]
1205 def Exec(self, feedback_fn):
1212 class LUDestroyCluster(LogicalUnit):
1213 """Logical unit for destroying the cluster.
1216 HPATH = "cluster-destroy"
1217 HTYPE = constants.HTYPE_CLUSTER
1219 def BuildHooksEnv(self):
1223 env = {"OP_TARGET": self.cfg.GetClusterName()}
1226 def CheckPrereq(self):
1227 """Check prerequisites.
1229 This checks whether the cluster is empty.
1231 Any errors are signaled by raising errors.OpPrereqError.
1234 master = self.cfg.GetMasterNode()
1236 nodelist = self.cfg.GetNodeList()
1237 if len(nodelist) != 1 or nodelist[0] != master:
1238 raise errors.OpPrereqError("There are still %d node(s) in"
1239 " this cluster." % (len(nodelist) - 1),
1241 instancelist = self.cfg.GetInstanceList()
1243 raise errors.OpPrereqError("There are still %d instance(s) in"
1244 " this cluster." % len(instancelist),
1247 def Exec(self, feedback_fn):
1248 """Destroys the cluster.
1251 master = self.cfg.GetMasterNode()
1253 # Run post hooks on master node before it's removed
1254 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1256 hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1258 # pylint: disable-msg=W0702
1259 self.LogWarning("Errors occurred running hooks on %s" % master)
1261 result = self.rpc.call_node_stop_master(master, False)
1262 result.Raise("Could not disable the master role")
1267 def _VerifyCertificate(filename):
1268 """Verifies a certificate for LUVerifyCluster.
1270 @type filename: string
1271 @param filename: Path to PEM file
1275 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1276 utils.ReadFile(filename))
1277 except Exception, err: # pylint: disable-msg=W0703
1278 return (LUVerifyCluster.ETYPE_ERROR,
1279 "Failed to load X509 certificate %s: %s" % (filename, err))
1282 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1283 constants.SSL_CERT_EXPIRATION_ERROR)
1286 fnamemsg = "While verifying %s: %s" % (filename, msg)
1291 return (None, fnamemsg)
1292 elif errcode == utils.CERT_WARNING:
1293 return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
1294 elif errcode == utils.CERT_ERROR:
1295 return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)
1297 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1300 class LUVerifyCluster(LogicalUnit):
1301 """Verifies the cluster status.
1304 HPATH = "cluster-verify"
1305 HTYPE = constants.HTYPE_CLUSTER
1307 ("skip_checks", _EmptyList,
1308 _TListOf(_TElemOf(constants.VERIFY_OPTIONAL_CHECKS))),
1309 ("verbose", False, _TBool),
1310 ("error_codes", False, _TBool),
1311 ("debug_simulate_errors", False, _TBool),
1315 TCLUSTER = "cluster"
1317 TINSTANCE = "instance"
1319 ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1320 ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1321 EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1322 EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1323 EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1324 EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1326 EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1327 ENODEDRBD = (TNODE, "ENODEDRBD")
1328 ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1329 ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1330 ENODEHOOKS = (TNODE, "ENODEHOOKS")
1331 ENODEHV = (TNODE, "ENODEHV")
1332 ENODELVM = (TNODE, "ENODELVM")
1333 ENODEN1 = (TNODE, "ENODEN1")
1334 ENODENET = (TNODE, "ENODENET")
1335 ENODEOS = (TNODE, "ENODEOS")
1336 ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1337 ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1338 ENODERPC = (TNODE, "ENODERPC")
1339 ENODESSH = (TNODE, "ENODESSH")
1340 ENODEVERSION = (TNODE, "ENODEVERSION")
1341 ENODESETUP = (TNODE, "ENODESETUP")
1342 ENODETIME = (TNODE, "ENODETIME")
1344 ETYPE_FIELD = "code"
1345 ETYPE_ERROR = "ERROR"
1346 ETYPE_WARNING = "WARNING"
1348 class NodeImage(object):
1349 """A class representing the logical and physical status of a node.
1352 @ivar name: the node name to which this object refers
1353 @ivar volumes: a structure as returned from
1354 L{ganeti.backend.GetVolumeList} (runtime)
1355 @ivar instances: a list of running instances (runtime)
1356 @ivar pinst: list of configured primary instances (config)
1357 @ivar sinst: list of configured secondary instances (config)
1358 @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
1359 of this node (config)
1360 @ivar mfree: free memory, as reported by hypervisor (runtime)
1361 @ivar dfree: free disk, as reported by the node (runtime)
1362 @ivar offline: the offline status (config)
1363 @type rpc_fail: boolean
1364 @ivar rpc_fail: whether the RPC verify call was successful (overall,
1365 not whether the individual keys were correct) (runtime)
1366 @type lvm_fail: boolean
1367 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1368 @type hyp_fail: boolean
1369 @ivar hyp_fail: whether the RPC call didn't return the instance list
1370 @type ghost: boolean
1371 @ivar ghost: whether this is a known node or not (config)
1372 @type os_fail: boolean
1373 @ivar os_fail: whether the RPC call didn't return valid OS data
1375 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1378 def __init__(self, offline=False, name=None):
1387 self.offline = offline
1388 self.rpc_fail = False
1389 self.lvm_fail = False
1390 self.hyp_fail = False
1392 self.os_fail = False
1395 def ExpandNames(self):
1396 self.needed_locks = {
1397 locking.LEVEL_NODE: locking.ALL_SET,
1398 locking.LEVEL_INSTANCE: locking.ALL_SET,
1400 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1402 def _Error(self, ecode, item, msg, *args, **kwargs):
1403 """Format an error message.
1405 Based on the opcode's error_codes parameter, either format a
1406 parseable error code, or a simpler error string.
1408 This must be called only from Exec and functions called from Exec.
1411 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1413 # first complete the msg
1416 # then format the whole message
1417 if self.op.error_codes:
1418 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1424 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1425 # and finally report it via the feedback_fn
1426 self._feedback_fn(" - %s" % msg)
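# Illustrative sketch of the two formats above (names are made up); each line
# is reported to feedback_fn prefixed with " - ":
#
#   with error_codes set:   ERROR:ENODELVM:node:node1.example.com:LVM problem
#   without error_codes:    a plainer "ERROR: node <name>: <message>" string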
1428 def _ErrorIf(self, cond, *args, **kwargs):
1429 """Log an error message if the passed condition is True.
1432 cond = bool(cond) or self.op.debug_simulate_errors
1434 self._Error(*args, **kwargs)
1435 # do not mark the operation as failed for WARN cases only
1436 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1437 self.bad = self.bad or cond
1439 def _VerifyNode(self, ninfo, nresult):
1440 """Perform some basic validation on data returned from a node.
1442 - check the result data structure is well formed and has all the
1444 - check ganeti version
1446 @type ninfo: L{objects.Node}
1447 @param ninfo: the node to check
1448 @param nresult: the results from the node
1450 @return: whether overall this call was successful (and we can expect
1451 reasonable values in the response)
1455 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1457 # main result, nresult should be a non-empty dict
1458 test = not nresult or not isinstance(nresult, dict)
1459 _ErrorIf(test, self.ENODERPC, node,
1460 "unable to verify node: no data returned")
1464 # compares ganeti version
1465 local_version = constants.PROTOCOL_VERSION
1466 remote_version = nresult.get("version", None)
1467 test = not (remote_version and
1468 isinstance(remote_version, (list, tuple)) and
1469 len(remote_version) == 2)
1470 _ErrorIf(test, self.ENODERPC, node,
1471 "connection to node returned invalid data")
1475 test = local_version != remote_version[0]
1476 _ErrorIf(test, self.ENODEVERSION, node,
1477 "incompatible protocol versions: master %s,"
1478 " node %s", local_version, remote_version[0])
1482 # node seems compatible, we can actually try to look into its results
1484 # full package version
1485 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1486 self.ENODEVERSION, node,
1487 "software version mismatch: master %s, node %s",
1488 constants.RELEASE_VERSION, remote_version[1],
1489 code=self.ETYPE_WARNING)
1491 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1492 if isinstance(hyp_result, dict):
1493 for hv_name, hv_result in hyp_result.iteritems():
1494 test = hv_result is not None
1495 _ErrorIf(test, self.ENODEHV, node,
1496 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1499 test = nresult.get(constants.NV_NODESETUP,
1500 ["Missing NODESETUP results"])
1501 _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1506 def _VerifyNodeTime(self, ninfo, nresult,
1507 nvinfo_starttime, nvinfo_endtime):
1508 """Check the node time.
1510 @type ninfo: L{objects.Node}
1511 @param ninfo: the node to check
1512 @param nresult: the remote results for the node
1513 @param nvinfo_starttime: the start time of the RPC call
1514 @param nvinfo_endtime: the end time of the RPC call
1518 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1520 ntime = nresult.get(constants.NV_TIME, None)
1522 ntime_merged = utils.MergeTime(ntime)
1523 except (ValueError, TypeError):
1524 _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1527 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1528 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1529 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1530 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1534 _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1535 "Node time diverges by at least %s from master node time",
1538 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1539 """Check the node time.
1541 @type ninfo: L{objects.Node}
1542 @param ninfo: the node to check
1543 @param nresult: the remote results for the node
1544 @param vg_name: the configured VG name
1551 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1553 # checks vg existence and size > 20G
1554 vglist = nresult.get(constants.NV_VGLIST, None)
1556 _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1558 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1559 constants.MIN_VG_SIZE)
1560 _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1563 pvlist = nresult.get(constants.NV_PVLIST, None)
1564 test = pvlist is None
1565 _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1567 # check that ':' is not present in PV names, since it's a
1568 # special character for lvcreate (denotes the range of PEs to
1570 for _, pvname, owner_vg in pvlist:
1571 test = ":" in pvname
1572 _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1573 " '%s' of VG '%s'", pvname, owner_vg)
1575 def _VerifyNodeNetwork(self, ninfo, nresult):
1576 """Check the node time.
1578 @type ninfo: L{objects.Node}
1579 @param ninfo: the node to check
1580 @param nresult: the remote results for the node
1584 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1586 test = constants.NV_NODELIST not in nresult
1587 _ErrorIf(test, self.ENODESSH, node,
1588 "node hasn't returned node ssh connectivity data")
1590 if nresult[constants.NV_NODELIST]:
1591 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1592 _ErrorIf(True, self.ENODESSH, node,
1593 "ssh communication with node '%s': %s", a_node, a_msg)
1595 test = constants.NV_NODENETTEST not in nresult
1596 _ErrorIf(test, self.ENODENET, node,
1597 "node hasn't returned node tcp connectivity data")
1599 if nresult[constants.NV_NODENETTEST]:
1600 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1602 _ErrorIf(True, self.ENODENET, node,
1603 "tcp communication with node '%s': %s",
1604 anode, nresult[constants.NV_NODENETTEST][anode])
1606 test = constants.NV_MASTERIP not in nresult
1607 _ErrorIf(test, self.ENODENET, node,
1608 "node hasn't returned node master IP reachability data")
1610 if not nresult[constants.NV_MASTERIP]:
1611 if node == self.master_node:
1612 msg = "the master node cannot reach the master IP (not configured?)"
1614 msg = "cannot reach the master IP"
1615 _ErrorIf(True, self.ENODENET, node, msg)
1618 def _VerifyInstance(self, instance, instanceconfig, node_image):
1619 """Verify an instance.
1621 This function checks to see if the required block devices are
1622 available on the instance's node.
1625 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1626 node_current = instanceconfig.primary_node
1628 node_vol_should = {}
1629 instanceconfig.MapLVsByNode(node_vol_should)
1631 for node in node_vol_should:
1632 n_img = node_image[node]
1633 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1634 # ignore missing volumes on offline or broken nodes
1636 for volume in node_vol_should[node]:
1637 test = volume not in n_img.volumes
1638 _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1639 "volume %s missing on node %s", volume, node)
1641 if instanceconfig.admin_up:
1642 pri_img = node_image[node_current]
1643 test = instance not in pri_img.instances and not pri_img.offline
1644 _ErrorIf(test, self.EINSTANCEDOWN, instance,
1645 "instance not running on its primary node %s",
1648 for node, n_img in node_image.items():
1649 if node != node_current:
1650 test = instance in n_img.instances
1651 _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1652 "instance should not run on node %s", node)
1654 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1655 """Verify if there are any unknown volumes in the cluster.
1657 The .os, .swap and backup volumes are ignored. All other volumes are
1658 reported as unknown.
1660 @type reserved: L{ganeti.utils.FieldSet}
1661 @param reserved: a FieldSet of reserved volume names
1664 for node, n_img in node_image.items():
1665 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1666 # skip non-healthy nodes
1668 for volume in n_img.volumes:
1669 test = ((node not in node_vol_should or
1670 volume not in node_vol_should[node]) and
1671 not reserved.Matches(volume))
1672 self._ErrorIf(test, self.ENODEORPHANLV, node,
1673 "volume %s is unknown", volume)
1675 def _VerifyOrphanInstances(self, instancelist, node_image):
1676 """Verify the list of running instances.
1678 This checks what instances are running but unknown to the cluster.
1681 for node, n_img in node_image.items():
1682 for o_inst in n_img.instances:
1683 test = o_inst not in instancelist
1684 self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1685 "instance %s on node %s should not exist", o_inst, node)
1687 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1688 """Verify N+1 Memory Resilience.
1690 Check that if one single node dies we can still start all the
1691 instances it was primary for.
1694 for node, n_img in node_image.items():
1695 # This code checks that every node which is now listed as
1696 # secondary has enough memory to host all instances it would have
1697 # to take over, should a single other node in the cluster fail.
1698 # FIXME: not ready for failover to an arbitrary node
1699 # FIXME: does not support file-backed instances
1700 # WARNING: we currently take into account down instances as well
1701 # as up ones, considering that even if they're down someone
1702 # might want to start them even in the event of a node failure.
1703 for prinode, instances in n_img.sbp.items():
1705 for instance in instances:
1706 bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1707 if bep[constants.BE_AUTO_BALANCE]:
1708 needed_mem += bep[constants.BE_MEMORY]
1709 test = n_img.mfree < needed_mem
1710 self._ErrorIf(test, self.ENODEN1, node,
1711 "not enough memory on to accommodate"
1712 " failovers should peer node %s fail", prinode)
1714 def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1716 """Verifies and computes the node required file checksums.
1718 @type ninfo: L{objects.Node}
1719 @param ninfo: the node to check
1720 @param nresult: the remote results for the node
1721 @param file_list: required list of files
1722 @param local_cksum: dictionary of local files and their checksums
1723 @param master_files: list of files that only masters should have
1727 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1729 remote_cksum = nresult.get(constants.NV_FILELIST, None)
1730 test = not isinstance(remote_cksum, dict)
1731 _ErrorIf(test, self.ENODEFILECHECK, node,
1732 "node hasn't returned file checksum data")
1736 for file_name in file_list:
1737 node_is_mc = ninfo.master_candidate
1738 must_have = (file_name not in master_files) or node_is_mc
1740 test1 = file_name not in remote_cksum
1742 test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1744 test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1745 _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1746 "file '%s' missing", file_name)
1747 _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1748 "file '%s' has wrong checksum", file_name)
1749 # not candidate and this is not a must-have file
1750 _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1751 "file '%s' should not exist on non master"
1752 " candidates (and the file is outdated)", file_name)
1753 # all good, except non-master/non-must have combination
1754 _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1755 "file '%s' should not exist"
1756 " on non master candidates", file_name)
1758 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
1760 """Verifies and the node DRBD status.
1762 @type ninfo: L{objects.Node}
1763 @param ninfo: the node to check
1764 @param nresult: the remote results for the node
1765 @param instanceinfo: the dict of instances
1766 @param drbd_helper: the configured DRBD usermode helper
1767 @param drbd_map: the DRBD map as returned by
1768 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1772 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1775 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1776 test = (helper_result is None)
1777 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1778 "no drbd usermode helper returned")
1780 status, payload = helper_result
1782 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1783 "drbd usermode helper check unsuccessful: %s", payload)
1784 test = status and (payload != drbd_helper)
1785 _ErrorIf(test, self.ENODEDRBDHELPER, node,
1786 "wrong drbd usermode helper: %s", payload)
1788 # compute the DRBD minors
1790 for minor, instance in drbd_map[node].items():
1791 test = instance not in instanceinfo
1792 _ErrorIf(test, self.ECLUSTERCFG, None,
1793 "ghost instance '%s' in temporary DRBD map", instance)
1794 # ghost instance should not be running, but otherwise we
1795 # don't give double warnings (both ghost instance and
1796 # unallocated minor in use)
1798 node_drbd[minor] = (instance, False)
1800 instance = instanceinfo[instance]
1801 node_drbd[minor] = (instance.name, instance.admin_up)
1803 # and now check them
1804 used_minors = nresult.get(constants.NV_DRBDLIST, [])
1805 test = not isinstance(used_minors, (tuple, list))
1806 _ErrorIf(test, self.ENODEDRBD, node,
1807 "cannot parse drbd status file: %s", str(used_minors))
1809 # we cannot check drbd status
1812 for minor, (iname, must_exist) in node_drbd.items():
1813 test = minor not in used_minors and must_exist
1814 _ErrorIf(test, self.ENODEDRBD, node,
1815 "drbd minor %d of instance %s is not active", minor, iname)
1816 for minor in used_minors:
1817 test = minor not in node_drbd
1818 _ErrorIf(test, self.ENODEDRBD, node,
1819 "unallocated drbd minor %d is in use", minor)
1821 def _UpdateNodeOS(self, ninfo, nresult, nimg):
1822 """Builds the node OS structures.
1824 @type ninfo: L{objects.Node}
1825 @param ninfo: the node to check
1826 @param nresult: the remote results for the node
1827 @param nimg: the node image object
1831 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1833 remote_os = nresult.get(constants.NV_OSLIST, None)
1834 test = (not isinstance(remote_os, list) or
1835 not compat.all(isinstance(v, list) and len(v) == 7
1836 for v in remote_os))
1838 _ErrorIf(test, self.ENODEOS, node,
1839 "node hasn't returned valid OS data")
1848 for (name, os_path, status, diagnose,
1849 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1851 if name not in os_dict:
1854 # parameters is a list of lists instead of list of tuples due to
1855 # JSON lacking a real tuple type, fix it:
1856 parameters = [tuple(v) for v in parameters]
1857 os_dict[name].append((os_path, status, diagnose,
1858 set(variants), set(parameters), set(api_ver)))
1860 nimg.oslist = os_dict
1862 def _VerifyNodeOS(self, ninfo, nimg, base):
1863 """Verifies the node OS list.
1865 @type ninfo: L{objects.Node}
1866 @param ninfo: the node to check
1867 @param nimg: the node image object
1868 @param base: the 'template' node we match against (e.g. from the master)
1872 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1874 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1876 for os_name, os_data in nimg.oslist.items():
1877 assert os_data, "Empty OS status for OS %s?!" % os_name
1878 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1879 _ErrorIf(not f_status, self.ENODEOS, node,
1880 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1881 _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1882 "OS '%s' has multiple entries (first one shadows the rest): %s",
1883 os_name, utils.CommaJoin([v[0] for v in os_data]))
1884 # this will be caught in the backend too
1885 _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1886 and not f_var, self.ENODEOS, node,
1887 "OS %s with API at least %d does not declare any variant",
1888 os_name, constants.OS_API_V15)
1889 # comparisons with the 'base' image
1890 test = os_name not in base.oslist
1891 _ErrorIf(test, self.ENODEOS, node,
1892 "Extra OS %s not present on reference node (%s)",
1896 assert base.oslist[os_name], "Base node has empty OS status?"
1897 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1899 # base OS is invalid, skipping
1901 for kind, a, b in [("API version", f_api, b_api),
1902 ("variants list", f_var, b_var),
1903 ("parameters", f_param, b_param)]:
1904 _ErrorIf(a != b, self.ENODEOS, node,
1905 "OS %s %s differs from reference node %s: %s vs. %s",
1906 kind, os_name, base.name,
1907 utils.CommaJoin(a), utils.CommaJoin(b))
1909 # check any missing OSes
1910 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
1911 _ErrorIf(missing, self.ENODEOS, node,
1912 "OSes present on reference node %s but missing on this node: %s",
1913 base.name, utils.CommaJoin(missing))
1915 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1916 """Verifies and updates the node volume data.
1918 This function will update a L{NodeImage}'s internal structures
1919 with data from the remote call.
1921 @type ninfo: L{objects.Node}
1922 @param ninfo: the node to check
1923 @param nresult: the remote results for the node
1924 @param nimg: the node image object
1925 @param vg_name: the configured VG name
1929 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1931 nimg.lvm_fail = True
1932 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1935 elif isinstance(lvdata, basestring):
1936 _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1937 utils.SafeEncode(lvdata))
1938 elif not isinstance(lvdata, dict):
1939 _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1941 nimg.volumes = lvdata
1942 nimg.lvm_fail = False
1944 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1945 """Verifies and updates the node instance list.
1947 If the listing was successful, then updates this node's instance
1948 list. Otherwise, it marks the RPC call as failed for the instance
1951 @type ninfo: L{objects.Node}
1952 @param ninfo: the node to check
1953 @param nresult: the remote results for the node
1954 @param nimg: the node image object
1957 idata = nresult.get(constants.NV_INSTANCELIST, None)
1958 test = not isinstance(idata, list)
1959 self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1960 " (instancelist): %s", utils.SafeEncode(str(idata)))
1962 nimg.hyp_fail = True
1964 nimg.instances = idata
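# Sketch of the expected NV_INSTANCELIST payload (instance names invented): a
# flat list of the instances running on this node according to the
# hypervisor, e.g.
#   ["web1.example.com", "db1.example.com"]
# Anything that is not a list is treated above as a failed hypervisor query.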
1966 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1967 """Verifies and computes a node information map
1969 @type ninfo: L{objects.Node}
1970 @param ninfo: the node to check
1971 @param nresult: the remote results for the node
1972 @param nimg: the node image object
1973 @param vg_name: the configured VG name
1977 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1979 # try to read free memory (from the hypervisor)
1980 hv_info = nresult.get(constants.NV_HVINFO, None)
1981 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1982 _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1985 nimg.mfree = int(hv_info["memory_free"])
1986 except (ValueError, TypeError):
1987 _ErrorIf(True, self.ENODERPC, node,
1988 "node returned invalid nodeinfo, check hypervisor")
1990 # FIXME: devise a free space model for file based instances as well
1991 if vg_name is not None:
1992 test = (constants.NV_VGLIST not in nresult or
1993 vg_name not in nresult[constants.NV_VGLIST])
1994 _ErrorIf(test, self.ENODELVM, node,
1995 "node didn't return data for the volume group '%s'"
1996 " - it is either missing or broken", vg_name)
1999 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2000 except (ValueError, TypeError):
2001 _ErrorIf(True, self.ENODERPC, node,
2002 "node returned invalid LVM info, check LVM status")
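# Rough shape of the per-node data used above (numbers are illustrative):
#   nresult[constants.NV_HVINFO] -> {"memory_free": 2048, ...}
#   nresult[constants.NV_VGLIST] -> {"xenvg": 102400, ...}
# i.e. free hypervisor memory and free space per volume group (typically in
# MiB), from which nimg.mfree and nimg.dfree are filled in.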
2004 def BuildHooksEnv(self):
2007 Cluster-Verify hooks are run only in the post phase; if they fail, their
2008 output is logged in the verify output and the verification fails.
2011 all_nodes = self.cfg.GetNodeList()
2013 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2015 for node in self.cfg.GetAllNodesInfo().values():
2016 env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2018 return env, [], all_nodes
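# Minimal sketch of the resulting hooks environment (tag values invented):
#   env = {
#     "CLUSTER_TAGS": "prod eu-west",
#     "NODE_TAGS_node1.example.com": "drbd",
#   }
# returned together with an empty pre-hook node list and all nodes for the
# post phase, matching the (env, [], all_nodes) tuple above.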
2020 def Exec(self, feedback_fn):
2021 """Verify integrity of cluster, performing various tests on nodes.
2025 _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2026 verbose = self.op.verbose
2027 self._feedback_fn = feedback_fn
2028 feedback_fn("* Verifying global settings")
2029 for msg in self.cfg.VerifyConfig():
2030 _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2032 # Check the cluster certificates
2033 for cert_filename in constants.ALL_CERT_FILES:
2034 (errcode, msg) = _VerifyCertificate(cert_filename)
2035 _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2037 vg_name = self.cfg.GetVGName()
2038 drbd_helper = self.cfg.GetDRBDHelper()
2039 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2040 cluster = self.cfg.GetClusterInfo()
2041 nodelist = utils.NiceSort(self.cfg.GetNodeList())
2042 nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
2043 instancelist = utils.NiceSort(self.cfg.GetInstanceList())
2044 instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
2045 for iname in instancelist)
2046 i_non_redundant = [] # Non redundant instances
2047 i_non_a_balanced = [] # Non auto-balanced instances
2048 n_offline = 0 # Count of offline nodes
2049 n_drained = 0 # Count of nodes being drained
2050 node_vol_should = {}
2052 # FIXME: verify OS list
2053 # do local checksums
2054 master_files = [constants.CLUSTER_CONF_FILE]
2055 master_node = self.master_node = self.cfg.GetMasterNode()
2056 master_ip = self.cfg.GetMasterIP()
2058 file_names = ssconf.SimpleStore().GetFileList()
2059 file_names.extend(constants.ALL_CERT_FILES)
2060 file_names.extend(master_files)
2061 if cluster.modify_etc_hosts:
2062 file_names.append(constants.ETC_HOSTS)
2064 local_checksums = utils.FingerprintFiles(file_names)
2066 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2067 node_verify_param = {
2068 constants.NV_FILELIST: file_names,
2069 constants.NV_NODELIST: [node.name for node in nodeinfo
2070 if not node.offline],
2071 constants.NV_HYPERVISOR: hypervisors,
2072 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2073 node.secondary_ip) for node in nodeinfo
2074 if not node.offline],
2075 constants.NV_INSTANCELIST: hypervisors,
2076 constants.NV_VERSION: None,
2077 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2078 constants.NV_NODESETUP: None,
2079 constants.NV_TIME: None,
2080 constants.NV_MASTERIP: (master_node, master_ip),
2081 constants.NV_OSLIST: None,
2084 if vg_name is not None:
2085 node_verify_param[constants.NV_VGLIST] = None
2086 node_verify_param[constants.NV_LVLIST] = vg_name
2087 node_verify_param[constants.NV_PVLIST] = [vg_name]
2088 node_verify_param[constants.NV_DRBDLIST] = None
2091 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
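# The node_verify_param dict built above is the single argument passed to the
# node_verify RPC below; a trimmed, illustrative instance (file and node names
# invented) could look like:
#   {constants.NV_FILELIST: ["/etc/hosts", "/var/lib/ganeti/config.data"],
#    constants.NV_NODELIST: ["node2.example.com"],
#    constants.NV_HYPERVISOR: ["xen-pvm"],
#    constants.NV_LVLIST: "xenvg",
#    constants.NV_TIME: None}
# Keys whose value is None simply request the corresponding report from the
# node.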
2093 # Build our expected cluster state
2094 node_image = dict((node.name, self.NodeImage(offline=node.offline,
2096 for node in nodeinfo)
2098 for instance in instancelist:
2099 inst_config = instanceinfo[instance]
2101 for nname in inst_config.all_nodes:
2102 if nname not in node_image:
2104 gnode = self.NodeImage(name=nname)
2106 node_image[nname] = gnode
2108 inst_config.MapLVsByNode(node_vol_should)
2110 pnode = inst_config.primary_node
2111 node_image[pnode].pinst.append(instance)
2113 for snode in inst_config.secondary_nodes:
2114 nimg = node_image[snode]
2115 nimg.sinst.append(instance)
2116 if pnode not in nimg.sbp:
2117 nimg.sbp[pnode] = []
2118 nimg.sbp[pnode].append(instance)
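# Illustrative end state for one NodeImage after this loop (instance and node
# names are invented):
#   nimg.pinst = ["web1"]              # instances with this node as primary
#   nimg.sinst = ["db1"]               # instances with this node as secondary
#   nimg.sbp   = {"node1": ["db1"]}    # secondaries grouped by primary node
# This is the "expected cluster state" that the RPC results gathered below are
# checked against.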
2120 # At this point, we have the in-memory data structures complete,
2121 # except for the runtime information, which we'll gather next
2123 # Due to the way our RPC system works, exact response times cannot be
2124 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2125 # time before and after executing the request, we can at least have a time window in which the results were obtained.
2127 nvinfo_starttime = time.time()
2128 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2129 self.cfg.GetClusterName())
2130 nvinfo_endtime = time.time()
2132 all_drbd_map = self.cfg.ComputeDRBDMap()
2134 feedback_fn("* Verifying node status")
2138 for node_i in nodeinfo:
2140 nimg = node_image[node]
2144 feedback_fn("* Skipping offline node %s" % (node,))
2148 if node == master_node:
2150 elif node_i.master_candidate:
2151 ntype = "master candidate"
2152 elif node_i.drained:
2158 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2160 msg = all_nvinfo[node].fail_msg
2161 _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2163 nimg.rpc_fail = True
2166 nresult = all_nvinfo[node].payload
2168 nimg.call_ok = self._VerifyNode(node_i, nresult)
2169 self._VerifyNodeNetwork(node_i, nresult)
2170 self._VerifyNodeLVM(node_i, nresult, vg_name)
2171 self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2173 self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2175 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2177 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2178 self._UpdateNodeInstances(node_i, nresult, nimg)
2179 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2180 self._UpdateNodeOS(node_i, nresult, nimg)
2181 if not nimg.os_fail:
2182 if refos_img is None:
2184 self._VerifyNodeOS(node_i, nimg, refos_img)
2186 feedback_fn("* Verifying instance status")
2187 for instance in instancelist:
2189 feedback_fn("* Verifying instance %s" % instance)
2190 inst_config = instanceinfo[instance]
2191 self._VerifyInstance(instance, inst_config, node_image)
2192 inst_nodes_offline = []
2194 pnode = inst_config.primary_node
2195 pnode_img = node_image[pnode]
2196 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2197 self.ENODERPC, pnode, "instance %s, connection to"
2198 " primary node failed", instance)
2200 if pnode_img.offline:
2201 inst_nodes_offline.append(pnode)
2203 # If the instance is non-redundant we cannot survive losing its primary
2204 # node, so we are not N+1 compliant. On the other hand we have no disk
2205 # templates with more than one secondary, so that situation is not well handled either.
2207 # FIXME: does not support file-backed instances
2208 if not inst_config.secondary_nodes:
2209 i_non_redundant.append(instance)
2210 _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2211 instance, "instance has multiple secondary nodes: %s",
2212 utils.CommaJoin(inst_config.secondary_nodes),
2213 code=self.ETYPE_WARNING)
2215 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2216 i_non_a_balanced.append(instance)
2218 for snode in inst_config.secondary_nodes:
2219 s_img = node_image[snode]
2220 _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2221 "instance %s, connection to secondary node failed", instance)
2224 inst_nodes_offline.append(snode)
2226 # warn that the instance lives on offline nodes
2227 _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2228 "instance lives on offline node(s) %s",
2229 utils.CommaJoin(inst_nodes_offline))
2230 # ... or ghost nodes
2231 for node in inst_config.all_nodes:
2232 _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2233 "instance lives on ghost node %s", node)
2235 feedback_fn("* Verifying orphan volumes")
2236 reserved = utils.FieldSet(*cluster.reserved_lvs)
2237 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2239 feedback_fn("* Verifying orphan instances")
2240 self._VerifyOrphanInstances(instancelist, node_image)
2242 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2243 feedback_fn("* Verifying N+1 Memory redundancy")
2244 self._VerifyNPlusOneMemory(node_image, instanceinfo)
2246 feedback_fn("* Other Notes")
2248 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
2249 % len(i_non_redundant))
2251 if i_non_a_balanced:
2252 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
2253 % len(i_non_a_balanced))
2256 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
2259 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
2263 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2264 """Analyze the post-hooks' result
2266 This method analyses the hook result, handles it, and sends some
2267 nicely-formatted feedback back to the user.
2269 @param phase: one of L{constants.HOOKS_PHASE_POST} or
2270 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2271 @param hooks_results: the results of the multi-node hooks rpc call
2272 @param feedback_fn: function used to send feedback back to the caller
2273 @param lu_result: previous Exec result
2274 @return: the new Exec result, based on the previous result
2278 # We only really run POST phase hooks, and are only interested in
2280 if phase == constants.HOOKS_PHASE_POST:
2281 # Used to change hooks' output to proper indentation
2282 indent_re = re.compile('^', re.M)
2283 feedback_fn("* Hooks Results")
2284 assert hooks_results, "invalid result from hooks"
2286 for node_name in hooks_results:
2287 res = hooks_results[node_name]
2289 test = msg and not res.offline
2290 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2291 "Communication failure in hooks execution: %s", msg)
2292 if res.offline or msg:
2293 # No need to investigate payload if node is offline or gave an error.
2294 # manually override lu_result here, as _ErrorIf only
2295 # sets self.bad and not the hook result
2298 for script, hkr, output in res.payload:
2299 test = hkr == constants.HKR_FAIL
2300 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2301 "Script %s failed, output:", script)
2303 output = indent_re.sub(' ', output)
2304 feedback_fn("%s" % output)
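# Sketch of one per-node hook result as consumed above (script name and output
# are hypothetical):
#   res.payload = [("50-check-net", constants.HKR_SUCCESS, "all ok")]
# i.e. (script, status, output) triples; a status of constants.HKR_FAIL is
# reported to the user and turns the overall verify result into a failure.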
2310 class LUVerifyDisks(NoHooksLU):
2311 """Verifies the cluster disks status.
2316 def ExpandNames(self):
2317 self.needed_locks = {
2318 locking.LEVEL_NODE: locking.ALL_SET,
2319 locking.LEVEL_INSTANCE: locking.ALL_SET,
2321 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2323 def Exec(self, feedback_fn):
2324 """Verify integrity of cluster disks.
2326 @rtype: tuple of three items
2327 @return: a tuple of (dict of node-to-node_error, list of instances
2328 which need activate-disks, dict of instance: (node, volume) for missing volumes)
2332 result = res_nodes, res_instances, res_missing = {}, [], {}
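# Hedged example of the final result shape (all names invented):
#   ({"node3.example.com": "rpc failure"},                       # per-node errors
#    ["web1.example.com"],                                       # need activate-disks
#    {"db1.example.com": [("node2.example.com", "xenvg/lv0")]})  # missing LVs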
2334 vg_name = self.cfg.GetVGName()
2335 nodes = utils.NiceSort(self.cfg.GetNodeList())
2336 instances = [self.cfg.GetInstanceInfo(name)
2337 for name in self.cfg.GetInstanceList()]
2340 for inst in instances:
2342 if (not inst.admin_up or
2343 inst.disk_template not in constants.DTS_NET_MIRROR):
2345 inst.MapLVsByNode(inst_lvs)
2346 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2347 for node, vol_list in inst_lvs.iteritems():
2348 for vol in vol_list:
2349 nv_dict[(node, vol)] = inst
2354 node_lvs = self.rpc.call_lv_list(nodes, vg_name)
2358 node_res = node_lvs[node]
2359 if node_res.offline:
2361 msg = node_res.fail_msg
2363 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2364 res_nodes[node] = msg
2367 lvs = node_res.payload
2368 for lv_name, (_, _, lv_online) in lvs.items():
2369 inst = nv_dict.pop((node, lv_name), None)
2370 if (not lv_online and inst is not None
2371 and inst.name not in res_instances):
2372 res_instances.append(inst.name)
2374 # any leftover items in nv_dict are missing LVs, let's arrange the data better
2376 for key, inst in nv_dict.iteritems():
2377 if inst.name not in res_missing:
2378 res_missing[inst.name] = []
2379 res_missing[inst.name].append(key)
2384 class LURepairDiskSizes(NoHooksLU):
2385 """Verifies the cluster disks sizes.
2388 _OP_PARAMS = [("instances", _EmptyList, _TListOf(_TNonEmptyString))]
2391 def ExpandNames(self):
2392 if self.op.instances:
2393 self.wanted_names = []
2394 for name in self.op.instances:
2395 full_name = _ExpandInstanceName(self.cfg, name)
2396 self.wanted_names.append(full_name)
2397 self.needed_locks = {
2398 locking.LEVEL_NODE: [],
2399 locking.LEVEL_INSTANCE: self.wanted_names,
2401 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2403 self.wanted_names = None
2404 self.needed_locks = {
2405 locking.LEVEL_NODE: locking.ALL_SET,
2406 locking.LEVEL_INSTANCE: locking.ALL_SET,
2408 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2410 def DeclareLocks(self, level):
2411 if level == locking.LEVEL_NODE and self.wanted_names is not None:
2412 self._LockInstancesNodes(primary_only=True)
2414 def CheckPrereq(self):
2415 """Check prerequisites.
2417 This only checks the optional instance list against the existing names.
2420 if self.wanted_names is None:
2421 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2423 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2424 in self.wanted_names]
2426 def _EnsureChildSizes(self, disk):
2427 """Ensure children of the disk have the needed disk size.
2429 This is valid mainly for DRBD8 and fixes an issue where the
2430 children have a smaller disk size than the parent.
2432 @param disk: an L{ganeti.objects.Disk} object
2435 if disk.dev_type == constants.LD_DRBD8:
2436 assert disk.children, "Empty children for DRBD8?"
2437 fchild = disk.children[0]
2438 mismatch = fchild.size < disk.size
2440 self.LogInfo("Child disk has size %d, parent %d, fixing",
2441 fchild.size, disk.size)
2442 fchild.size = disk.size
2444 # and we recurse on this child only, not on the metadev
2445 return self._EnsureChildSizes(fchild) or mismatch
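# Example of the situation this fixes (sizes are hypothetical): a DRBD8 disk
# recorded with size 10240 whose data child reports 10112 is bumped back to
# 10240, and the method returns True so the caller knows the configuration
# needs to be written out.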
2449 def Exec(self, feedback_fn):
2450 """Verify the size of cluster disks.
2453 # TODO: check child disks too
2454 # TODO: check differences in size between primary/secondary nodes
2456 for instance in self.wanted_instances:
2457 pnode = instance.primary_node
2458 if pnode not in per_node_disks:
2459 per_node_disks[pnode] = []
2460 for idx, disk in enumerate(instance.disks):
2461 per_node_disks[pnode].append((instance, idx, disk))
2464 for node, dskl in per_node_disks.items():
2465 newl = [v[2].Copy() for v in dskl]
2467 self.cfg.SetDiskID(dsk, node)
2468 result = self.rpc.call_blockdev_getsizes(node, newl)
2470 self.LogWarning("Failure in blockdev_getsizes call to node"
2471 " %s, ignoring", node)
2473 if len(result.data) != len(dskl):
2474 self.LogWarning("Invalid result from node %s, ignoring node results",
2477 for ((instance, idx, disk), size) in zip(dskl, result.data):
2479 self.LogWarning("Disk %d of instance %s did not return size"
2480 " information, ignoring", idx, instance.name)
2482 if not isinstance(size, (int, long)):
2483 self.LogWarning("Disk %d of instance %s did not return valid"
2484 " size information, ignoring", idx, instance.name)
2487 if size != disk.size:
2488 self.LogInfo("Disk %d of instance %s has mismatched size,"
2489 " correcting: recorded %d, actual %d", idx,
2490 instance.name, disk.size, size)
2492 self.cfg.Update(instance, feedback_fn)
2493 changed.append((instance.name, idx, size))
2494 if self._EnsureChildSizes(disk):
2495 self.cfg.Update(instance, feedback_fn)
2496 changed.append((instance.name, idx, disk.size))
2500 class LURenameCluster(LogicalUnit):
2501 """Rename the cluster.
2504 HPATH = "cluster-rename"
2505 HTYPE = constants.HTYPE_CLUSTER
2506 _OP_PARAMS = [("name", _NoDefault, _TNonEmptyString)]
2508 def BuildHooksEnv(self):
2513 "OP_TARGET": self.cfg.GetClusterName(),
2514 "NEW_NAME": self.op.name,
2516 mn = self.cfg.GetMasterNode()
2517 all_nodes = self.cfg.GetNodeList()
2518 return env, [mn], all_nodes
2520 def CheckPrereq(self):
2521 """Verify that the passed name is a valid one.
2524 hostname = netutils.GetHostname(name=self.op.name,
2525 family=self.cfg.GetPrimaryIPFamily())
2527 new_name = hostname.name
2528 self.ip = new_ip = hostname.ip
2529 old_name = self.cfg.GetClusterName()
2530 old_ip = self.cfg.GetMasterIP()
2531 if new_name == old_name and new_ip == old_ip:
2532 raise errors.OpPrereqError("Neither the name nor the IP address of the"
2533 " cluster has changed",
2535 if new_ip != old_ip:
2536 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2537 raise errors.OpPrereqError("The given cluster IP address (%s) is"
2538 " reachable on the network" %
2539 new_ip, errors.ECODE_NOTUNIQUE)
2541 self.op.name = new_name
2543 def Exec(self, feedback_fn):
2544 """Rename the cluster.
2547 clustername = self.op.name
2550 # shutdown the master IP
2551 master = self.cfg.GetMasterNode()
2552 result = self.rpc.call_node_stop_master(master, False)
2553 result.Raise("Could not disable the master role")
2556 cluster = self.cfg.GetClusterInfo()
2557 cluster.cluster_name = clustername
2558 cluster.master_ip = ip
2559 self.cfg.Update(cluster, feedback_fn)
2561 # update the known hosts file
2562 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2563 node_list = self.cfg.GetNodeList()
2565 node_list.remove(master)
2568 result = self.rpc.call_upload_file(node_list,
2569 constants.SSH_KNOWN_HOSTS_FILE)
2570 for to_node, to_result in result.iteritems():
2571 msg = to_result.fail_msg
2573 msg = ("Copy of file %s to node %s failed: %s" %
2574 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
2575 self.proc.LogWarning(msg)
2578 result = self.rpc.call_node_start_master(master, False, False)
2579 msg = result.fail_msg
2581 self.LogWarning("Could not re-enable the master role on"
2582 " the master, please restart manually: %s", msg)
2587 class LUSetClusterParams(LogicalUnit):
2588 """Change the parameters of the cluster.
2591 HPATH = "cluster-modify"
2592 HTYPE = constants.HTYPE_CLUSTER
2594 ("vg_name", None, _TMaybeString),
2595 ("enabled_hypervisors", None,
2596 _TOr(_TAnd(_TListOf(_TElemOf(constants.HYPER_TYPES)), _TTrue), _TNone)),
2597 ("hvparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
2598 ("beparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
2599 ("os_hvp", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
2600 ("osparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
2601 ("candidate_pool_size", None, _TOr(_TStrictPositiveInt, _TNone)),
2602 ("uid_pool", None, _NoType),
2603 ("add_uids", None, _NoType),
2604 ("remove_uids", None, _NoType),
2605 ("maintain_node_health", None, _TMaybeBool),
2606 ("nicparams", None, _TOr(_TDict, _TNone)),
2607 ("drbd_helper", None, _TOr(_TString, _TNone)),
2608 ("default_iallocator", None, _TMaybeString),
2609 ("reserved_lvs", None, _TOr(_TListOf(_TNonEmptyString), _TNone)),
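# For illustration (values invented): an opcode that only touches a couple of
# these parameters would carry something like
#   {"vg_name": "xenvg",
#    "candidate_pool_size": 10,
#    "maintain_node_health": True}
# every entry left at its default of None is simply skipped by CheckPrereq and
# Exec below.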
2613 def CheckArguments(self):
2617 if self.op.uid_pool:
2618 uidpool.CheckUidPool(self.op.uid_pool)
2620 if self.op.add_uids:
2621 uidpool.CheckUidPool(self.op.add_uids)
2623 if self.op.remove_uids:
2624 uidpool.CheckUidPool(self.op.remove_uids)
2626 def ExpandNames(self):
2627 # FIXME: in the future maybe other cluster params won't require checking on
2628 # all nodes to be modified.
2629 self.needed_locks = {
2630 locking.LEVEL_NODE: locking.ALL_SET,
2632 self.share_locks[locking.LEVEL_NODE] = 1
2634 def BuildHooksEnv(self):
2639 "OP_TARGET": self.cfg.GetClusterName(),
2640 "NEW_VG_NAME": self.op.vg_name,
2642 mn = self.cfg.GetMasterNode()
2643 return env, [mn], [mn]
2645 def CheckPrereq(self):
2646 """Check prerequisites.
2648 This checks that the given parameters don't conflict and
2649 that the given volume group is valid.
2652 if self.op.vg_name is not None and not self.op.vg_name:
2653 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2654 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2655 " instances exist", errors.ECODE_INVAL)
2657 if self.op.drbd_helper is not None and not self.op.drbd_helper:
2658 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2659 raise errors.OpPrereqError("Cannot disable drbd helper while"
2660 " drbd-based instances exist",
2663 node_list = self.acquired_locks[locking.LEVEL_NODE]
2665 # if vg_name is not None, check the given volume group on all nodes
2667 vglist = self.rpc.call_vg_list(node_list)
2668 for node in node_list:
2669 msg = vglist[node].fail_msg
2671 # ignoring down node
2672 self.LogWarning("Error while gathering data on node %s"
2673 " (ignoring node): %s", node, msg)
2675 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2677 constants.MIN_VG_SIZE)
2679 raise errors.OpPrereqError("Error on node '%s': %s" %
2680 (node, vgstatus), errors.ECODE_ENVIRON)
2682 if self.op.drbd_helper:
2683 # checks given drbd helper on all nodes
2684 helpers = self.rpc.call_drbd_helper(node_list)
2685 for node in node_list:
2686 ninfo = self.cfg.GetNodeInfo(node)
2688 self.LogInfo("Not checking drbd helper on offline node %s", node)
2690 msg = helpers[node].fail_msg
2692 raise errors.OpPrereqError("Error checking drbd helper on node"
2693 " '%s': %s" % (node, msg),
2694 errors.ECODE_ENVIRON)
2695 node_helper = helpers[node].payload
2696 if node_helper != self.op.drbd_helper:
2697 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2698 (node, node_helper), errors.ECODE_ENVIRON)
2700 self.cluster = cluster = self.cfg.GetClusterInfo()
2701 # validate params changes
2702 if self.op.beparams:
2703 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2704 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2706 if self.op.nicparams:
2707 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2708 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2709 objects.NIC.CheckParameterSyntax(self.new_nicparams)
2712 # check all instances for consistency
2713 for instance in self.cfg.GetAllInstancesInfo().values():
2714 for nic_idx, nic in enumerate(instance.nics):
2715 params_copy = copy.deepcopy(nic.nicparams)
2716 params_filled = objects.FillDict(self.new_nicparams, params_copy)
2718 # check parameter syntax
2720 objects.NIC.CheckParameterSyntax(params_filled)
2721 except errors.ConfigurationError, err:
2722 nic_errors.append("Instance %s, nic/%d: %s" %
2723 (instance.name, nic_idx, err))
2725 # if we're moving instances to routed, check that they have an ip
2726 target_mode = params_filled[constants.NIC_MODE]
2727 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2728 nic_errors.append("Instance %s, nic/%d: routed NIC with no IP" %
2729 (instance.name, nic_idx))
2731 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2732 "\n".join(nic_errors))
2734 # hypervisor list/parameters
2735 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2736 if self.op.hvparams:
2737 for hv_name, hv_dict in self.op.hvparams.items():
2738 if hv_name not in self.new_hvparams:
2739 self.new_hvparams[hv_name] = hv_dict
2741 self.new_hvparams[hv_name].update(hv_dict)
2743 # os hypervisor parameters
2744 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2746 for os_name, hvs in self.op.os_hvp.items():
2747 if os_name not in self.new_os_hvp:
2748 self.new_os_hvp[os_name] = hvs
2750 for hv_name, hv_dict in hvs.items():
2751 if hv_name not in self.new_os_hvp[os_name]:
2752 self.new_os_hvp[os_name][hv_name] = hv_dict
2754 self.new_os_hvp[os_name][hv_name].update(hv_dict)
2757 self.new_osp = objects.FillDict(cluster.osparams, {})
2758 if self.op.osparams:
2759 for os_name, osp in self.op.osparams.items():
2760 if os_name not in self.new_osp:
2761 self.new_osp[os_name] = {}
2763 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2766 if not self.new_osp[os_name]:
2767 # we removed all parameters
2768 del self.new_osp[os_name]
2770 # check the parameter validity (remote check)
2771 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2772 os_name, self.new_osp[os_name])
2774 # changes to the hypervisor list
2775 if self.op.enabled_hypervisors is not None:
2776 self.hv_list = self.op.enabled_hypervisors
2777 for hv in self.hv_list:
2778 # if the hypervisor doesn't already exist in the cluster
2779 # hvparams, we initialize it to empty, and then (in both
2780 # cases) we make sure to fill the defaults, as we might not
2781 # have a complete defaults list if the hypervisor wasn't enabled before
2783 if hv not in new_hvp:
2785 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2786 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2788 self.hv_list = cluster.enabled_hypervisors
2790 if self.op.hvparams or self.op.enabled_hypervisors is not None:
2791 # either the enabled list has changed, or the parameters have, validate
2792 for hv_name, hv_params in self.new_hvparams.items():
2793 if ((self.op.hvparams and hv_name in self.op.hvparams) or
2794 (self.op.enabled_hypervisors and
2795 hv_name in self.op.enabled_hypervisors)):
2796 # either this is a new hypervisor, or its parameters have changed
2797 hv_class = hypervisor.GetHypervisor(hv_name)
2798 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2799 hv_class.CheckParameterSyntax(hv_params)
2800 _CheckHVParams(self, node_list, hv_name, hv_params)
2803 # no need to check any newly-enabled hypervisors, since the
2804 # defaults have already been checked in the above code-block
2805 for os_name, os_hvp in self.new_os_hvp.items():
2806 for hv_name, hv_params in os_hvp.items():
2807 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2808 # we need to fill in the new os_hvp on top of the actual hv_p
2809 cluster_defaults = self.new_hvparams.get(hv_name, {})
2810 new_osp = objects.FillDict(cluster_defaults, hv_params)
2811 hv_class = hypervisor.GetHypervisor(hv_name)
2812 hv_class.CheckParameterSyntax(new_osp)
2813 _CheckHVParams(self, node_list, hv_name, new_osp)
2815 if self.op.default_iallocator:
2816 alloc_script = utils.FindFile(self.op.default_iallocator,
2817 constants.IALLOCATOR_SEARCH_PATH,
2819 if alloc_script is None:
2820 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2821 " specified" % self.op.default_iallocator,
2824 def Exec(self, feedback_fn):
2825 """Change the parameters of the cluster.
2828 if self.op.vg_name is not None:
2829 new_volume = self.op.vg_name
2832 if new_volume != self.cfg.GetVGName():
2833 self.cfg.SetVGName(new_volume)
2835 feedback_fn("Cluster LVM configuration already in desired"
2836 " state, not changing")
2837 if self.op.drbd_helper is not None:
2838 new_helper = self.op.drbd_helper
2841 if new_helper != self.cfg.GetDRBDHelper():
2842 self.cfg.SetDRBDHelper(new_helper)
2844 feedback_fn("Cluster DRBD helper already in desired state,"
2846 if self.op.hvparams:
2847 self.cluster.hvparams = self.new_hvparams
2849 self.cluster.os_hvp = self.new_os_hvp
2850 if self.op.enabled_hypervisors is not None:
2851 self.cluster.hvparams = self.new_hvparams
2852 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2853 if self.op.beparams:
2854 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2855 if self.op.nicparams:
2856 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2857 if self.op.osparams:
2858 self.cluster.osparams = self.new_osp
2860 if self.op.candidate_pool_size is not None:
2861 self.cluster.candidate_pool_size = self.op.candidate_pool_size
2862 # we need to update the pool size here, otherwise the save will fail
2863 _AdjustCandidatePool(self, [])
2865 if self.op.maintain_node_health is not None:
2866 self.cluster.maintain_node_health = self.op.maintain_node_health
2868 if self.op.add_uids is not None:
2869 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2871 if self.op.remove_uids is not None:
2872 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2874 if self.op.uid_pool is not None:
2875 self.cluster.uid_pool = self.op.uid_pool
2877 if self.op.default_iallocator is not None:
2878 self.cluster.default_iallocator = self.op.default_iallocator
2880 if self.op.reserved_lvs is not None:
2881 self.cluster.reserved_lvs = self.op.reserved_lvs
2883 self.cfg.Update(self.cluster, feedback_fn)
2886 def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2887 """Distribute additional files which are part of the cluster configuration.
2889 ConfigWriter takes care of distributing the config and ssconf files, but
2890 there are more files which should be distributed to all nodes. This function
2891 makes sure those are copied.
2893 @param lu: calling logical unit
2894 @param additional_nodes: list of nodes not in the config to distribute to
2897 # 1. Gather target nodes
2898 myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2899 dist_nodes = lu.cfg.GetOnlineNodeList()
2900 if additional_nodes is not None:
2901 dist_nodes.extend(additional_nodes)
2902 if myself.name in dist_nodes:
2903 dist_nodes.remove(myself.name)
2905 # 2. Gather files to distribute
2906 dist_files = set([constants.ETC_HOSTS,
2907 constants.SSH_KNOWN_HOSTS_FILE,
2908 constants.RAPI_CERT_FILE,
2909 constants.RAPI_USERS_FILE,
2910 constants.CONFD_HMAC_KEY,
2911 constants.CLUSTER_DOMAIN_SECRET_FILE,
2914 enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2915 for hv_name in enabled_hypervisors:
2916 hv_class = hypervisor.GetHypervisor(hv_name)
2917 dist_files.update(hv_class.GetAncillaryFiles())
2919 # 3. Perform the files upload
2920 for fname in dist_files:
2921 if os.path.exists(fname):
2922 result = lu.rpc.call_upload_file(dist_nodes, fname)
2923 for to_node, to_result in result.items():
2924 msg = to_result.fail_msg
2926 msg = ("Copy of file %s to node %s failed: %s" %
2927 (fname, to_node, msg))
2928 lu.proc.LogWarning(msg)
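# Typical use, as seen elsewhere in this module: after changing cluster-wide
# state an LU simply calls
#   _RedistributeAncillaryFiles(self)
# optionally passing additional_nodes=[...] for nodes (e.g. one being added)
# that are not yet part of the configuration.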
2931 class LURedistributeConfig(NoHooksLU):
2932 """Force the redistribution of cluster configuration.
2934 This is a very simple LU.
2939 def ExpandNames(self):
2940 self.needed_locks = {
2941 locking.LEVEL_NODE: locking.ALL_SET,
2943 self.share_locks[locking.LEVEL_NODE] = 1
2945 def Exec(self, feedback_fn):
2946 """Redistribute the configuration.
2949 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2950 _RedistributeAncillaryFiles(self)
2953 def _WaitForSync(lu, instance, disks=None, oneshot=False):
2954 """Sleep and poll for an instance's disk to sync.
2957 if not instance.disks or disks is not None and not disks:
2960 disks = _ExpandCheckDisks(instance, disks)
2963 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2965 node = instance.primary_node
2968 lu.cfg.SetDiskID(dev, node)
2970 # TODO: Convert to utils.Retry
2973 degr_retries = 10 # in seconds, as we sleep 1 second each time
2977 cumul_degraded = False
2978 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
2979 msg = rstats.fail_msg
2981 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2984 raise errors.RemoteError("Can't contact node %s for mirror data,"
2985 " aborting." % node)
2988 rstats = rstats.payload
2990 for i, mstat in enumerate(rstats):
2992 lu.LogWarning("Can't compute data for node %s/%s",
2993 node, disks[i].iv_name)
2996 cumul_degraded = (cumul_degraded or
2997 (mstat.is_degraded and mstat.sync_percent is None))
2998 if mstat.sync_percent is not None:
3000 if mstat.estimated_time is not None:
3001 rem_time = ("%s remaining (estimated)" %
3002 utils.FormatSeconds(mstat.estimated_time))
3003 max_time = mstat.estimated_time
3005 rem_time = "no time estimate"
3006 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3007 (disks[i].iv_name, mstat.sync_percent, rem_time))
3009 # if we're done but degraded, let's do a few small retries, to
3010 # make sure we see a stable and not transient situation; therefore
3011 # we force restart of the loop
3012 if (done or oneshot) and cumul_degraded and degr_retries > 0:
3013 logging.info("Degraded disks found, %d retries left", degr_retries)
3021 time.sleep(min(60, max_time))
3024 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3025 return not cumul_degraded
3028 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3029 """Check that mirrors are not degraded.
3031 The ldisk parameter, if True, will change the test from the
3032 is_degraded attribute (which represents overall non-ok status for
3033 the device(s)) to the ldisk (representing the local storage status).
3036 lu.cfg.SetDiskID(dev, node)
3040 if on_primary or dev.AssembleOnSecondary():
3041 rstats = lu.rpc.call_blockdev_find(node, dev)
3042 msg = rstats.fail_msg
3044 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3046 elif not rstats.payload:
3047 lu.LogWarning("Can't find disk on node %s", node)
3051 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3053 result = result and not rstats.payload.is_degraded
3056 for child in dev.children:
3057 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3062 class LUDiagnoseOS(NoHooksLU):
3063 """Logical unit for OS diagnose/query.
3068 ("names", _EmptyList, _TListOf(_TNonEmptyString)),
3071 _FIELDS_STATIC = utils.FieldSet()
3072 _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants",
3073 "parameters", "api_versions")
3075 def CheckArguments(self):
3077 raise errors.OpPrereqError("Selective OS query not supported",
3080 _CheckOutputFields(static=self._FIELDS_STATIC,
3081 dynamic=self._FIELDS_DYNAMIC,
3082 selected=self.op.output_fields)
3084 def ExpandNames(self):
3085 # Lock all nodes, in shared mode
3086 # Temporary removal of locks, should be reverted later
3087 # TODO: reintroduce locks when they are lighter-weight
3088 self.needed_locks = {}
3089 #self.share_locks[locking.LEVEL_NODE] = 1
3090 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3093 def _DiagnoseByOS(rlist):
3094 """Remaps a per-node return list into a per-os per-node dictionary
3096 @param rlist: a map with node names as keys and OS objects as values
3099 @return: a dictionary with osnames as keys and as value another
3100 map, with nodes as keys and tuples of (path, status, diagnose,
3101 variants, parameters, api_versions) as values, eg::
3103 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3104 (/srv/..., False, "invalid api")],
3105 "node2": [(/srv/..., True, "", [], [])]}}
3110 # we build here the list of nodes that didn't fail the RPC (at RPC
3111 # level), so that nodes with a non-responding node daemon don't
3112 # make all OSes invalid
3113 good_nodes = [node_name for node_name in rlist
3114 if not rlist[node_name].fail_msg]
3115 for node_name, nr in rlist.items():
3116 if nr.fail_msg or not nr.payload:
3118 for (name, path, status, diagnose, variants,
3119 params, api_versions) in nr.payload:
3120 if name not in all_os:
3121 # build a list of nodes for this os containing empty lists
3122 # for each node in node_list
3124 for nname in good_nodes:
3125 all_os[name][nname] = []
3126 # convert params from [name, help] to (name, help)
3127 params = [tuple(v) for v in params]
3128 all_os[name][node_name].append((path, status, diagnose,
3129 variants, params, api_versions))
3132 def Exec(self, feedback_fn):
3133 """Compute the list of OSes.
3136 valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
3137 node_data = self.rpc.call_os_diagnose(valid_nodes)
3138 pol = self._DiagnoseByOS(node_data)
3141 for os_name, os_data in pol.items():
3144 (variants, params, api_versions) = null_state = (set(), set(), set())
3145 for idx, osl in enumerate(os_data.values()):
3146 valid = bool(valid and osl and osl[0][1])
3148 (variants, params, api_versions) = null_state
3150 node_variants, node_params, node_api = osl[0][3:6]
3151 if idx == 0: # first entry
3152 variants = set(node_variants)
3153 params = set(node_params)
3154 api_versions = set(node_api)
3155 else: # keep consistency
3156 variants.intersection_update(node_variants)
3157 params.intersection_update(node_params)
3158 api_versions.intersection_update(node_api)
3160 for field in self.op.output_fields:
3163 elif field == "valid":
3165 elif field == "node_status":
3166 # this is just a copy of the dict
3168 for node_name, nos_list in os_data.items():
3169 val[node_name] = nos_list
3170 elif field == "variants":
3171 val = list(variants)
3172 elif field == "parameters":
3174 elif field == "api_versions":
3175 val = list(api_versions)
3177 raise errors.ParameterError(field)
3184 class LURemoveNode(LogicalUnit):
3185 """Logical unit for removing a node.
3188 HPATH = "node-remove"
3189 HTYPE = constants.HTYPE_NODE
3194 def BuildHooksEnv(self):
3197 This doesn't run on the target node in the pre phase as a failed
3198 node would then be impossible to remove.
3202 "OP_TARGET": self.op.node_name,
3203 "NODE_NAME": self.op.node_name,
3205 all_nodes = self.cfg.GetNodeList()
3207 all_nodes.remove(self.op.node_name)
3209 logging.warning("Node %s which is about to be removed was not found"
3210 " in the list of all nodes", self.op.node_name)
3211 return env, all_nodes, all_nodes
3213 def CheckPrereq(self):
3214 """Check prerequisites.
3217 - the node exists in the configuration
3218 - it does not have primary or secondary instances
3219 - it's not the master
3221 Any errors are signaled by raising errors.OpPrereqError.
3224 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3225 node = self.cfg.GetNodeInfo(self.op.node_name)
3226 assert node is not None
3228 instance_list = self.cfg.GetInstanceList()
3230 masternode = self.cfg.GetMasterNode()
3231 if node.name == masternode:
3232 raise errors.OpPrereqError("Node is the master node,"
3233 " you need to failover first.",
3236 for instance_name in instance_list:
3237 instance = self.cfg.GetInstanceInfo(instance_name)
3238 if node.name in instance.all_nodes:
3239 raise errors.OpPrereqError("Instance %s is still running on the node,"
3240 " please remove first." % instance_name,
3242 self.op.node_name = node.name
3245 def Exec(self, feedback_fn):
3246 """Removes the node from the cluster.
3250 logging.info("Stopping the node daemon and removing configs from node %s",
3253 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3255 # Promote nodes to master candidate as needed
3256 _AdjustCandidatePool(self, exceptions=[node.name])
3257 self.context.RemoveNode(node.name)
3259 # Run post hooks on the node before it's removed
3260 hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3262 hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3264 # pylint: disable-msg=W0702
3265 self.LogWarning("Errors occurred running hooks on %s" % node.name)
3267 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3268 msg = result.fail_msg
3270 self.LogWarning("Errors encountered on the remote node while leaving"
3271 " the cluster: %s", msg)
3273 # Remove node from our /etc/hosts
3274 if self.cfg.GetClusterInfo().modify_etc_hosts:
3275 master_node = self.cfg.GetMasterNode()
3276 result = self.rpc.call_etc_hosts_modify(master_node,
3277 constants.ETC_HOSTS_REMOVE,
3279 result.Raise("Can't update hosts file with new host data")
3280 _RedistributeAncillaryFiles(self)
3283 class LUQueryNodes(NoHooksLU):
3284 """Logical unit for querying nodes.
3287 # pylint: disable-msg=W0142
3290 ("names", _EmptyList, _TListOf(_TNonEmptyString)),
3291 ("use_locking", False, _TBool),
3295 _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
3296 "master_candidate", "offline", "drained"]
3298 _FIELDS_DYNAMIC = utils.FieldSet(
3300 "mtotal", "mnode", "mfree",
3302 "ctotal", "cnodes", "csockets",
3305 _FIELDS_STATIC = utils.FieldSet(*[
3306 "pinst_cnt", "sinst_cnt",
3307 "pinst_list", "sinst_list",
3308 "pip", "sip", "tags",
3310 "role"] + _SIMPLE_FIELDS
3313 def CheckArguments(self):
3314 _CheckOutputFields(static=self._FIELDS_STATIC,
3315 dynamic=self._FIELDS_DYNAMIC,
3316 selected=self.op.output_fields)
3318 def ExpandNames(self):
3319 self.needed_locks = {}
3320 self.share_locks[locking.LEVEL_NODE] = 1
3323 self.wanted = _GetWantedNodes(self, self.op.names)
3325 self.wanted = locking.ALL_SET
3327 self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3328 self.do_locking = self.do_node_query and self.op.use_locking
3330 # if we don't request only static fields, we need to lock the nodes
3331 self.needed_locks[locking.LEVEL_NODE] = self.wanted
3333 def Exec(self, feedback_fn):
3334 """Computes the list of nodes and their attributes.
3337 all_info = self.cfg.GetAllNodesInfo()
3339 nodenames = self.acquired_locks[locking.LEVEL_NODE]
3340 elif self.wanted != locking.ALL_SET:
3341 nodenames = self.wanted
3342 missing = set(nodenames).difference(all_info.keys())
3344 raise errors.OpExecError(
3345 "Some nodes were removed before retrieving their data: %s" % missing)
3347 nodenames = all_info.keys()
3349 nodenames = utils.NiceSort(nodenames)
3350 nodelist = [all_info[name] for name in nodenames]
3352 # begin data gathering
3354 if self.do_node_query:
3356 node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3357 self.cfg.GetHypervisorType())
3358 for name in nodenames:
3359 nodeinfo = node_data[name]
3360 if not nodeinfo.fail_msg and nodeinfo.payload:
3361 nodeinfo = nodeinfo.payload
3362 fn = utils.TryConvert
3364 "mtotal": fn(int, nodeinfo.get('memory_total', None)),
3365 "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
3366 "mfree": fn(int, nodeinfo.get('memory_free', None)),
3367 "dtotal": fn(int, nodeinfo.get('vg_size', None)),
3368 "dfree": fn(int, nodeinfo.get('vg_free', None)),
3369 "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
3370 "bootid": nodeinfo.get('bootid', None),
3371 "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
3372 "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
3375 live_data[name] = {}
3377 live_data = dict.fromkeys(nodenames, {})
3379 node_to_primary = dict([(name, set()) for name in nodenames])
3380 node_to_secondary = dict([(name, set()) for name in nodenames])
3382 inst_fields = frozenset(("pinst_cnt", "pinst_list",
3383 "sinst_cnt", "sinst_list"))
3384 if inst_fields & frozenset(self.op.output_fields):
3385 inst_data = self.cfg.GetAllInstancesInfo()
3387 for inst in inst_data.values():
3388 if inst.primary_node in node_to_primary:
3389 node_to_primary[inst.primary_node].add(inst.name)
3390 for secnode in inst.secondary_nodes:
3391 if secnode in node_to_secondary:
3392 node_to_secondary[secnode].add(inst.name)
3394 master_node = self.cfg.GetMasterNode()
3396 # end data gathering
3399 for node in nodelist:
3401 for field in self.op.output_fields:
3402 if field in self._SIMPLE_FIELDS:
3403 val = getattr(node, field)
3404 elif field == "pinst_list":
3405 val = list(node_to_primary[node.name])
3406 elif field == "sinst_list":
3407 val = list(node_to_secondary[node.name])
3408 elif field == "pinst_cnt":
3409 val = len(node_to_primary[node.name])
3410 elif field == "sinst_cnt":
3411 val = len(node_to_secondary[node.name])
3412 elif field == "pip":
3413 val = node.primary_ip
3414 elif field == "sip":
3415 val = node.secondary_ip
3416 elif field == "tags":
3417 val = list(node.GetTags())
3418 elif field == "master":
3419 val = node.name == master_node
3420 elif self._FIELDS_DYNAMIC.Matches(field):
3421 val = live_data[node.name].get(field, None)
3422 elif field == "role":
3423 if node.name == master_node:
3425 elif node.master_candidate:
3434 raise errors.ParameterError(field)
3435 node_output.append(val)
3436 output.append(node_output)
3441 class LUQueryNodeVolumes(NoHooksLU):
3442 """Logical unit for getting volumes on node(s).
3446 ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
3447 ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
3450 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3451 _FIELDS_STATIC = utils.FieldSet("node")
3453 def CheckArguments(self):
3454 _CheckOutputFields(static=self._FIELDS_STATIC,
3455 dynamic=self._FIELDS_DYNAMIC,
3456 selected=self.op.output_fields)
3458 def ExpandNames(self):
3459 self.needed_locks = {}
3460 self.share_locks[locking.LEVEL_NODE] = 1
3461 if not self.op.nodes:
3462 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3464 self.needed_locks[locking.LEVEL_NODE] = \
3465 _GetWantedNodes(self, self.op.nodes)
3467 def Exec(self, feedback_fn):
3468 """Computes the list of nodes and their attributes.
3471 nodenames = self.acquired_locks[locking.LEVEL_NODE]
3472 volumes = self.rpc.call_node_volumes(nodenames)
3474 ilist = [self.cfg.GetInstanceInfo(iname) for iname
3475 in self.cfg.GetInstanceList()]
3477 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3480 for node in nodenames:
3481 nresult = volumes[node]
3484 msg = nresult.fail_msg
3486 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3489 node_vols = nresult.payload[:]
3490 node_vols.sort(key=lambda vol: vol['dev'])
3492 for vol in node_vols:
3494 for field in self.op.output_fields:
3497 elif field == "phys":
3501 elif field == "name":
3503 elif field == "size":
3504 val = int(float(vol['size']))
3505 elif field == "instance":
3507 if node not in lv_by_node[inst]:
3509 if vol['name'] in lv_by_node[inst][node]:
3515 raise errors.ParameterError(field)
3516 node_output.append(str(val))
3518 output.append(node_output)
3523 class LUQueryNodeStorage(NoHooksLU):
3524 """Logical unit for getting information on storage units on node(s).
3527 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3529 ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
3530 ("storage_type", _NoDefault, _CheckStorageType),
3531 ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
3532 ("name", None, _TMaybeString),
3536 def CheckArguments(self):
3537 _CheckOutputFields(static=self._FIELDS_STATIC,
3538 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3539 selected=self.op.output_fields)
3541 def ExpandNames(self):
3542 self.needed_locks = {}
3543 self.share_locks[locking.LEVEL_NODE] = 1
3546 self.needed_locks[locking.LEVEL_NODE] = \
3547 _GetWantedNodes(self, self.op.nodes)
3549 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3551 def Exec(self, feedback_fn):
3552 """Computes the list of nodes and their attributes.
3555 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3557 # Always get name to sort by
3558 if constants.SF_NAME in self.op.output_fields:
3559 fields = self.op.output_fields[:]
3561 fields = [constants.SF_NAME] + self.op.output_fields
3563 # Never ask for node or type as it's only known to the LU
3564 for extra in [constants.SF_NODE, constants.SF_TYPE]:
3565 while extra in fields:
3566 fields.remove(extra)
3568 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3569 name_idx = field_idx[constants.SF_NAME]
3571 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3572 data = self.rpc.call_storage_list(self.nodes,
3573 self.op.storage_type, st_args,
3574 self.op.name, fields)
3578 for node in utils.NiceSort(self.nodes):
3579 nresult = data[node]
3583 msg = nresult.fail_msg
3585 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3588 rows = dict([(row[name_idx], row) for row in nresult.payload])
3590 for name in utils.NiceSort(rows.keys()):
3595 for field in self.op.output_fields:
3596 if field == constants.SF_NODE:
3598 elif field == constants.SF_TYPE:
3599 val = self.op.storage_type
3600 elif field in field_idx:
3601 val = row[field_idx[field]]
3603 raise errors.ParameterError(field)
3612 class LUModifyNodeStorage(NoHooksLU):
3613 """Logical unit for modifying a storage volume on a node.
3618 ("storage_type", _NoDefault, _CheckStorageType),
3619 ("name", _NoDefault, _TNonEmptyString),
3620 ("changes", _NoDefault, _TDict),
3624 def CheckArguments(self):
3625 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3627 storage_type = self.op.storage_type
3630 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3632 raise errors.OpPrereqError("Storage units of type '%s' can not be"
3633 " modified" % storage_type,
3636 diff = set(self.op.changes.keys()) - modifiable
3638 raise errors.OpPrereqError("The following fields can not be modified for"
3639 " storage units of type '%s': %r" %
3640 (storage_type, list(diff)),
3643 def ExpandNames(self):
3644 self.needed_locks = {
3645 locking.LEVEL_NODE: self.op.node_name,
3648 def Exec(self, feedback_fn):
3649 """Computes the list of nodes and their attributes.
3652 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3653 result = self.rpc.call_storage_modify(self.op.node_name,
3654 self.op.storage_type, st_args,
3655 self.op.name, self.op.changes)
3656 result.Raise("Failed to modify storage unit '%s' on %s" %
3657 (self.op.name, self.op.node_name))
3660 class LUAddNode(LogicalUnit):
3661 """Logical unit for adding node to the cluster.
3665 HTYPE = constants.HTYPE_NODE
3668 ("primary_ip", None, _NoType),
3669 ("secondary_ip", None, _TMaybeString),
3670 ("readd", False, _TBool),
3671 ("nodegroup", None, _TMaybeString)
3674 def CheckArguments(self):
3675 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
3676 # validate/normalize the node name
3677 self.hostname = netutils.GetHostname(name=self.op.node_name,
3678 family=self.primary_ip_family)
3679 self.op.node_name = self.hostname.name
3680 if self.op.readd and self.op.nodegroup:
3681 raise errors.OpPrereqError("Cannot pass a nodegroup when a node is"
3682 " being readded", errors.ECODE_INVAL)
3684 def BuildHooksEnv(self):
3687 This will run on all nodes before, and on all nodes + the new node after.
3691 "OP_TARGET": self.op.node_name,
3692 "NODE_NAME": self.op.node_name,
3693 "NODE_PIP": self.op.primary_ip,
3694 "NODE_SIP": self.op.secondary_ip,
3696 nodes_0 = self.cfg.GetNodeList()
3697 nodes_1 = nodes_0 + [self.op.node_name, ]
3698 return env, nodes_0, nodes_1
3700 def CheckPrereq(self):
3701 """Check prerequisites.
3704 - the new node is not already in the config
3706 - its parameters (single/dual homed) match the cluster
3708 Any errors are signaled by raising errors.OpPrereqError.
3712 hostname = self.hostname
3713 node = hostname.name
3714 primary_ip = self.op.primary_ip = hostname.ip
3715 if self.op.secondary_ip is None:
3716 if self.primary_ip_family == netutils.IP6Address.family:
3717 raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
3718 " IPv4 address must be given as secondary",
3720 self.op.secondary_ip = primary_ip
3722 secondary_ip = self.op.secondary_ip
3723 if not netutils.IP4Address.IsValid(secondary_ip):
3724 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
3725 " address" % secondary_ip, errors.ECODE_INVAL)
3727 node_list = cfg.GetNodeList()
3728 if not self.op.readd and node in node_list:
3729 raise errors.OpPrereqError("Node %s is already in the configuration" %
3730 node, errors.ECODE_EXISTS)
3731 elif self.op.readd and node not in node_list:
3732 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3735 self.changed_primary_ip = False
3737 for existing_node_name in node_list:
3738 existing_node = cfg.GetNodeInfo(existing_node_name)
3740 if self.op.readd and node == existing_node_name:
3741 if existing_node.secondary_ip != secondary_ip:
3742 raise errors.OpPrereqError("Readded node doesn't have the same IP"
3743 " address configuration as before",
3745 if existing_node.primary_ip != primary_ip:
3746 self.changed_primary_ip = True
3750 if (existing_node.primary_ip == primary_ip or
3751 existing_node.secondary_ip == primary_ip or
3752 existing_node.primary_ip == secondary_ip or
3753 existing_node.secondary_ip == secondary_ip):
3754 raise errors.OpPrereqError("New node ip address(es) conflict with"
3755 " existing node %s" % existing_node.name,
3756 errors.ECODE_NOTUNIQUE)
3758 # check that the type of the node (single versus dual homed) is the
3759 # same as for the master
3760 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3761 master_singlehomed = myself.secondary_ip == myself.primary_ip
3762 newbie_singlehomed = secondary_ip == primary_ip
3763 if master_singlehomed != newbie_singlehomed:
3764 if master_singlehomed:
3765 raise errors.OpPrereqError("The master has no private ip but the"
3766 " new node has one",
3769 raise errors.OpPrereqError("The master has a private ip but the"
3770 " new node doesn't have one",
3773 # checks reachability
3774 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3775 raise errors.OpPrereqError("Node not reachable by ping",
3776 errors.ECODE_ENVIRON)
3778 if not newbie_singlehomed:
3779 # check reachability from my secondary ip to newbie's secondary ip
3780 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3781 source=myself.secondary_ip):
3782 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3783 " based ping to noded port",
3784 errors.ECODE_ENVIRON)
3791 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3794 self.new_node = self.cfg.GetNodeInfo(node)
3795 assert self.new_node is not None, "Can't retrieve locked node %s" % node
3797 nodegroup = cfg.LookupNodeGroup(self.op.nodegroup)
3798 self.new_node = objects.Node(name=node,
3799 primary_ip=primary_ip,
3800 secondary_ip=secondary_ip,
3801 master_candidate=self.master_candidate,
3802 offline=False, drained=False,
3803 nodegroup=nodegroup)
3805 def Exec(self, feedback_fn):
3806 """Adds the new node to the cluster.
3809 new_node = self.new_node
3810 node = new_node.name
3812 # for re-adds, reset the offline/drained/master-candidate flags;
3813 # we need to reset here, otherwise offline would prevent RPC calls
3814 # later in the procedure; this also means that if the re-add
3815 # fails, we are left with a non-offlined, broken node
3817 new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3818 self.LogInfo("Readding a node, the offline/drained flags were reset")
3819 # if we demote the node, we do cleanup later in the procedure
3820 new_node.master_candidate = self.master_candidate
3821 if self.changed_primary_ip:
3822 new_node.primary_ip = self.op.primary_ip
3824 # notify the user about any possible mc promotion
3825 if new_node.master_candidate:
3826 self.LogInfo("Node will be a master candidate")
3828 # check connectivity
3829 result = self.rpc.call_version([node])[node]
3830 result.Raise("Can't get version information from node %s" % node)
3831 if constants.PROTOCOL_VERSION == result.payload:
3832 logging.info("Communication to node %s fine, sw version %s match",
3833 node, result.payload)
3835 raise errors.OpExecError("Version mismatch master version %s,"
3836 " node version %s" %
3837 (constants.PROTOCOL_VERSION, result.payload))
3839 # Add node to our /etc/hosts, and add key to known_hosts
3840 if self.cfg.GetClusterInfo().modify_etc_hosts:
3841 master_node = self.cfg.GetMasterNode()
3842 result = self.rpc.call_etc_hosts_modify(master_node,
3843 constants.ETC_HOSTS_ADD,
3846 result.Raise("Can't update hosts file with new host data")
3848 if new_node.secondary_ip != new_node.primary_ip:
3849 result = self.rpc.call_node_has_ip_address(new_node.name,
3850 new_node.secondary_ip)
3851 result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3852 prereq=True, ecode=errors.ECODE_ENVIRON)
3853 if not result.payload:
3854 raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3855 " you gave (%s). Please fix and re-run this"
3856 " command." % new_node.secondary_ip)
3858 node_verify_list = [self.cfg.GetMasterNode()]
3859 node_verify_param = {
3860 constants.NV_NODELIST: [node],
3861 # TODO: do a node-net-test as well?
3864 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3865 self.cfg.GetClusterName())
3866 for verifier in node_verify_list:
3867 result[verifier].Raise("Cannot communicate with node %s" % verifier)
3868 nl_payload = result[verifier].payload[constants.NV_NODELIST]
3870 for failed in nl_payload:
3871 feedback_fn("ssh/hostname verification failed"
3872 " (checking from %s): %s" %
3873 (verifier, nl_payload[failed]))
3874 raise errors.OpExecError("ssh/hostname verification failed.")
3877 _RedistributeAncillaryFiles(self)
3878 self.context.ReaddNode(new_node)
3879 # make sure we redistribute the config
3880 self.cfg.Update(new_node, feedback_fn)
3881 # and make sure the new node will not have old files around
3882 if not new_node.master_candidate:
3883 result = self.rpc.call_node_demote_from_mc(new_node.name)
3884 msg = result.fail_msg
3886 self.LogWarning("Node failed to demote itself from master"
3887 " candidate status: %s" % msg)
3889 _RedistributeAncillaryFiles(self, additional_nodes=[node])
3890 self.context.AddNode(new_node, self.proc.GetECId())
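# Illustrative sketch only (not part of the original module): the
# reachability checks in LUAddNode.CheckPrereq use netutils.TcpPing
# against constants.DEFAULT_NODED_PORT, optionally from a given source
# address.  The hypothetical helper below shows with plain stdlib
# sockets roughly what such a TCP "ping" amounts to; it is a simplified
# assumption, not the real implementation.
def _ExampleTcpReachable(ip, port, timeout=10.0, source=None):
  """Return True if a TCP connection to ip:port succeeds (sketch)."""
  import socket
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  sock.settimeout(timeout)
  try:
    try:
      if source is not None:
        # bind to the given source address, mirroring TcpPing's source=
        sock.bind((source, 0))
      sock.connect((ip, port))
      return True
    except socket.error:
      return False
  finally:
    sock.close()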
3893 class LUSetNodeParams(LogicalUnit):
3894 """Modifies the parameters of a node.
3897 HPATH = "node-modify"
3898 HTYPE = constants.HTYPE_NODE
3901 ("master_candidate", None, _TMaybeBool),
3902 ("offline", None, _TMaybeBool),
3903 ("drained", None, _TMaybeBool),
3904 ("auto_promote", False, _TBool),
3909 def CheckArguments(self):
3910 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3911 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3912 if all_mods.count(None) == 3:
3913 raise errors.OpPrereqError("Please pass at least one modification",
3915 if all_mods.count(True) > 1:
3916 raise errors.OpPrereqError("Can't set the node into more than one"
3917 " state at the same time",
3920 # Boolean value that tells us whether we're offlining or draining the node
3921 self.offline_or_drain = (self.op.offline == True or
3922 self.op.drained == True)
3923 self.deoffline_or_drain = (self.op.offline == False or
3924 self.op.drained == False)
3925 self.might_demote = (self.op.master_candidate == False or
3926 self.offline_or_drain)
3928 self.lock_all = self.op.auto_promote and self.might_demote
3931 def ExpandNames(self):
3933 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3935 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3937 def BuildHooksEnv(self):
3940 This runs on the master node.
3944 "OP_TARGET": self.op.node_name,
3945 "MASTER_CANDIDATE": str(self.op.master_candidate),
3946 "OFFLINE": str(self.op.offline),
3947 "DRAINED": str(self.op.drained),
3949 nl = [self.cfg.GetMasterNode(),
3953 def CheckPrereq(self):
3954 """Check prerequisites.
3956 This checks the requested flag changes against the node's current state.
3959 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3961 if (self.op.master_candidate is not None or
3962 self.op.drained is not None or
3963 self.op.offline is not None):
3964 # we can't change the master's node flags
3965 if self.op.node_name == self.cfg.GetMasterNode():
3966 raise errors.OpPrereqError("The master role can be changed"
3967 " only via master-failover",
3971 if node.master_candidate and self.might_demote and not self.lock_all:
3972 assert not self.op.auto_promote, "auto-promote set but lock_all not"
3973 # check if after removing the current node, we're missing master
3975 (mc_remaining, mc_should, _) = \
3976 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3977 if mc_remaining < mc_should:
3978 raise errors.OpPrereqError("Not enough master candidates, please"
3979 " pass auto_promote to allow promotion",
3982 if (self.op.master_candidate == True and
3983 ((node.offline and not self.op.offline == False) or
3984 (node.drained and not self.op.drained == False))):
3985 raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3986 " to master_candidate" % node.name,
3989 # If we're being de-offlined or de-drained, promote ourselves to master candidate if needed
3990 if (self.deoffline_or_drain and not self.offline_or_drain and not
3991 self.op.master_candidate == True and not node.master_candidate):
3992 self.op.master_candidate = _DecideSelfPromotion(self)
3993 if self.op.master_candidate:
3994 self.LogInfo("Autopromoting node to master candidate")
3998 def Exec(self, feedback_fn):
4007 if self.op.offline is not None:
4008 node.offline = self.op.offline
4009 result.append(("offline", str(self.op.offline)))
4010 if self.op.offline == True:
4011 if node.master_candidate:
4012 node.master_candidate = False
4014 result.append(("master_candidate", "auto-demotion due to offline"))
4016 node.drained = False
4017 result.append(("drained", "clear drained status due to offline"))
4019 if self.op.master_candidate is not None:
4020 node.master_candidate = self.op.master_candidate
4022 result.append(("master_candidate", str(self.op.master_candidate)))
4023 if self.op.master_candidate == False:
4024 rrc = self.rpc.call_node_demote_from_mc(node.name)
4027 self.LogWarning("Node failed to demote itself: %s" % msg)
4029 if self.op.drained is not None:
4030 node.drained = self.op.drained
4031 result.append(("drained", str(self.op.drained)))
4032 if self.op.drained == True:
4033 if node.master_candidate:
4034 node.master_candidate = False
4036 result.append(("master_candidate", "auto-demotion due to drain"))
4037 rrc = self.rpc.call_node_demote_from_mc(node.name)
4040 self.LogWarning("Node failed to demote itself: %s" % msg)
4042 node.offline = False
4043 result.append(("offline", "clear offline status due to drain"))
4045 # we locked all nodes, so we adjust the candidate pool before updating this node
4047 _AdjustCandidatePool(self, [node.name])
4049 # this will trigger configuration file update, if needed
4050 self.cfg.Update(node, feedback_fn)
4052 # this will trigger job queue propagation or cleanup
4054 self.context.ReaddNode(node)
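# Illustrative sketch only: LUSetNodeParams treats offline, drained and
# master_candidate as tri-state values (None = leave unchanged, True =
# set, False = clear).  The hypothetical helper below mirrors the
# bookkeeping done in CheckArguments, purely to make the combinations
# explicit; it is not used by the module.
def _ExampleNodeFlagIntent(offline, drained, master_candidate):
  """Classify a node-modification request from its tri-state flags (sketch)."""
  offline_or_drain = offline is True or drained is True
  deoffline_or_drain = offline is False or drained is False
  might_demote = master_candidate is False or offline_or_drain
  return {
    "offline_or_drain": offline_or_drain,
    "deoffline_or_drain": deoffline_or_drain,
    "might_demote": might_demote,
  }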
4059 class LUPowercycleNode(NoHooksLU):
4060 """Powercycles a node.
4069 def CheckArguments(self):
4070 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4071 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4072 raise errors.OpPrereqError("The node is the master and the force"
4073 " parameter was not set",
4076 def ExpandNames(self):
4077 """Locking for PowercycleNode.
4079 This is a last-resort option and shouldn't block on other
4080 jobs. Therefore, we grab no locks.
4083 self.needed_locks = {}
4085 def Exec(self, feedback_fn):
4089 result = self.rpc.call_node_powercycle(self.op.node_name,
4090 self.cfg.GetHypervisorType())
4091 result.Raise("Failed to schedule the reboot")
4092 return result.payload
4095 class LUQueryClusterInfo(NoHooksLU):
4096 """Query cluster configuration.
4101 def ExpandNames(self):
4102 self.needed_locks = {}
4104 def Exec(self, feedback_fn):
4105 """Return cluster config.
4108 cluster = self.cfg.GetClusterInfo()
4111 # Filter just for enabled hypervisors
4112 for os_name, hv_dict in cluster.os_hvp.items():
4113 os_hvp[os_name] = {}
4114 for hv_name, hv_params in hv_dict.items():
4115 if hv_name in cluster.enabled_hypervisors:
4116 os_hvp[os_name][hv_name] = hv_params
4118 # Convert ip_family to ip_version
4119 primary_ip_version = constants.IP4_VERSION
4120 if cluster.primary_ip_family == netutils.IP6Address.family:
4121 primary_ip_version = constants.IP6_VERSION
4124 "software_version": constants.RELEASE_VERSION,
4125 "protocol_version": constants.PROTOCOL_VERSION,
4126 "config_version": constants.CONFIG_VERSION,
4127 "os_api_version": max(constants.OS_API_VERSIONS),
4128 "export_version": constants.EXPORT_VERSION,
4129 "architecture": (platform.architecture()[0], platform.machine()),
4130 "name": cluster.cluster_name,
4131 "master": cluster.master_node,
4132 "default_hypervisor": cluster.enabled_hypervisors[0],
4133 "enabled_hypervisors": cluster.enabled_hypervisors,
4134 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4135 for hypervisor_name in cluster.enabled_hypervisors]),
4137 "beparams": cluster.beparams,
4138 "osparams": cluster.osparams,
4139 "nicparams": cluster.nicparams,
4140 "candidate_pool_size": cluster.candidate_pool_size,
4141 "master_netdev": cluster.master_netdev,
4142 "volume_group_name": cluster.volume_group_name,
4143 "drbd_usermode_helper": cluster.drbd_usermode_helper,
4144 "file_storage_dir": cluster.file_storage_dir,
4145 "maintain_node_health": cluster.maintain_node_health,
4146 "ctime": cluster.ctime,
4147 "mtime": cluster.mtime,
4148 "uuid": cluster.uuid,
4149 "tags": list(cluster.GetTags()),
4150 "uid_pool": cluster.uid_pool,
4151 "default_iallocator": cluster.default_iallocator,
4152 "reserved_lvs": cluster.reserved_lvs,
4153 "primary_ip_version": primary_ip_version,
4159 class LUQueryConfigValues(NoHooksLU):
4160 """Return configuration values.
4163 _OP_PARAMS = [_POutputFields]
4165 _FIELDS_DYNAMIC = utils.FieldSet()
4166 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4169 def CheckArguments(self):
4170 _CheckOutputFields(static=self._FIELDS_STATIC,
4171 dynamic=self._FIELDS_DYNAMIC,
4172 selected=self.op.output_fields)
4174 def ExpandNames(self):
4175 self.needed_locks = {}
4177 def Exec(self, feedback_fn):
4178 """Collect and return the requested configuration values.
4182 for field in self.op.output_fields:
4183 if field == "cluster_name":
4184 entry = self.cfg.GetClusterName()
4185 elif field == "master_node":
4186 entry = self.cfg.GetMasterNode()
4187 elif field == "drain_flag":
4188 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4189 elif field == "watcher_pause":
4190 entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4192 raise errors.ParameterError(field)
4193 values.append(entry)
4197 class LUActivateInstanceDisks(NoHooksLU):
4198 """Bring up an instance's disks.
4203 ("ignore_size", False, _TBool),
4207 def ExpandNames(self):
4208 self._ExpandAndLockInstance()
4209 self.needed_locks[locking.LEVEL_NODE] = []
4210 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4212 def DeclareLocks(self, level):
4213 if level == locking.LEVEL_NODE:
4214 self._LockInstancesNodes()
4216 def CheckPrereq(self):
4217 """Check prerequisites.
4219 This checks that the instance is in the cluster.
4222 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4223 assert self.instance is not None, \
4224 "Cannot retrieve locked instance %s" % self.op.instance_name
4225 _CheckNodeOnline(self, self.instance.primary_node)
4227 def Exec(self, feedback_fn):
4228 """Activate the disks.
4231 disks_ok, disks_info = \
4232 _AssembleInstanceDisks(self, self.instance,
4233 ignore_size=self.op.ignore_size)
4235 raise errors.OpExecError("Cannot activate block devices")
4240 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4242 """Prepare the block devices for an instance.
4244 This sets up the block devices on all nodes.
4246 @type lu: L{LogicalUnit}
4247 @param lu: the logical unit on whose behalf we execute
4248 @type instance: L{objects.Instance}
4249 @param instance: the instance for whose disks we assemble
4250 @type disks: list of L{objects.Disk} or None
4251 @param disks: which disks to assemble (or all, if None)
4252 @type ignore_secondaries: boolean
4253 @param ignore_secondaries: if true, errors on secondary nodes
4254 won't result in an error return from the function
4255 @type ignore_size: boolean
4256 @param ignore_size: if true, the current known size of the disk
4257 will not be used during the disk activation, useful for cases
4258 when the size is wrong
4259 @return: a pair (disks_ok, device_info), where device_info is a list of
4260 (host, instance_visible_name, node_visible_name) tuples
4261 with the mapping from node devices to instance devices
4266 iname = instance.name
4267 disks = _ExpandCheckDisks(instance, disks)
4269 # With the two-pass mechanism we try to reduce the window of
4270 # opportunity for the race condition of switching DRBD to primary
4271 # before the handshake has occurred, but we do not eliminate it
4273 # The proper fix would be to wait (with some limits) until the
4274 # connection has been made and drbd transitions from WFConnection
4275 # into any other network-connected state (Connected, SyncTarget,
4278 # 1st pass, assemble on all nodes in secondary mode
4279 for inst_disk in disks:
4280 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4282 node_disk = node_disk.Copy()
4283 node_disk.UnsetSize()
4284 lu.cfg.SetDiskID(node_disk, node)
4285 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
4286 msg = result.fail_msg
4288 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4289 " (is_primary=False, pass=1): %s",
4290 inst_disk.iv_name, node, msg)
4291 if not ignore_secondaries:
4294 # FIXME: race condition on drbd migration to primary
4296 # 2nd pass, do only the primary node
4297 for inst_disk in disks:
4300 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4301 if node != instance.primary_node:
4304 node_disk = node_disk.Copy()
4305 node_disk.UnsetSize()
4306 lu.cfg.SetDiskID(node_disk, node)
4307 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
4308 msg = result.fail_msg
4310 lu.proc.LogWarning("Could not prepare block device %s on node %s"
4311 " (is_primary=True, pass=2): %s",
4312 inst_disk.iv_name, node, msg)
4315 dev_path = result.payload
4317 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4319 # leave the disks configured for the primary node
4320 # this is a workaround that would be fixed better by
4321 # improving the logical/physical id handling
4323 lu.cfg.SetDiskID(disk, instance.primary_node)
4325 return disks_ok, device_info
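# Illustrative sketch only: the two-pass logic above amounts to
# "assemble every node's devices in secondary mode first, then
# re-assemble only the primary node's devices as primary".  The
# hypothetical helper below shows that ordering with an abstract
# assemble_fn(node, disk, is_primary) callback; it is a simplified
# model, not the RPC-driven implementation.
def _ExampleTwoPassAssemble(disks_per_node, primary_node, assemble_fn):
  """Assemble disks secondary-first, then promote the primary (sketch)."""
  # pass 1: all nodes, including the primary, in secondary mode
  for node, node_disks in disks_per_node.items():
    for disk in node_disks:
      assemble_fn(node, disk, False)
  # pass 2: only the primary node, now in primary mode
  for disk in disks_per_node.get(primary_node, []):
    assemble_fn(primary_node, disk, True)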
4328 def _StartInstanceDisks(lu, instance, force):
4329 """Start the disks of an instance.
4332 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4333 ignore_secondaries=force)
4335 _ShutdownInstanceDisks(lu, instance)
4336 if force is not None and not force:
4337 lu.proc.LogWarning("", hint="If the message above refers to a"
4339 " secondary node, you can retry the operation using '--force'.")
4340 raise errors.OpExecError("Disk consistency error")
4343 class LUDeactivateInstanceDisks(NoHooksLU):
4344 """Shutdown an instance's disks.
4352 def ExpandNames(self):
4353 self._ExpandAndLockInstance()
4354 self.needed_locks[locking.LEVEL_NODE] = []
4355 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4357 def DeclareLocks(self, level):
4358 if level == locking.LEVEL_NODE:
4359 self._LockInstancesNodes()
4361 def CheckPrereq(self):
4362 """Check prerequisites.
4364 This checks that the instance is in the cluster.
4367 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4368 assert self.instance is not None, \
4369 "Cannot retrieve locked instance %s" % self.op.instance_name
4371 def Exec(self, feedback_fn):
4372 """Deactivate the disks
4375 instance = self.instance
4376 _SafeShutdownInstanceDisks(self, instance)
4379 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4380 """Shutdown block devices of an instance.
4382 This function first verifies that the instance is not running, and only
4383 then calls _ShutdownInstanceDisks.
4386 _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4387 _ShutdownInstanceDisks(lu, instance, disks=disks)
4390 def _ExpandCheckDisks(instance, disks):
4391 """Return the instance disks selected by the disks list
4393 @type disks: list of L{objects.Disk} or None
4394 @param disks: selected disks
4395 @rtype: list of L{objects.Disk}
4396 @return: selected instance disks to act on
4400 return instance.disks
4402 if not set(disks).issubset(instance.disks):
4403 raise errors.ProgrammerError("Can only act on disks belonging to the"
4408 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4409 """Shutdown block devices of an instance.
4411 This does the shutdown on all nodes of the instance.
4413 Errors on the primary node are ignored only if ignore_primary is true.
4418 disks = _ExpandCheckDisks(instance, disks)
4421 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4422 lu.cfg.SetDiskID(top_disk, node)
4423 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4424 msg = result.fail_msg
4426 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4427 disk.iv_name, node, msg)
4428 if not ignore_primary or node != instance.primary_node:
4433 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4434 """Checks if a node has enough free memory.
4436 This function checks whether the given node has the needed amount of
4437 free memory. If the node has less memory, or the information cannot be
4438 obtained from it, this function raises an OpPrereqError exception.
4441 @type lu: C{LogicalUnit}
4442 @param lu: a logical unit from which we get configuration data
4444 @param node: the node to check
4445 @type reason: C{str}
4446 @param reason: string to use in the error message
4447 @type requested: C{int}
4448 @param requested: the amount of memory in MiB to check for
4449 @type hypervisor_name: C{str}
4450 @param hypervisor_name: the hypervisor to ask for memory stats
4451 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4452 we cannot check the node
4455 nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
4456 nodeinfo[node].Raise("Can't get data from node %s" % node,
4457 prereq=True, ecode=errors.ECODE_ENVIRON)
4458 free_mem = nodeinfo[node].payload.get('memory_free', None)
4459 if not isinstance(free_mem, int):
4460 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4461 " was '%s'" % (node, free_mem),
4462 errors.ECODE_ENVIRON)
4463 if requested > free_mem:
4464 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4465 " needed %s MiB, available %s MiB" %
4466 (node, reason, requested, free_mem),
4470 def _CheckNodesFreeDisk(lu, nodenames, requested):
4471 """Checks if nodes have enough free disk space in the default VG.
4473 This function checks whether all given nodes have the needed amount of
4474 free disk space. If any node has less space, or the information cannot
4475 be obtained from it, this function raises an OpPrereqError exception.
4478 @type lu: C{LogicalUnit}
4479 @param lu: a logical unit from which we get configuration data
4480 @type nodenames: C{list}
4481 @param nodenames: the list of node names to check
4482 @type requested: C{int}
4483 @param requested: the amount of disk in MiB to check for
4484 @raise errors.OpPrereqError: if the node doesn't have enough disk, or
4485 we cannot check the node
4488 nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4489 lu.cfg.GetHypervisorType())
4490 for node in nodenames:
4491 info = nodeinfo[node]
4492 info.Raise("Cannot get current information from node %s" % node,
4493 prereq=True, ecode=errors.ECODE_ENVIRON)
4494 vg_free = info.payload.get("vg_free", None)
4495 if not isinstance(vg_free, int):
4496 raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4497 " result was '%s'" % (node, vg_free),
4498 errors.ECODE_ENVIRON)
4499 if requested > vg_free:
4500 raise errors.OpPrereqError("Not enough disk space on target node %s:"
4501 " required %d MiB, available %d MiB" %
4502 (node, requested, vg_free),
4506 class LUStartupInstance(LogicalUnit):
4507 """Starts an instance.
4510 HPATH = "instance-start"
4511 HTYPE = constants.HTYPE_INSTANCE
4515 ("hvparams", _EmptyDict, _TDict),
4516 ("beparams", _EmptyDict, _TDict),
4520 def CheckArguments(self):
4522 if self.op.beparams:
4523 # fill the beparams dict
4524 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4526 def ExpandNames(self):
4527 self._ExpandAndLockInstance()
4529 def BuildHooksEnv(self):
4532 This runs on master, primary and secondary nodes of the instance.
4536 "FORCE": self.op.force,
4538 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4539 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4542 def CheckPrereq(self):
4543 """Check prerequisites.
4545 This checks that the instance is in the cluster.
4548 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4549 assert self.instance is not None, \
4550 "Cannot retrieve locked instance %s" % self.op.instance_name
4553 if self.op.hvparams:
4554 # check hypervisor parameter syntax (locally)
4555 cluster = self.cfg.GetClusterInfo()
4556 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4557 filled_hvp = cluster.FillHV(instance)
4558 filled_hvp.update(self.op.hvparams)
4559 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4560 hv_type.CheckParameterSyntax(filled_hvp)
4561 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
4563 _CheckNodeOnline(self, instance.primary_node)
4565 bep = self.cfg.GetClusterInfo().FillBE(instance)
4566 # check bridges existence
4567 _CheckInstanceBridgesExist(self, instance)
4569 remote_info = self.rpc.call_instance_info(instance.primary_node,
4571 instance.hypervisor)
4572 remote_info.Raise("Error checking node %s" % instance.primary_node,
4573 prereq=True, ecode=errors.ECODE_ENVIRON)
4574 if not remote_info.payload: # not running already
4575 _CheckNodeFreeMemory(self, instance.primary_node,
4576 "starting instance %s" % instance.name,
4577 bep[constants.BE_MEMORY], instance.hypervisor)
4579 def Exec(self, feedback_fn):
4580 """Start the instance.
4583 instance = self.instance
4584 force = self.op.force
4586 self.cfg.MarkInstanceUp(instance.name)
4588 node_current = instance.primary_node
4590 _StartInstanceDisks(self, instance, force)
4592 result = self.rpc.call_instance_start(node_current, instance,
4593 self.op.hvparams, self.op.beparams)
4594 msg = result.fail_msg
4596 _ShutdownInstanceDisks(self, instance)
4597 raise errors.OpExecError("Could not start instance: %s" % msg)
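# Illustrative sketch only: the hvparams handling in
# LUStartupInstance.CheckPrereq follows a "fill from cluster defaults,
# overlay the temporary per-opcode overrides, then validate the merged
# result" pattern.  The hypothetical helper below shows that flow with
# plain dicts and an abstract validate_fn; it is not the real
# FillHV/CheckParameterSyntax code path.
def _ExampleMergeAndValidateParams(defaults, overrides, validate_fn):
  """Merge parameter dicts and validate the result (sketch)."""
  merged = dict(defaults)   # start from the cluster-filled values
  merged.update(overrides)  # temporary, per-start overrides win
  validate_fn(merged)       # should raise if the merged set is invalid
  return merged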
4600 class LURebootInstance(LogicalUnit):
4601 """Reboot an instance.
4604 HPATH = "instance-reboot"
4605 HTYPE = constants.HTYPE_INSTANCE
4608 ("ignore_secondaries", False, _TBool),
4609 ("reboot_type", _NoDefault, _TElemOf(constants.REBOOT_TYPES)),
4614 def ExpandNames(self):
4615 self._ExpandAndLockInstance()
4617 def BuildHooksEnv(self):
4620 This runs on master, primary and secondary nodes of the instance.
4624 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4625 "REBOOT_TYPE": self.op.reboot_type,
4626 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
4628 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4629 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4632 def CheckPrereq(self):
4633 """Check prerequisites.
4635 This checks that the instance is in the cluster.
4638 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4639 assert self.instance is not None, \
4640 "Cannot retrieve locked instance %s" % self.op.instance_name
4642 _CheckNodeOnline(self, instance.primary_node)
4644 # check bridges existence
4645 _CheckInstanceBridgesExist(self, instance)
4647 def Exec(self, feedback_fn):
4648 """Reboot the instance.
4651 instance = self.instance
4652 ignore_secondaries = self.op.ignore_secondaries
4653 reboot_type = self.op.reboot_type
4655 node_current = instance.primary_node
4657 if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4658 constants.INSTANCE_REBOOT_HARD]:
4659 for disk in instance.disks:
4660 self.cfg.SetDiskID(disk, node_current)
4661 result = self.rpc.call_instance_reboot(node_current, instance,
4663 self.op.shutdown_timeout)
4664 result.Raise("Could not reboot instance")
4666 result = self.rpc.call_instance_shutdown(node_current, instance,
4667 self.op.shutdown_timeout)
4668 result.Raise("Could not shutdown instance for full reboot")
4669 _ShutdownInstanceDisks(self, instance)
4670 _StartInstanceDisks(self, instance, ignore_secondaries)
4671 result = self.rpc.call_instance_start(node_current, instance, None, None)
4672 msg = result.fail_msg
4674 _ShutdownInstanceDisks(self, instance)
4675 raise errors.OpExecError("Could not start instance for"
4676 " full reboot: %s" % msg)
4678 self.cfg.MarkInstanceUp(instance.name)
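# Illustrative sketch only: LURebootInstance.Exec above distinguishes
# two families of reboot.  Soft and hard reboots are delegated to the
# node daemon in a single RPC, while a full reboot is decomposed into
# shutdown, disk deactivation, disk activation and start.  The
# hypothetical helper below records that split as data; it is a reading
# aid, not the RPC implementation.
def _ExampleRebootSteps(reboot_type, in_place_types):
  """Return the high-level steps for a given reboot type (sketch)."""
  if reboot_type in in_place_types:
    return ["reboot the instance in place via the node daemon"]
  return ["shutdown the instance",
          "deactivate its disks",
          "activate its disks",
          "start the instance"]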
4681 class LUShutdownInstance(LogicalUnit):
4682 """Shutdown an instance.
4685 HPATH = "instance-stop"
4686 HTYPE = constants.HTYPE_INSTANCE
4689 ("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, _TPositiveInt),
4693 def ExpandNames(self):
4694 self._ExpandAndLockInstance()
4696 def BuildHooksEnv(self):
4699 This runs on master, primary and secondary nodes of the instance.
4702 env = _BuildInstanceHookEnvByObject(self, self.instance)
4703 env["TIMEOUT"] = self.op.timeout
4704 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4707 def CheckPrereq(self):
4708 """Check prerequisites.
4710 This checks that the instance is in the cluster.
4713 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4714 assert self.instance is not None, \
4715 "Cannot retrieve locked instance %s" % self.op.instance_name
4716 _CheckNodeOnline(self, self.instance.primary_node)
4718 def Exec(self, feedback_fn):
4719 """Shutdown the instance.
4722 instance = self.instance
4723 node_current = instance.primary_node
4724 timeout = self.op.timeout
4725 self.cfg.MarkInstanceDown(instance.name)
4726 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
4727 msg = result.fail_msg
4729 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
4731 _ShutdownInstanceDisks(self, instance)
4734 class LUReinstallInstance(LogicalUnit):
4735 """Reinstall an instance.
4738 HPATH = "instance-reinstall"
4739 HTYPE = constants.HTYPE_INSTANCE
4742 ("os_type", None, _TMaybeString),
4743 ("force_variant", False, _TBool),
4747 def ExpandNames(self):
4748 self._ExpandAndLockInstance()
4750 def BuildHooksEnv(self):
4753 This runs on master, primary and secondary nodes of the instance.
4756 env = _BuildInstanceHookEnvByObject(self, self.instance)
4757 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4760 def CheckPrereq(self):
4761 """Check prerequisites.
4763 This checks that the instance is in the cluster and is not running.
4766 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4767 assert instance is not None, \
4768 "Cannot retrieve locked instance %s" % self.op.instance_name
4769 _CheckNodeOnline(self, instance.primary_node)
4771 if instance.disk_template == constants.DT_DISKLESS:
4772 raise errors.OpPrereqError("Instance '%s' has no disks" %
4773 self.op.instance_name,
4775 _CheckInstanceDown(self, instance, "cannot reinstall")
4777 if self.op.os_type is not None:
4779 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4780 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
4782 self.instance = instance
4784 def Exec(self, feedback_fn):
4785 """Reinstall the instance.
4788 inst = self.instance
4790 if self.op.os_type is not None:
4791 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4792 inst.os = self.op.os_type
4793 self.cfg.Update(inst, feedback_fn)
4795 _StartInstanceDisks(self, inst, None)
4797 feedback_fn("Running the instance OS create scripts...")
4798 # FIXME: pass debug option from opcode to backend
4799 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4800 self.op.debug_level)
4801 result.Raise("Could not install OS for instance %s on node %s" %
4802 (inst.name, inst.primary_node))
4804 _ShutdownInstanceDisks(self, inst)
4807 class LURecreateInstanceDisks(LogicalUnit):
4808 """Recreate an instance's missing disks.
4811 HPATH = "instance-recreate-disks"
4812 HTYPE = constants.HTYPE_INSTANCE
4815 ("disks", _EmptyList, _TListOf(_TPositiveInt)),
4819 def ExpandNames(self):
4820 self._ExpandAndLockInstance()
4822 def BuildHooksEnv(self):
4825 This runs on master, primary and secondary nodes of the instance.
4828 env = _BuildInstanceHookEnvByObject(self, self.instance)
4829 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4832 def CheckPrereq(self):
4833 """Check prerequisites.
4835 This checks that the instance is in the cluster and is not running.
4838 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4839 assert instance is not None, \
4840 "Cannot retrieve locked instance %s" % self.op.instance_name
4841 _CheckNodeOnline(self, instance.primary_node)
4843 if instance.disk_template == constants.DT_DISKLESS:
4844 raise errors.OpPrereqError("Instance '%s' has no disks" %
4845 self.op.instance_name, errors.ECODE_INVAL)
4846 _CheckInstanceDown(self, instance, "cannot recreate disks")
4848 if not self.op.disks:
4849 self.op.disks = range(len(instance.disks))
4851 for idx in self.op.disks:
4852 if idx >= len(instance.disks):
4853 raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
4856 self.instance = instance
4858 def Exec(self, feedback_fn):
4859 """Recreate the disks.
4863 for idx, _ in enumerate(self.instance.disks):
4864 if idx not in self.op.disks: # disk idx has not been passed in
4868 _CreateDisks(self, self.instance, to_skip=to_skip)
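# Illustrative sketch only: Exec above recreates only the disk indices
# named in the opcode and passes every other index to _CreateDisks via
# to_skip.  The hypothetical helper below states that computation on its
# own; it is equivalent in spirit, not the code actually used.
def _ExampleDiskSkipList(disk_count, wanted_indices):
  """Return the disk indices a recreate operation should skip (sketch)."""
  return [idx for idx in range(disk_count) if idx not in wanted_indices]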
4871 class LURenameInstance(LogicalUnit):
4872 """Rename an instance.
4875 HPATH = "instance-rename"
4876 HTYPE = constants.HTYPE_INSTANCE
4879 ("new_name", _NoDefault, _TNonEmptyString),
4880 ("ip_check", False, _TBool),
4881 ("name_check", True, _TBool),
4884 def CheckArguments(self):
4888 if self.op.ip_check and not self.op.name_check:
4889 # TODO: make the ip check more flexible and not depend on the name check
4890 raise errors.OpPrereqError("Cannot do ip check without a name check",
4893 def BuildHooksEnv(self):
4896 This runs on master, primary and secondary nodes of the instance.
4899 env = _BuildInstanceHookEnvByObject(self, self.instance)
4900 env["INSTANCE_NEW_NAME"] = self.op.new_name
4901 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4904 def CheckPrereq(self):
4905 """Check prerequisites.
4907 This checks that the instance is in the cluster and is not running.
4910 self.op.instance_name = _ExpandInstanceName(self.cfg,
4911 self.op.instance_name)
4912 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4913 assert instance is not None
4914 _CheckNodeOnline(self, instance.primary_node)
4915 _CheckInstanceDown(self, instance, "cannot rename")
4916 self.instance = instance
4918 new_name = self.op.new_name
4919 if self.op.name_check:
4920 hostname = netutils.GetHostname(name=new_name)
4921 new_name = hostname.name
4922 if (self.op.ip_check and
4923 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
4924 raise errors.OpPrereqError("IP %s of instance %s already in use" %
4925 (hostname.ip, new_name),
4926 errors.ECODE_NOTUNIQUE)
4928 instance_list = self.cfg.GetInstanceList()
4929 if new_name in instance_list:
4930 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4931 new_name, errors.ECODE_EXISTS)
4933 def Exec(self, feedback_fn):
4934 """Rename the instance.
4937 inst = self.instance
4938 old_name = inst.name
4940 if inst.disk_template == constants.DT_FILE:
4941 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4943 self.cfg.RenameInstance(inst.name, self.op.new_name)
4944 # Change the instance lock. This is definitely safe while we hold the BGL
4945 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4946 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4948 # re-read the instance from the configuration after rename
4949 inst = self.cfg.GetInstanceInfo(self.op.new_name)
4951 if inst.disk_template == constants.DT_FILE:
4952 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4953 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4954 old_file_storage_dir,
4955 new_file_storage_dir)
4956 result.Raise("Could not rename on node %s directory '%s' to '%s'"
4957 " (but the instance has been renamed in Ganeti)" %
4958 (inst.primary_node, old_file_storage_dir,
4959 new_file_storage_dir))
4961 _StartInstanceDisks(self, inst, None)
4963 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4964 old_name, self.op.debug_level)
4965 msg = result.fail_msg
4967 msg = ("Could not run OS rename script for instance %s on node %s"
4968 " (but the instance has been renamed in Ganeti): %s" %
4969 (inst.name, inst.primary_node, msg))
4970 self.proc.LogWarning(msg)
4972 _ShutdownInstanceDisks(self, inst)
4977 class LURemoveInstance(LogicalUnit):
4978 """Remove an instance.
4981 HPATH = "instance-remove"
4982 HTYPE = constants.HTYPE_INSTANCE
4985 ("ignore_failures", False, _TBool),
4990 def ExpandNames(self):
4991 self._ExpandAndLockInstance()
4992 self.needed_locks[locking.LEVEL_NODE] = []
4993 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4995 def DeclareLocks(self, level):
4996 if level == locking.LEVEL_NODE:
4997 self._LockInstancesNodes()
4999 def BuildHooksEnv(self):
5002 This runs on master, primary and secondary nodes of the instance.
5005 env = _BuildInstanceHookEnvByObject(self, self.instance)
5006 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
5007 nl = [self.cfg.GetMasterNode()]
5008 nl_post = list(self.instance.all_nodes) + nl
5009 return env, nl, nl_post
5011 def CheckPrereq(self):
5012 """Check prerequisites.
5014 This checks that the instance is in the cluster.
5017 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5018 assert self.instance is not None, \
5019 "Cannot retrieve locked instance %s" % self.op.instance_name
5021 def Exec(self, feedback_fn):
5022 """Remove the instance.
5025 instance = self.instance
5026 logging.info("Shutting down instance %s on node %s",
5027 instance.name, instance.primary_node)
5029 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5030 self.op.shutdown_timeout)
5031 msg = result.fail_msg
5033 if self.op.ignore_failures:
5034 feedback_fn("Warning: can't shutdown instance: %s" % msg)
5036 raise errors.OpExecError("Could not shutdown instance %s on"
5038 (instance.name, instance.primary_node, msg))
5040 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5043 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5044 """Utility function to remove an instance.
5047 logging.info("Removing block devices for instance %s", instance.name)
5049 if not _RemoveDisks(lu, instance):
5050 if not ignore_failures:
5051 raise errors.OpExecError("Can't remove instance's disks")
5052 feedback_fn("Warning: can't remove instance's disks")
5054 logging.info("Removing instance %s out of cluster config", instance.name)
5056 lu.cfg.RemoveInstance(instance.name)
5058 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5059 "Instance lock removal conflict"
5061 # Remove lock for the instance
5062 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5065 class LUQueryInstances(NoHooksLU):
5066 """Logical unit for querying instances.
5069 # pylint: disable-msg=W0142
5071 ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
5072 ("names", _EmptyList, _TListOf(_TNonEmptyString)),
5073 ("use_locking", False, _TBool),
5076 _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
5077 "serial_no", "ctime", "mtime", "uuid"]
5078 _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
5080 "disk_template", "ip", "mac", "bridge",
5081 "nic_mode", "nic_link",
5082 "sda_size", "sdb_size", "vcpus", "tags",
5083 "network_port", "beparams",
5084 r"(disk)\.(size)/([0-9]+)",
5085 r"(disk)\.(sizes)", "disk_usage",
5086 r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
5087 r"(nic)\.(bridge)/([0-9]+)",
5088 r"(nic)\.(macs|ips|modes|links|bridges)",
5089 r"(disk|nic)\.(count)",
5091 ] + _SIMPLE_FIELDS +
5093 for name in constants.HVS_PARAMETERS
5094 if name not in constants.HVC_GLOBALS] +
5096 for name in constants.BES_PARAMETERS])
5097 _FIELDS_DYNAMIC = utils.FieldSet("oper_state",
5103 def CheckArguments(self):
5104 _CheckOutputFields(static=self._FIELDS_STATIC,
5105 dynamic=self._FIELDS_DYNAMIC,
5106 selected=self.op.output_fields)
5108 def ExpandNames(self):
5109 self.needed_locks = {}
5110 self.share_locks[locking.LEVEL_INSTANCE] = 1
5111 self.share_locks[locking.LEVEL_NODE] = 1
5114 self.wanted = _GetWantedInstances(self, self.op.names)
5116 self.wanted = locking.ALL_SET
5118 self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
5119 self.do_locking = self.do_node_query and self.op.use_locking
5121 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5122 self.needed_locks[locking.LEVEL_NODE] = []
5123 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5125 def DeclareLocks(self, level):
5126 if level == locking.LEVEL_NODE and self.do_locking:
5127 self._LockInstancesNodes()
5129 def Exec(self, feedback_fn):
5130 """Computes the list of nodes and their attributes.
5133 # pylint: disable-msg=R0912
5134 # way too many branches here
5135 all_info = self.cfg.GetAllInstancesInfo()
5136 if self.wanted == locking.ALL_SET:
5137 # caller didn't specify instance names, so ordering is not important
5139 instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5141 instance_names = all_info.keys()
5142 instance_names = utils.NiceSort(instance_names)
5144 # caller did specify names, so we must keep the ordering
5146 tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
5148 tgt_set = all_info.keys()
5149 missing = set(self.wanted).difference(tgt_set)
5151 raise errors.OpExecError("Some instances were removed before"
5152 " retrieving their data: %s" % missing)
5153 instance_names = self.wanted
5155 instance_list = [all_info[iname] for iname in instance_names]
5157 # begin data gathering
5159 nodes = frozenset([inst.primary_node for inst in instance_list])
5160 hv_list = list(set([inst.hypervisor for inst in instance_list]))
5164 if self.do_node_query:
5166 node_data = self.rpc.call_all_instances_info(nodes, hv_list)
5168 result = node_data[name]
5170 # offline nodes will be in both lists
5171 off_nodes.append(name)
5173 bad_nodes.append(name)
5176 live_data.update(result.payload)
5177 # else no instance is alive
5179 live_data = dict([(name, {}) for name in instance_names])
5181 # end data gathering
5186 cluster = self.cfg.GetClusterInfo()
5187 for instance in instance_list:
5189 i_hv = cluster.FillHV(instance, skip_globals=True)
5190 i_be = cluster.FillBE(instance)
5191 i_nicp = [cluster.SimpleFillNIC(nic.nicparams) for nic in instance.nics]
5192 for field in self.op.output_fields:
5193 st_match = self._FIELDS_STATIC.Matches(field)
5194 if field in self._SIMPLE_FIELDS:
5195 val = getattr(instance, field)
5196 elif field == "pnode":
5197 val = instance.primary_node
5198 elif field == "snodes":
5199 val = list(instance.secondary_nodes)
5200 elif field == "admin_state":
5201 val = instance.admin_up
5202 elif field == "oper_state":
5203 if instance.primary_node in bad_nodes:
5206 val = bool(live_data.get(instance.name))
5207 elif field == "status":
5208 if instance.primary_node in off_nodes:
5209 val = "ERROR_nodeoffline"
5210 elif instance.primary_node in bad_nodes:
5211 val = "ERROR_nodedown"
5213 running = bool(live_data.get(instance.name))
5215 if instance.admin_up:
5220 if instance.admin_up:
5224 elif field == "oper_ram":
5225 if instance.primary_node in bad_nodes:
5227 elif instance.name in live_data:
5228 val = live_data[instance.name].get("memory", "?")
5231 elif field == "oper_vcpus":
5232 if instance.primary_node in bad_nodes:
5234 elif instance.name in live_data:
5235 val = live_data[instance.name].get("vcpus", "?")
5238 elif field == "vcpus":
5239 val = i_be[constants.BE_VCPUS]
5240 elif field == "disk_template":
5241 val = instance.disk_template
5244 val = instance.nics[0].ip
5247 elif field == "nic_mode":
5249 val = i_nicp[0][constants.NIC_MODE]
5252 elif field == "nic_link":
5254 val = i_nicp[0][constants.NIC_LINK]
5257 elif field == "bridge":
5258 if (instance.nics and
5259 i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
5260 val = i_nicp[0][constants.NIC_LINK]
5263 elif field == "mac":
5265 val = instance.nics[0].mac
5268 elif field == "sda_size" or field == "sdb_size":
5269 idx = ord(field[2]) - ord('a')
5271 val = instance.FindDisk(idx).size
5272 except errors.OpPrereqError:
5274 elif field == "disk_usage": # total disk usage per node
5275 disk_sizes = [{'size': disk.size} for disk in instance.disks]
5276 val = _ComputeDiskSize(instance.disk_template, disk_sizes)
5277 elif field == "tags":
5278 val = list(instance.GetTags())
5279 elif field == "hvparams":
5281 elif (field.startswith(HVPREFIX) and
5282 field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
5283 field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
5284 val = i_hv.get(field[len(HVPREFIX):], None)
5285 elif field == "beparams":
5287 elif (field.startswith(BEPREFIX) and
5288 field[len(BEPREFIX):] in constants.BES_PARAMETERS):
5289 val = i_be.get(field[len(BEPREFIX):], None)
5290 elif st_match and st_match.groups():
5291 # matches a variable list
5292 st_groups = st_match.groups()
5293 if st_groups and st_groups[0] == "disk":
5294 if st_groups[1] == "count":
5295 val = len(instance.disks)
5296 elif st_groups[1] == "sizes":
5297 val = [disk.size for disk in instance.disks]
5298 elif st_groups[1] == "size":
5300 val = instance.FindDisk(st_groups[2]).size
5301 except errors.OpPrereqError:
5304 assert False, "Unhandled disk parameter"
5305 elif st_groups[0] == "nic":
5306 if st_groups[1] == "count":
5307 val = len(instance.nics)
5308 elif st_groups[1] == "macs":
5309 val = [nic.mac for nic in instance.nics]
5310 elif st_groups[1] == "ips":
5311 val = [nic.ip for nic in instance.nics]
5312 elif st_groups[1] == "modes":
5313 val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
5314 elif st_groups[1] == "links":
5315 val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
5316 elif st_groups[1] == "bridges":
5319 if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
5320 val.append(nicp[constants.NIC_LINK])
5325 nic_idx = int(st_groups[2])
5326 if nic_idx >= len(instance.nics):
5329 if st_groups[1] == "mac":
5330 val = instance.nics[nic_idx].mac
5331 elif st_groups[1] == "ip":
5332 val = instance.nics[nic_idx].ip
5333 elif st_groups[1] == "mode":
5334 val = i_nicp[nic_idx][constants.NIC_MODE]
5335 elif st_groups[1] == "link":
5336 val = i_nicp[nic_idx][constants.NIC_LINK]
5337 elif st_groups[1] == "bridge":
5338 nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
5339 if nic_mode == constants.NIC_MODE_BRIDGED:
5340 val = i_nicp[nic_idx][constants.NIC_LINK]
5344 assert False, "Unhandled NIC parameter"
5346 assert False, ("Declared but unhandled variable parameter '%s'" %
5349 assert False, "Declared but unhandled parameter '%s'" % field
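# Illustrative sketch only: several of the static fields above are
# regular expressions such as r"(nic)\.(mac|ip|mode|link)/([0-9]+)", so
# a query for e.g. "nic.mac/0" is matched and its groups drive the
# per-index lookups in Exec.  The hypothetical helper below shows that
# matching step with the plain re module; the real work is done by
# utils.FieldSet.
def _ExampleMatchNicField(field):
  """Return (kind, attribute, index) for a nic.X/N field, else None (sketch)."""
  import re
  m = re.match(r"^(nic)\.(mac|ip|mode|link)/([0-9]+)$", field)
  if m is None:
    return None
  kind, attr, idx = m.groups()
  return (kind, attr, int(idx))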
5356 class LUFailoverInstance(LogicalUnit):
5357 """Failover an instance.
5360 HPATH = "instance-failover"
5361 HTYPE = constants.HTYPE_INSTANCE
5364 ("ignore_consistency", False, _TBool),
5369 def ExpandNames(self):
5370 self._ExpandAndLockInstance()
5371 self.needed_locks[locking.LEVEL_NODE] = []
5372 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5374 def DeclareLocks(self, level):
5375 if level == locking.LEVEL_NODE:
5376 self._LockInstancesNodes()
5378 def BuildHooksEnv(self):
5381 This runs on master, primary and secondary nodes of the instance.
5384 instance = self.instance
5385 source_node = instance.primary_node
5386 target_node = instance.secondary_nodes[0]
5388 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5389 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5390 "OLD_PRIMARY": source_node,
5391 "OLD_SECONDARY": target_node,
5392 "NEW_PRIMARY": target_node,
5393 "NEW_SECONDARY": source_node,
5395 env.update(_BuildInstanceHookEnvByObject(self, instance))
5396 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5398 nl_post.append(source_node)
5399 return env, nl, nl_post
5401 def CheckPrereq(self):
5402 """Check prerequisites.
5404 This checks that the instance is in the cluster.
5407 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5408 assert self.instance is not None, \
5409 "Cannot retrieve locked instance %s" % self.op.instance_name
5411 bep = self.cfg.GetClusterInfo().FillBE(instance)
5412 if instance.disk_template not in constants.DTS_NET_MIRROR:
5413 raise errors.OpPrereqError("Instance's disk layout is not"
5414 " network mirrored, cannot failover.",
5417 secondary_nodes = instance.secondary_nodes
5418 if not secondary_nodes:
5419 raise errors.ProgrammerError("no secondary node but using "
5420 "a mirrored disk template")
5422 target_node = secondary_nodes[0]
5423 _CheckNodeOnline(self, target_node)
5424 _CheckNodeNotDrained(self, target_node)
5425 if instance.admin_up:
5426 # check memory requirements on the secondary node
5427 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5428 instance.name, bep[constants.BE_MEMORY],
5429 instance.hypervisor)
5431 self.LogInfo("Not checking memory on the secondary node as"
5432 " instance will not be started")
5434 # check bridge existence
5435 _CheckInstanceBridgesExist(self, instance, node=target_node)
5437 def Exec(self, feedback_fn):
5438 """Failover an instance.
5440 The failover is done by shutting it down on its present node and
5441 starting it on the secondary.
5444 instance = self.instance
5445 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
5447 source_node = instance.primary_node
5448 target_node = instance.secondary_nodes[0]
5450 if instance.admin_up:
5451 feedback_fn("* checking disk consistency between source and target")
5452 for dev in instance.disks:
5453 # for drbd, these are drbd over lvm
5454 if not _CheckDiskConsistency(self, dev, target_node, False):
5455 if not self.op.ignore_consistency:
5456 raise errors.OpExecError("Disk %s is degraded on target node,"
5457 " aborting failover." % dev.iv_name)
5459 feedback_fn("* not checking disk consistency as instance is not running")
5461 feedback_fn("* shutting down instance on source node")
5462 logging.info("Shutting down instance %s on node %s",
5463 instance.name, source_node)
5465 result = self.rpc.call_instance_shutdown(source_node, instance,
5466 self.op.shutdown_timeout)
5467 msg = result.fail_msg
5469 if self.op.ignore_consistency or primary_node.offline:
5470 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5471 " Proceeding anyway. Please make sure node"
5472 " %s is down. Error details: %s",
5473 instance.name, source_node, source_node, msg)
5475 raise errors.OpExecError("Could not shutdown instance %s on"
5477 (instance.name, source_node, msg))
5479 feedback_fn("* deactivating the instance's disks on source node")
5480 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5481 raise errors.OpExecError("Can't shut down the instance's disks.")
5483 instance.primary_node = target_node
5484 # distribute new instance config to the other nodes
5485 self.cfg.Update(instance, feedback_fn)
5487 # Only start the instance if it's marked as up
5488 if instance.admin_up:
5489 feedback_fn("* activating the instance's disks on target node")
5490 logging.info("Starting instance %s on node %s",
5491 instance.name, target_node)
5493 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5494 ignore_secondaries=True)
5496 _ShutdownInstanceDisks(self, instance)
5497 raise errors.OpExecError("Can't activate the instance's disks")
5499 feedback_fn("* starting the instance on the target node")
5500 result = self.rpc.call_instance_start(target_node, instance, None, None)
5501 msg = result.fail_msg
5503 _ShutdownInstanceDisks(self, instance)
5504 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5505 (instance.name, target_node, msg))
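# Illustrative sketch only: BuildHooksEnv above exposes the failover to
# hooks as a role swap between the current primary and its secondary.
# The hypothetical helper below isolates just that swap; it is a reading
# aid, not the hook-environment code itself.
def _ExampleFailoverRoleSwap(source_node, target_node):
  """Return the old/new primary and secondary roles for a failover (sketch)."""
  return {
    "OLD_PRIMARY": source_node,
    "OLD_SECONDARY": target_node,
    "NEW_PRIMARY": target_node,
    "NEW_SECONDARY": source_node,
  }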
5508 class LUMigrateInstance(LogicalUnit):
5509 """Migrate an instance.
5511 This is migration without shutting the instance down; failover, by
5512 contrast, is done with a shutdown.
5515 HPATH = "instance-migrate"
5516 HTYPE = constants.HTYPE_INSTANCE
5521 ("cleanup", False, _TBool),
5526 def ExpandNames(self):
5527 self._ExpandAndLockInstance()
5529 self.needed_locks[locking.LEVEL_NODE] = []
5530 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5532 self._migrater = TLMigrateInstance(self, self.op.instance_name,
5534 self.tasklets = [self._migrater]
5536 def DeclareLocks(self, level):
5537 if level == locking.LEVEL_NODE:
5538 self._LockInstancesNodes()
5540 def BuildHooksEnv(self):
5543 This runs on master, primary and secondary nodes of the instance.
5546 instance = self._migrater.instance
5547 source_node = instance.primary_node
5548 target_node = instance.secondary_nodes[0]
5549 env = _BuildInstanceHookEnvByObject(self, instance)
5550 env["MIGRATE_LIVE"] = self._migrater.live
5551 env["MIGRATE_CLEANUP"] = self.op.cleanup
5553 "OLD_PRIMARY": source_node,
5554 "OLD_SECONDARY": target_node,
5555 "NEW_PRIMARY": target_node,
5556 "NEW_SECONDARY": source_node,
5558 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5560 nl_post.append(source_node)
5561 return env, nl, nl_post
5564 class LUMoveInstance(LogicalUnit):
5565 """Move an instance by data-copying.
5568 HPATH = "instance-move"
5569 HTYPE = constants.HTYPE_INSTANCE
5572 ("target_node", _NoDefault, _TNonEmptyString),
5577 def ExpandNames(self):
5578 self._ExpandAndLockInstance()
5579 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5580 self.op.target_node = target_node
5581 self.needed_locks[locking.LEVEL_NODE] = [target_node]
5582 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5584 def DeclareLocks(self, level):
5585 if level == locking.LEVEL_NODE:
5586 self._LockInstancesNodes(primary_only=True)
5588 def BuildHooksEnv(self):
5591 This runs on master, primary and secondary nodes of the instance.
5595 "TARGET_NODE": self.op.target_node,
5596 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5598 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5599 nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5600 self.op.target_node]
5603 def CheckPrereq(self):
5604 """Check prerequisites.
5606 This checks that the instance is in the cluster.
5609 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5610 assert self.instance is not None, \
5611 "Cannot retrieve locked instance %s" % self.op.instance_name
5613 node = self.cfg.GetNodeInfo(self.op.target_node)
5614 assert node is not None, \
5615 "Cannot retrieve locked node %s" % self.op.target_node
5617 self.target_node = target_node = node.name
5619 if target_node == instance.primary_node:
5620 raise errors.OpPrereqError("Instance %s is already on the node %s" %
5621 (instance.name, target_node),
5624 bep = self.cfg.GetClusterInfo().FillBE(instance)
5626 for idx, dsk in enumerate(instance.disks):
5627 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5628 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5629 " cannot copy" % idx, errors.ECODE_STATE)
5631 _CheckNodeOnline(self, target_node)
5632 _CheckNodeNotDrained(self, target_node)
5634 if instance.admin_up:
5635 # check memory requirements on the target node
5636 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5637 instance.name, bep[constants.BE_MEMORY],
5638 instance.hypervisor)
5640 self.LogInfo("Not checking memory on the secondary node as"
5641 " instance will not be started")
5643 # check bridge existence
5644 _CheckInstanceBridgesExist(self, instance, node=target_node)
5646 def Exec(self, feedback_fn):
5647 """Move an instance.
5649 The move is done by shutting it down on its present node, copying
5650 the data over (slow) and starting it on the new node.
5653 instance = self.instance
5655 source_node = instance.primary_node
5656 target_node = self.target_node
5658 self.LogInfo("Shutting down instance %s on source node %s",
5659 instance.name, source_node)
5661 result = self.rpc.call_instance_shutdown(source_node, instance,
5662 self.op.shutdown_timeout)
5663 msg = result.fail_msg
5665 if self.op.ignore_consistency:
5666 self.proc.LogWarning("Could not shutdown instance %s on node %s."
5667 " Proceeding anyway. Please make sure node"
5668 " %s is down. Error details: %s",
5669 instance.name, source_node, source_node, msg)
5671 raise errors.OpExecError("Could not shutdown instance %s on"
5673 (instance.name, source_node, msg))
5675 # create the target disks
5677 _CreateDisks(self, instance, target_node=target_node)
5678 except errors.OpExecError:
5679 self.LogWarning("Device creation failed, reverting...")
5681 _RemoveDisks(self, instance, target_node=target_node)
5683 self.cfg.ReleaseDRBDMinors(instance.name)
5686 cluster_name = self.cfg.GetClusterInfo().cluster_name
5689 # activate, get path, copy the data over
5690 for idx, disk in enumerate(instance.disks):
5691 self.LogInfo("Copying data for disk %d", idx)
5692 result = self.rpc.call_blockdev_assemble(target_node, disk,
5693 instance.name, True)
5695 self.LogWarning("Can't assemble newly created disk %d: %s",
5696 idx, result.fail_msg)
5697 errs.append(result.fail_msg)
5699 dev_path = result.payload
5700 result = self.rpc.call_blockdev_export(source_node, disk,
5701 target_node, dev_path,
5704 self.LogWarning("Can't copy data over for disk %d: %s",
5705 idx, result.fail_msg)
5706 errs.append(result.fail_msg)
5710 self.LogWarning("Some disks failed to copy, aborting")
5712 _RemoveDisks(self, instance, target_node=target_node)
5714 self.cfg.ReleaseDRBDMinors(instance.name)
5715 raise errors.OpExecError("Errors during disk copy: %s" %
5718 instance.primary_node = target_node
5719 self.cfg.Update(instance, feedback_fn)
5721 self.LogInfo("Removing the disks on the original node")
5722 _RemoveDisks(self, instance, target_node=source_node)
5724 # Only start the instance if it's marked as up
5725 if instance.admin_up:
5726 self.LogInfo("Starting instance %s on node %s",
5727 instance.name, target_node)
5729 disks_ok, _ = _AssembleInstanceDisks(self, instance,
5730 ignore_secondaries=True)
5732 _ShutdownInstanceDisks(self, instance)
5733 raise errors.OpExecError("Can't activate the instance's disks")
5735 result = self.rpc.call_instance_start(target_node, instance, None, None)
5736 msg = result.fail_msg
5738 _ShutdownInstanceDisks(self, instance)
5739 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5740 (instance.name, target_node, msg))
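# Illustrative sketch only: the per-disk copy loop above collects errors
# instead of aborting at the first failure, and only afterwards decides
# whether to roll back and remove the target disks.  The hypothetical
# helper below shows the same accumulate-then-decide shape with an
# abstract copy_fn callback returning an error message or None; it is
# not the blockdev_assemble/blockdev_export RPC code.
def _ExampleCopyAllOrReport(disks, copy_fn):
  """Copy every disk and return the list of per-disk errors (sketch)."""
  errs = []
  for idx, disk in enumerate(disks):
    err = copy_fn(idx, disk)
    if err:
      errs.append("disk %d: %s" % (idx, err))
  return errs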
5743 class LUMigrateNode(LogicalUnit):
5744 """Migrate all instances from a node.
5747 HPATH = "node-migrate"
5748 HTYPE = constants.HTYPE_NODE
5756 def ExpandNames(self):
5757 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5759 self.needed_locks = {
5760 locking.LEVEL_NODE: [self.op.node_name],
5763 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5765 # Create one migration tasklet for each instance whose primary node is this node
5769 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
5770 logging.debug("Migrating instance %s", inst.name)
5771 names.append(inst.name)
5773 tasklets.append(TLMigrateInstance(self, inst.name, False))
5775 self.tasklets = tasklets
5777 # Declare instance locks
5778 self.needed_locks[locking.LEVEL_INSTANCE] = names
5780 def DeclareLocks(self, level):
5781 if level == locking.LEVEL_NODE:
5782 self._LockInstancesNodes()
5784 def BuildHooksEnv(self):
5787 This runs on the master, the primary and all the secondaries.
5791 "NODE_NAME": self.op.node_name,
5794 nl = [self.cfg.GetMasterNode()]
5796 return (env, nl, nl)
5799 class TLMigrateInstance(Tasklet):
5800 """Tasklet class for instance migration.
5803 @ivar live: whether the migration will be done live or non-live;
5804 this variable is initialized only after CheckPrereq has run
5807 def __init__(self, lu, instance_name, cleanup):
5808 """Initializes this class.
5811 Tasklet.__init__(self, lu)
5814 self.instance_name = instance_name
5815 self.cleanup = cleanup
5816 self.live = False # will be overridden later
5818 def CheckPrereq(self):
5819 """Check prerequisites.
5821 This checks that the instance is in the cluster.
5824 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
5825 instance = self.cfg.GetInstanceInfo(instance_name)
5826 assert instance is not None
5828 if instance.disk_template != constants.DT_DRBD8:
5829 raise errors.OpPrereqError("Instance's disk layout is not"
5830 " drbd8, cannot migrate.", errors.ECODE_STATE)
5832 secondary_nodes = instance.secondary_nodes
5833 if not secondary_nodes:
5834 raise errors.ConfigurationError("No secondary node but using"
5835 " drbd8 disk template")
5837 i_be = self.cfg.GetClusterInfo().FillBE(instance)
5839 target_node = secondary_nodes[0]
5840 # check memory requirements on the secondary node
5841 _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
5842 instance.name, i_be[constants.BE_MEMORY],
5843 instance.hypervisor)
5845 # check bridge existence
5846 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
5848 if not self.cleanup:
5849 _CheckNodeNotDrained(self.lu, target_node)
5850 result = self.rpc.call_instance_migratable(instance.primary_node,
5852 result.Raise("Can't migrate, please use failover",
5853 prereq=True, ecode=errors.ECODE_STATE)
5855 self.instance = instance
5857 if self.lu.op.live is not None and self.lu.op.mode is not None:
5858 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
5859 " parameters are accepted",
5861 if self.lu.op.live is not None:
5863 self.lu.op.mode = constants.HT_MIGRATION_LIVE
5865 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
5866 # reset the 'live' parameter to None so that repeated
5867 # invocations of CheckPrereq do not raise an exception
5868 self.lu.op.live = None
5869 elif self.lu.op.mode is None:
5870 # read the default value from the hypervisor
5871 i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
5872 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
5874 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
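# Net effect of the checks above: only one of 'live' and 'mode' may be
# given; an explicit 'live' flag is translated into the corresponding
# migration mode, an explicit 'mode' is used as given, and with neither set
# the hypervisor's HV_MIGRATION_MODE default applies. Later code only looks
# at the resulting boolean self.live.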
5876 def _WaitUntilSync(self):
5877 """Poll with custom rpc for disk sync.
5879 This uses our own step-based rpc call.
5882 self.feedback_fn("* wait until resync is done")
5886 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
5888 self.instance.disks)
5890 for node, nres in result.items():
5891 nres.Raise("Cannot resync disks on node %s" % node)
5892 node_done, node_percent = nres.payload
5893 all_done = all_done and node_done
5894 if node_percent is not None:
5895 min_percent = min(min_percent, node_percent)
5897 if min_percent < 100:
5898 self.feedback_fn(" - progress: %.1f%%" % min_percent)
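# Illustrative poll (figures assumed): if one node reports (True, 100.0) and
# the other (False, 87.5), all_done stays False, min_percent becomes 87.5,
# "- progress: 87.5%" is printed and the loop sleeps briefly before asking
# both nodes again, until every node reports its disks as synchronized.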
5901 def _EnsureSecondary(self, node):
5902 """Demote a node to secondary.
5905 self.feedback_fn("* switching node %s to secondary mode" % node)
5907 for dev in self.instance.disks:
5908 self.cfg.SetDiskID(dev, node)
5910 result = self.rpc.call_blockdev_close(node, self.instance.name,
5911 self.instance.disks)
5912 result.Raise("Cannot change disk to secondary on node %s" % node)
5914 def _GoStandalone(self):
5915 """Disconnect from the network.
5918 self.feedback_fn("* changing into standalone mode")
5919 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
5920 self.instance.disks)
5921 for node, nres in result.items():
5922 nres.Raise("Cannot disconnect disks node %s" % node)
5924 def _GoReconnect(self, multimaster):
5925 """Reconnect to the network.
5931 msg = "single-master"
5932 self.feedback_fn("* changing disks into %s mode" % msg)
5933 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
5934 self.instance.disks,
5935 self.instance.name, multimaster)
5936 for node, nres in result.items():
5937 nres.Raise("Cannot change disks config on node %s" % node)
5939 def _ExecCleanup(self):
5940 """Try to cleanup after a failed migration.
5942 The cleanup is done by:
5943 - check that the instance is running only on one node
5944 (and update the config if needed)
5945 - change disks on its secondary node to secondary
5946 - wait until disks are fully synchronized
5947 - disconnect from the network
5948 - change disks into single-master mode
5949 - wait again until disks are fully synchronized
5952 instance = self.instance
5953 target_node = self.target_node
5954 source_node = self.source_node
5956 # check running on only one node
5957 self.feedback_fn("* checking where the instance actually runs"
5958 " (if this hangs, the hypervisor might be in"
5960 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5961 for node, result in ins_l.items():
5962 result.Raise("Can't contact node %s" % node)
5964 runningon_source = instance.name in ins_l[source_node].payload
5965 runningon_target = instance.name in ins_l[target_node].payload
5967 if runningon_source and runningon_target:
5968 raise errors.OpExecError("Instance seems to be running on two nodes,"
5969 " or the hypervisor is confused. You will have"
5970 " to ensure manually that it runs only on one"
5971 " and restart this operation.")
5973 if not (runningon_source or runningon_target):
5974 raise errors.OpExecError("Instance does not seem to be running at all."
5975 " In this case, it's safer to repair by"
5976 " running 'gnt-instance stop' to ensure disk"
5977 " shutdown, and then restarting it.")
5979 if runningon_target:
5980 # the migration has actually succeeded, we need to update the config
5981 self.feedback_fn("* instance running on secondary node (%s),"
5982 " updating config" % target_node)
5983 instance.primary_node = target_node
5984 self.cfg.Update(instance, self.feedback_fn)
5985 demoted_node = source_node
5987 self.feedback_fn("* instance confirmed to be running on its"
5988 " primary node (%s)" % source_node)
5989 demoted_node = target_node
5991 self._EnsureSecondary(demoted_node)
5992 try:
5993 self._WaitUntilSync()
5994 except errors.OpExecError:
5995 # we ignore errors here, since if the device is standalone, it
5996 # won't be able to sync
5997 pass
5998 self._GoStandalone()
5999 self._GoReconnect(False)
6000 self._WaitUntilSync()
6002 self.feedback_fn("* done")
6004 def _RevertDiskStatus(self):
6005 """Try to revert the disk status after a failed migration.
6008 target_node = self.target_node
6009 try:
6010 self._EnsureSecondary(target_node)
6011 self._GoStandalone()
6012 self._GoReconnect(False)
6013 self._WaitUntilSync()
6014 except errors.OpExecError, err:
6015 self.lu.LogWarning("Migration failed and I can't reconnect the"
6016 " drives: error '%s'\n"
6017 "Please look and recover the instance status" %
6020 def _AbortMigration(self):
6021 """Call the hypervisor code to abort a started migration.
6024 instance = self.instance
6025 target_node = self.target_node
6026 migration_info = self.migration_info
6028 abort_result = self.rpc.call_finalize_migration(target_node,
6029 instance,
6030 migration_info,
6031 False)
6032 abort_msg = abort_result.fail_msg
6033 if abort_msg:
6034 logging.error("Aborting migration failed on target node %s: %s",
6035 target_node, abort_msg)
6036 # Don't raise an exception here, as we still have to try to revert the
6037 # disk status, even if this step failed.
6039 def _ExecMigration(self):
6040 """Migrate an instance.
6042 The migration is done by:
6043 - change the disks into dual-master mode
6044 - wait until disks are fully synchronized again
6045 - migrate the instance
6046 - change disks on the new secondary node (the old primary) to secondary
6047 - wait until disks are fully synchronized
6048 - change disks into single-master mode
6051 instance = self.instance
6052 target_node = self.target_node
6053 source_node = self.source_node
6055 self.feedback_fn("* checking disk consistency between source and target")
6056 for dev in instance.disks:
6057 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6058 raise errors.OpExecError("Disk %s is degraded or not fully"
6059 " synchronized on target node,"
6060 " aborting migrate." % dev.iv_name)
6062 # First get the migration information from the remote node
6063 result = self.rpc.call_migration_info(source_node, instance)
6064 msg = result.fail_msg
6066 log_err = ("Failed fetching source migration information from %s: %s" %
6068 logging.error(log_err)
6069 raise errors.OpExecError(log_err)
6071 self.migration_info = migration_info = result.payload
6073 # Then switch the disks to master/master mode
6074 self._EnsureSecondary(target_node)
6075 self._GoStandalone()
6076 self._GoReconnect(True)
6077 self._WaitUntilSync()
6079 self.feedback_fn("* preparing %s to accept the instance" % target_node)
6080 result = self.rpc.call_accept_instance(target_node,
6081 instance,
6082 migration_info,
6083 self.nodes_ip[target_node])
6085 msg = result.fail_msg
6086 if msg:
6087 logging.error("Instance pre-migration failed, trying to revert"
6088 " disk status: %s", msg)
6089 self.feedback_fn("Pre-migration failed, aborting")
6090 self._AbortMigration()
6091 self._RevertDiskStatus()
6092 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6093 (instance.name, msg))
6095 self.feedback_fn("* migrating instance to %s" % target_node)
6097 result = self.rpc.call_instance_migrate(source_node, instance,
6098 self.nodes_ip[target_node],
6099 self.live)
6100 msg = result.fail_msg
6101 if msg:
6102 logging.error("Instance migration failed, trying to revert"
6103 " disk status: %s", msg)
6104 self.feedback_fn("Migration failed, aborting")
6105 self._AbortMigration()
6106 self._RevertDiskStatus()
6107 raise errors.OpExecError("Could not migrate instance %s: %s" %
6108 (instance.name, msg))
6111 instance.primary_node = target_node
6112 # distribute new instance config to the other nodes
6113 self.cfg.Update(instance, self.feedback_fn)
6115 result = self.rpc.call_finalize_migration(target_node,
6116 instance,
6117 migration_info,
6118 True)
6119 msg = result.fail_msg
6120 if msg:
6121 logging.error("Instance migration succeeded, but finalization failed:"
6122 " %s", msg)
6123 raise errors.OpExecError("Could not finalize instance migration: %s" %
6124 msg)
6126 self._EnsureSecondary(source_node)
6127 self._WaitUntilSync()
6128 self._GoStandalone()
6129 self._GoReconnect(False)
6130 self._WaitUntilSync()
6132 self.feedback_fn("* done")
6134 def Exec(self, feedback_fn):
6135 """Perform the migration.
6138 feedback_fn("Migrating instance %s" % self.instance.name)
6140 self.feedback_fn = feedback_fn
6142 self.source_node = self.instance.primary_node
6143 self.target_node = self.instance.secondary_nodes[0]
6144 self.all_nodes = [self.source_node, self.target_node]
6145 self.nodes_ip = {
6146 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
6147 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
6148 }
6150 if self.cleanup:
6151 return self._ExecCleanup()
6152 else:
6153 return self._ExecMigration()
6156 def _CreateBlockDev(lu, node, instance, device, force_create,
6158 """Create a tree of block devices on a given node.
6160 If this device type has to be created on secondaries, create it and
6161 all its children.
6163 If not, just recurse to children keeping the same 'force' value.
6165 @param lu: the lu on whose behalf we execute
6166 @param node: the node on which to create the device
6167 @type instance: L{objects.Instance}
6168 @param instance: the instance which owns the device
6169 @type device: L{objects.Disk}
6170 @param device: the device to create
6171 @type force_create: boolean
6172 @param force_create: whether to force creation of this device; this
6173 will be changed to True whenever we find a device which has
6174 the CreateOnSecondary() attribute set
6175 @param info: the extra 'metadata' we should attach to the device
6176 (this will be represented as a LVM tag)
6177 @type force_open: boolean
6178 @param force_open: this parameter will be passed to the
6179 L{backend.BlockdevCreate} function where it specifies
6180 whether we run on primary or not, and it affects both
6181 the child assembly and the device's own Open() execution
6184 if device.CreateOnSecondary():
6188 for child in device.children:
6189 _CreateBlockDev(lu, node, instance, child, force_create,
6192 if not force_create:
6195 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
6198 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
6199 """Create a single block device on a given node.
6201 This will not recurse over children of the device, so they must be
6202 created in advance.
6204 @param lu: the lu on whose behalf we execute
6205 @param node: the node on which to create the device
6206 @type instance: L{objects.Instance}
6207 @param instance: the instance which owns the device
6208 @type device: L{objects.Disk}
6209 @param device: the device to create
6210 @param info: the extra 'metadata' we should attach to the device
6211 (this will be represented as a LVM tag)
6212 @type force_open: boolean
6213 @param force_open: this parameter will be passed to the
6214 L{backend.BlockdevCreate} function where it specifies
6215 whether we run on primary or not, and it affects both
6216 the child assembly and the device's own Open() execution
6219 lu.cfg.SetDiskID(device, node)
6220 result = lu.rpc.call_blockdev_create(node, device, device.size,
6221 instance.name, force_open, info)
6222 result.Raise("Can't create block device %s on"
6223 " node %s for instance %s" % (device, node, instance.name))
6224 if device.physical_id is None:
6225 device.physical_id = result.payload
6228 def _GenerateUniqueNames(lu, exts):
6229 """Generate a suitable LV name.
6231 This will generate a logical volume name for the given instance.
6234 results = []
6235 for val in exts:
6236 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
6237 results.append("%s%s" % (new_id, val))
6238 return results
6241 def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
6243 """Generate a drbd8 device complete with its children.
6246 port = lu.cfg.AllocatePort()
6247 vgname = lu.cfg.GetVGName()
6248 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
6249 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
6250 logical_id=(vgname, names[0]))
6251 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6252 logical_id=(vgname, names[1]))
6253 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
6254 logical_id=(primary, secondary, port,
6255 p_minor, s_minor,
6256 shared_secret),
6257 children=[dev_data, dev_meta],
6258 iv_name=iv_name)
6259 return drbd_dev
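# The returned tree is a single LD_DRBD8 device whose logical_id carries
# (primary, secondary, port, p_minor, s_minor, shared_secret) and whose two
# children are the full-size data LV and a fixed 128 MB metadata LV.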
6262 def _GenerateDiskTemplate(lu, template_name,
6263 instance_name, primary_node,
6264 secondary_nodes, disk_info,
6265 file_storage_dir, file_driver,
6267 """Generate the entire disk layout for a given template type.
6270 #TODO: compute space requirements
6272 vgname = lu.cfg.GetVGName()
6273 disk_count = len(disk_info)
6274 disks = []
6275 if template_name == constants.DT_DISKLESS:
6276 pass
6277 elif template_name == constants.DT_PLAIN:
6278 if len(secondary_nodes) != 0:
6279 raise errors.ProgrammerError("Wrong template configuration")
6281 names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6282 for i in range(disk_count)])
6283 for idx, disk in enumerate(disk_info):
6284 disk_index = idx + base_index
6285 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
6286 logical_id=(vgname, names[idx]),
6287 iv_name="disk/%d" % disk_index,
6288 mode=disk["mode"])
6289 disks.append(disk_dev)
6290 elif template_name == constants.DT_DRBD8:
6291 if len(secondary_nodes) != 1:
6292 raise errors.ProgrammerError("Wrong template configuration")
6293 remote_node = secondary_nodes[0]
6294 minors = lu.cfg.AllocateDRBDMinor(
6295 [primary_node, remote_node] * len(disk_info), instance_name)
6296 names = []
6298 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6299 for i in range(disk_count)]):
6300 names.append(lv_prefix + "_data")
6301 names.append(lv_prefix + "_meta")
6302 for idx, disk in enumerate(disk_info):
6303 disk_index = idx + base_index
6304 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
6305 disk["size"], names[idx*2:idx*2+2],
6306 "disk/%d" % disk_index,
6307 minors[idx*2], minors[idx*2+1])
6308 disk_dev.mode = disk["mode"]
6309 disks.append(disk_dev)
6310 elif template_name == constants.DT_FILE:
6311 if len(secondary_nodes) != 0:
6312 raise errors.ProgrammerError("Wrong template configuration")
6314 _RequireFileStorage()
6316 for idx, disk in enumerate(disk_info):
6317 disk_index = idx + base_index
6318 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6319 iv_name="disk/%d" % disk_index,
6320 logical_id=(file_driver,
6321 "%s/disk%d" % (file_storage_dir,
6322 disk_index)),
6323 mode=disk["mode"])
6324 disks.append(disk_dev)
6326 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
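# Whatever the template, the result is a list of objects.Disk trees, one per
# entry in disk_info; for DT_DRBD8 this also reserves two DRBD minors (one
# per node) and two LV names (data and meta) per disk in the cluster config.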
6330 def _GetInstanceInfoText(instance):
6331 """Compute that text that should be added to the disk's metadata.
6334 return "originstname+%s" % instance.name
6337 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
6338 """Create all disks for an instance.
6340 This abstracts away some work from AddInstance.
6342 @type lu: L{LogicalUnit}
6343 @param lu: the logical unit on whose behalf we execute
6344 @type instance: L{objects.Instance}
6345 @param instance: the instance whose disks we should create
6347 @param to_skip: list of indices to skip
6348 @type target_node: string
6349 @param target_node: if passed, overrides the target node for creation
6351 @return: the success of the creation
6354 info = _GetInstanceInfoText(instance)
6355 if target_node is None:
6356 pnode = instance.primary_node
6357 all_nodes = instance.all_nodes
6358 else:
6359 pnode = target_node
6360 all_nodes = [pnode]
6362 if instance.disk_template == constants.DT_FILE:
6363 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6364 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
6366 result.Raise("Failed to create directory '%s' on"
6367 " node %s" % (file_storage_dir, pnode))
6369 # Note: this needs to be kept in sync with adding of disks in
6370 # LUSetInstanceParams
6371 for idx, device in enumerate(instance.disks):
6372 if to_skip and idx in to_skip:
6373 continue
6374 logging.info("Creating volume %s for instance %s",
6375 device.iv_name, instance.name)
6377 for node in all_nodes:
6378 f_create = node == pnode
6379 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
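# For mirrored (DRBD8) templates the whole tree, LVs and DRBD device alike,
# is created on the primary and the secondary, because those device types
# report CreateOnSecondary(); force_open, and therefore the device's own
# Open() call, is only requested on the primary node (f_create).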
6382 def _RemoveDisks(lu, instance, target_node=None):
6383 """Remove all disks for an instance.
6385 This abstracts away some work from `AddInstance()` and
6386 `RemoveInstance()`. Note that in case some of the devices couldn't
6387 be removed, the removal will continue with the other ones (compare
6388 with `_CreateDisks()`).
6390 @type lu: L{LogicalUnit}
6391 @param lu: the logical unit on whose behalf we execute
6392 @type instance: L{objects.Instance}
6393 @param instance: the instance whose disks we should remove
6394 @type target_node: string
6395 @param target_node: used to override the node on which to remove the disks
6397 @return: the success of the removal
6400 logging.info("Removing block devices for instance %s", instance.name)
6402 all_result = True
6403 for device in instance.disks:
6404 if target_node:
6405 edata = [(target_node, device)]
6406 else:
6407 edata = device.ComputeNodeTree(instance.primary_node)
6408 for node, disk in edata:
6409 lu.cfg.SetDiskID(disk, node)
6410 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
6411 if msg:
6412 lu.LogWarning("Could not remove block device %s on node %s,"
6413 " continuing anyway: %s", device.iv_name, node, msg)
6414 all_result = False
6416 if instance.disk_template == constants.DT_FILE:
6417 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6418 if target_node:
6419 tgt = target_node
6420 else:
6421 tgt = instance.primary_node
6422 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
6423 if result.fail_msg:
6424 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
6425 file_storage_dir, instance.primary_node, result.fail_msg)
6426 all_result = False
6428 return all_result
6431 def _ComputeDiskSize(disk_template, disks):
6432 """Compute disk size requirements in the volume group
6435 # Required free disk space as a function of disk and swap space
6437 constants.DT_DISKLESS: None,
6438 constants.DT_PLAIN: sum(d["size"] for d in disks),
6439 # 128 MB are added for drbd metadata for each disk
6440 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
6441 constants.DT_FILE: None,
6444 if disk_template not in req_size_dict:
6445 raise errors.ProgrammerError("Disk template '%s' size requirement"
6446 " is unknown" % disk_template)
6448 return req_size_dict[disk_template]
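# For example (illustrative sizes): three 1024 MB disks need
# 3 * 1024 = 3072 MB of free volume group space with DT_PLAIN and
# 3 * (1024 + 128) = 3456 MB with DT_DRBD8, while DT_DISKLESS and DT_FILE
# put no requirement on the volume group (None).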
6451 def _CheckHVParams(lu, nodenames, hvname, hvparams):
6452 """Hypervisor parameter validation.
6454 This function abstracts the hypervisor parameter validation to be
6455 used in both instance create and instance modify.
6457 @type lu: L{LogicalUnit}
6458 @param lu: the logical unit for which we check
6459 @type nodenames: list
6460 @param nodenames: the list of nodes on which we should check
6461 @type hvname: string
6462 @param hvname: the name of the hypervisor we should use
6463 @type hvparams: dict
6464 @param hvparams: the parameters which we need to check
6465 @raise errors.OpPrereqError: if the parameters are not valid
6468 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6469 hvname,
6470 hvparams)
6471 for node in nodenames:
6472 info = hvinfo[node]
6473 if info.offline:
6474 continue
6475 info.Raise("Hypervisor parameter validation failed on node %s" % node)
6478 def _CheckOSParams(lu, required, nodenames, osname, osparams):
6479 """OS parameters validation.
6481 @type lu: L{LogicalUnit}
6482 @param lu: the logical unit for which we check
6483 @type required: boolean
6484 @param required: whether the validation should fail if the OS is not
6485 found
6486 @type nodenames: list
6487 @param nodenames: the list of nodes on which we should check
6488 @type osname: string
6489 @param osname: the name of the OS we should use
6490 @type osparams: dict
6491 @param osparams: the parameters which we need to check
6492 @raise errors.OpPrereqError: if the parameters are not valid
6495 result = lu.rpc.call_os_validate(required, nodenames, osname,
6496 [constants.OS_VALIDATE_PARAMETERS],
6497 osparams)
6498 for node, nres in result.items():
6499 # we don't check for offline cases since this should be run only
6500 # against the master node and/or an instance's nodes
6501 nres.Raise("OS Parameters validation failed on node %s" % node)
6502 if not nres.payload:
6503 lu.LogInfo("OS %s not found on node %s, validation skipped",
6507 class LUCreateInstance(LogicalUnit):
6508 """Create an instance.
6511 HPATH = "instance-add"
6512 HTYPE = constants.HTYPE_INSTANCE
6515 ("mode", _NoDefault, _TElemOf(constants.INSTANCE_CREATE_MODES)),
6516 ("start", True, _TBool),
6517 ("wait_for_sync", True, _TBool),
6518 ("ip_check", True, _TBool),
6519 ("name_check", True, _TBool),
6520 ("disks", _NoDefault, _TListOf(_TDict)),
6521 ("nics", _NoDefault, _TListOf(_TDict)),
6522 ("hvparams", _EmptyDict, _TDict),
6523 ("beparams", _EmptyDict, _TDict),
6524 ("osparams", _EmptyDict, _TDict),
6525 ("no_install", None, _TMaybeBool),
6526 ("os_type", None, _TMaybeString),
6527 ("force_variant", False, _TBool),
6528 ("source_handshake", None, _TOr(_TList, _TNone)),
6529 ("source_x509_ca", None, _TMaybeString),
6530 ("source_instance_name", None, _TMaybeString),
6531 ("src_node", None, _TMaybeString),
6532 ("src_path", None, _TMaybeString),
6533 ("pnode", None, _TMaybeString),
6534 ("snode", None, _TMaybeString),
6535 ("iallocator", None, _TMaybeString),
6536 ("hypervisor", None, _TMaybeString),
6537 ("disk_template", _NoDefault, _CheckDiskTemplate),
6538 ("identify_defaults", False, _TBool),
6539 ("file_driver", None, _TOr(_TNone, _TElemOf(constants.FILE_DRIVER))),
6540 ("file_storage_dir", None, _TMaybeString),
6544 def CheckArguments(self):
6548 # do not require name_check to ease forward/backward compatibility
6550 if self.op.no_install and self.op.start:
6551 self.LogInfo("No-installation mode selected, disabling startup")
6552 self.op.start = False
6553 # validate/normalize the instance name
6554 self.op.instance_name = \
6555 netutils.Hostname.GetNormalizedName(self.op.instance_name)
6557 if self.op.ip_check and not self.op.name_check:
6558 # TODO: make the ip check more flexible and not depend on the name check
6559 raise errors.OpPrereqError("Cannot do ip check without a name check",
6562 # check nics' parameter names
6563 for nic in self.op.nics:
6564 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
6566 # check disks. parameter names and consistent adopt/no-adopt strategy
6567 has_adopt = has_no_adopt = False
6568 for disk in self.op.disks:
6569 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
6574 if has_adopt and has_no_adopt:
6575 raise errors.OpPrereqError("Either all disks are adopted or none is",
6578 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
6579 raise errors.OpPrereqError("Disk adoption is not supported for the"
6580 " '%s' disk template" %
6581 self.op.disk_template,
6583 if self.op.iallocator is not None:
6584 raise errors.OpPrereqError("Disk adoption not allowed with an"
6585 " iallocator script", errors.ECODE_INVAL)
6586 if self.op.mode == constants.INSTANCE_IMPORT:
6587 raise errors.OpPrereqError("Disk adoption not allowed for"
6588 " instance import", errors.ECODE_INVAL)
6590 self.adopt_disks = has_adopt
6592 # instance name verification
6593 if self.op.name_check:
6594 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
6595 self.op.instance_name = self.hostname1.name
6596 # used in CheckPrereq for ip ping check
6597 self.check_ip = self.hostname1.ip
6598 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
6599 raise errors.OpPrereqError("Remote imports require names to be checked" %
6602 self.check_ip = None
6604 # file storage checks
6605 if (self.op.file_driver and
6606 not self.op.file_driver in constants.FILE_DRIVER):
6607 raise errors.OpPrereqError("Invalid file driver name '%s'" %
6608 self.op.file_driver, errors.ECODE_INVAL)
6610 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6611 raise errors.OpPrereqError("File storage directory path not absolute",
6614 ### Node/iallocator related checks
6615 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
6617 if self.op.pnode is not None:
6618 if self.op.disk_template in constants.DTS_NET_MIRROR:
6619 if self.op.snode is None:
6620 raise errors.OpPrereqError("The networked disk templates need"
6621 " a mirror node", errors.ECODE_INVAL)
6623 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
6625 self.op.snode = None
6627 self._cds = _GetClusterDomainSecret()
6629 if self.op.mode == constants.INSTANCE_IMPORT:
6630 # On import force_variant must be True, because if we forced it at
6631 # initial install, our only chance when importing it back is that it
6632 # works again!
6633 self.op.force_variant = True
6635 if self.op.no_install:
6636 self.LogInfo("No-installation mode has no effect during import")
6638 elif self.op.mode == constants.INSTANCE_CREATE:
6639 if self.op.os_type is None:
6640 raise errors.OpPrereqError("No guest OS specified",
6642 if self.op.disk_template is None:
6643 raise errors.OpPrereqError("No disk template specified",
6646 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
6647 # Check handshake to ensure both clusters have the same domain secret
6648 src_handshake = self.op.source_handshake
6649 if not src_handshake:
6650 raise errors.OpPrereqError("Missing source handshake",
6653 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
6656 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
6659 # Load and check source CA
6660 self.source_x509_ca_pem = self.op.source_x509_ca
6661 if not self.source_x509_ca_pem:
6662 raise errors.OpPrereqError("Missing source X509 CA",
6666 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
6668 except OpenSSL.crypto.Error, err:
6669 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
6670 (err, ), errors.ECODE_INVAL)
6672 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
6673 if errcode is not None:
6674 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
6677 self.source_x509_ca = cert
6679 src_instance_name = self.op.source_instance_name
6680 if not src_instance_name:
6681 raise errors.OpPrereqError("Missing source instance name",
6684 self.source_instance_name = \
6685 netutils.GetHostname(name=src_instance_name).name
6688 raise errors.OpPrereqError("Invalid instance creation mode %r" %
6689 self.op.mode, errors.ECODE_INVAL)
6691 def ExpandNames(self):
6692 """ExpandNames for CreateInstance.
6694 Figure out the right locks for instance creation.
6697 self.needed_locks = {}
6699 instance_name = self.op.instance_name
6700 # this is just a preventive check, but someone might still add this
6701 # instance in the meantime, and creation will fail at lock-add time
6702 if instance_name in self.cfg.GetInstanceList():
6703 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6704 instance_name, errors.ECODE_EXISTS)
6706 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
6708 if self.op.iallocator:
6709 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6711 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
6712 nodelist = [self.op.pnode]
6713 if self.op.snode is not None:
6714 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
6715 nodelist.append(self.op.snode)
6716 self.needed_locks[locking.LEVEL_NODE] = nodelist
6718 # in case of import lock the source node too
6719 if self.op.mode == constants.INSTANCE_IMPORT:
6720 src_node = self.op.src_node
6721 src_path = self.op.src_path
6723 if src_path is None:
6724 self.op.src_path = src_path = self.op.instance_name
6726 if src_node is None:
6727 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6728 self.op.src_node = None
6729 if os.path.isabs(src_path):
6730 raise errors.OpPrereqError("Importing an instance from an absolute"
6731 " path requires a source node option.",
6734 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
6735 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
6736 self.needed_locks[locking.LEVEL_NODE].append(src_node)
6737 if not os.path.isabs(src_path):
6738 self.op.src_path = src_path = \
6739 utils.PathJoin(constants.EXPORT_DIR, src_path)
6741 def _RunAllocator(self):
6742 """Run the allocator based on input opcode.
6745 nics = [n.ToDict() for n in self.nics]
6746 ial = IAllocator(self.cfg, self.rpc,
6747 mode=constants.IALLOCATOR_MODE_ALLOC,
6748 name=self.op.instance_name,
6749 disk_template=self.op.disk_template,
6750 tags=[],
6751 os=self.op.os_type,
6752 vcpus=self.be_full[constants.BE_VCPUS],
6753 mem_size=self.be_full[constants.BE_MEMORY],
6754 disks=self.disks,
6755 nics=nics,
6756 hypervisor=self.op.hypervisor,
6757 )
6759 ial.Run(self.op.iallocator)
6762 raise errors.OpPrereqError("Can't compute nodes using"
6763 " iallocator '%s': %s" %
6764 (self.op.iallocator, ial.info),
6766 if len(ial.result) != ial.required_nodes:
6767 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6768 " of nodes (%s), required %s" %
6769 (self.op.iallocator, len(ial.result),
6770 ial.required_nodes), errors.ECODE_FAULT)
6771 self.op.pnode = ial.result[0]
6772 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6773 self.op.instance_name, self.op.iallocator,
6774 utils.CommaJoin(ial.result))
6775 if ial.required_nodes == 2:
6776 self.op.snode = ial.result[1]
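# The allocator returns an ordered node list: the first entry becomes the
# primary node and, when two nodes are required (mirrored disk templates),
# the second entry becomes the secondary.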
6778 def BuildHooksEnv(self):
6781 This runs on master, primary and secondary nodes of the instance.
6785 "ADD_MODE": self.op.mode,
6787 if self.op.mode == constants.INSTANCE_IMPORT:
6788 env["SRC_NODE"] = self.op.src_node
6789 env["SRC_PATH"] = self.op.src_path
6790 env["SRC_IMAGES"] = self.src_images
6792 env.update(_BuildInstanceHookEnv(
6793 name=self.op.instance_name,
6794 primary_node=self.op.pnode,
6795 secondary_nodes=self.secondaries,
6796 status=self.op.start,
6797 os_type=self.op.os_type,
6798 memory=self.be_full[constants.BE_MEMORY],
6799 vcpus=self.be_full[constants.BE_VCPUS],
6800 nics=_NICListToTuple(self, self.nics),
6801 disk_template=self.op.disk_template,
6802 disks=[(d["size"], d["mode"]) for d in self.disks],
6805 hypervisor_name=self.op.hypervisor,
6808 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6809 self.secondaries)
6810 return env, nl, nl
6812 def _ReadExportInfo(self):
6813 """Reads the export information from disk.
6815 It will override the opcode source node and path with the actual
6816 information, if these two were not specified before.
6818 @return: the export information
6821 assert self.op.mode == constants.INSTANCE_IMPORT
6823 src_node = self.op.src_node
6824 src_path = self.op.src_path
6826 if src_node is None:
6827 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6828 exp_list = self.rpc.call_export_list(locked_nodes)
6830 for node in exp_list:
6831 if exp_list[node].fail_msg:
6833 if src_path in exp_list[node].payload:
6835 self.op.src_node = src_node = node
6836 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6840 raise errors.OpPrereqError("No export found for relative path %s" %
6841 src_path, errors.ECODE_INVAL)
6843 _CheckNodeOnline(self, src_node)
6844 result = self.rpc.call_export_info(src_node, src_path)
6845 result.Raise("No export or invalid export found in dir %s" % src_path)
6847 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6848 if not export_info.has_section(constants.INISECT_EXP):
6849 raise errors.ProgrammerError("Corrupted export config",
6850 errors.ECODE_ENVIRON)
6852 ei_version = export_info.get(constants.INISECT_EXP, "version")
6853 if (int(ei_version) != constants.EXPORT_VERSION):
6854 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6855 (ei_version, constants.EXPORT_VERSION),
6856 errors.ECODE_ENVIRON)
6859 def _ReadExportParams(self, einfo):
6860 """Use export parameters as defaults.
6862 In case the opcode doesn't specify (as in override) some instance
6863 parameters, then try to use them from the export information, if
6864 that declares them.
6867 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
6869 if self.op.disk_template is None:
6870 if einfo.has_option(constants.INISECT_INS, "disk_template"):
6871 self.op.disk_template = einfo.get(constants.INISECT_INS,
6874 raise errors.OpPrereqError("No disk template specified and the export"
6875 " is missing the disk_template information",
6878 if not self.op.disks:
6879 if einfo.has_option(constants.INISECT_INS, "disk_count"):
6881 # TODO: import the disk iv_name too
6882 for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
6883 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
6884 disks.append({"size": disk_sz})
6885 self.op.disks = disks
6887 raise errors.OpPrereqError("No disk info specified and the export"
6888 " is missing the disk information",
6891 if (not self.op.nics and
6892 einfo.has_option(constants.INISECT_INS, "nic_count")):
6894 for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
6896 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
6897 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
6902 if (self.op.hypervisor is None and
6903 einfo.has_option(constants.INISECT_INS, "hypervisor")):
6904 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
6905 if einfo.has_section(constants.INISECT_HYP):
6906 # use the export parameters but do not override the ones
6907 # specified by the user
6908 for name, value in einfo.items(constants.INISECT_HYP):
6909 if name not in self.op.hvparams:
6910 self.op.hvparams[name] = value
6912 if einfo.has_section(constants.INISECT_BEP):
6913 # use the parameters, without overriding
6914 for name, value in einfo.items(constants.INISECT_BEP):
6915 if name not in self.op.beparams:
6916 self.op.beparams[name] = value
6918 # try to read the parameters old style, from the main section
6919 for name in constants.BES_PARAMETERS:
6920 if (name not in self.op.beparams and
6921 einfo.has_option(constants.INISECT_INS, name)):
6922 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
6924 if einfo.has_section(constants.INISECT_OSP):
6925 # use the parameters, without overriding
6926 for name, value in einfo.items(constants.INISECT_OSP):
6927 if name not in self.op.osparams:
6928 self.op.osparams[name] = value
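# Precedence when importing: values given in the opcode always win; anything
# left unset is filled from the export's instance section (disk template,
# disks, nics, hypervisor) and from its hypervisor/backend/OS parameter
# sections, so a re-imported instance comes back with its old settings.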
6930 def _RevertToDefaults(self, cluster):
6931 """Revert the instance parameters to the default values.
6935 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
6936 for name in self.op.hvparams.keys():
6937 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
6938 del self.op.hvparams[name]
6940 be_defs = cluster.SimpleFillBE({})
6941 for name in self.op.beparams.keys():
6942 if name in be_defs and be_defs[name] == self.op.beparams[name]:
6943 del self.op.beparams[name]
6945 nic_defs = cluster.SimpleFillNIC({})
6946 for nic in self.op.nics:
6947 for name in constants.NICS_PARAMETERS:
6948 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
6951 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
6952 for name in self.op.osparams.keys():
6953 if name in os_defs and os_defs[name] == self.op.osparams[name]:
6954 del self.op.osparams[name]
6956 def CheckPrereq(self):
6957 """Check prerequisites.
6960 if self.op.mode == constants.INSTANCE_IMPORT:
6961 export_info = self._ReadExportInfo()
6962 self._ReadExportParams(export_info)
6964 _CheckDiskTemplate(self.op.disk_template)
6966 if (not self.cfg.GetVGName() and
6967 self.op.disk_template not in constants.DTS_NOT_LVM):
6968 raise errors.OpPrereqError("Cluster does not support lvm-based"
6969 " instances", errors.ECODE_STATE)
6971 if self.op.hypervisor is None:
6972 self.op.hypervisor = self.cfg.GetHypervisorType()
6974 cluster = self.cfg.GetClusterInfo()
6975 enabled_hvs = cluster.enabled_hypervisors
6976 if self.op.hypervisor not in enabled_hvs:
6977 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
6978 " cluster (%s)" % (self.op.hypervisor,
6979 ",".join(enabled_hvs)),
6982 # check hypervisor parameter syntax (locally)
6983 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6984 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
6986 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
6987 hv_type.CheckParameterSyntax(filled_hvp)
6988 self.hv_full = filled_hvp
6989 # check that we don't specify global parameters on an instance
6990 _CheckGlobalHvParams(self.op.hvparams)
6992 # fill and remember the beparams dict
6993 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6994 self.be_full = cluster.SimpleFillBE(self.op.beparams)
6996 # build os parameters
6997 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
6999 # now that hvp/bep are in final format, let's reset to defaults,
7001 if self.op.identify_defaults:
7002 self._RevertToDefaults(cluster)
7006 for idx, nic in enumerate(self.op.nics):
7007 nic_mode_req = nic.get("mode", None)
7008 nic_mode = nic_mode_req
7009 if nic_mode is None:
7010 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7012 # in routed mode, for the first nic, the default ip is 'auto'
7013 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7014 default_ip_mode = constants.VALUE_AUTO
7016 default_ip_mode = constants.VALUE_NONE
7018 # ip validity checks
7019 ip = nic.get("ip", default_ip_mode)
7020 if ip is None or ip.lower() == constants.VALUE_NONE:
7021 nic_ip = None
7022 elif ip.lower() == constants.VALUE_AUTO:
7023 if not self.op.name_check:
7024 raise errors.OpPrereqError("IP address set to auto but name checks"
7025 " have been skipped",
7026 errors.ECODE_INVAL)
7027 nic_ip = self.hostname1.ip
7028 else:
7029 if not netutils.IPAddress.IsValid(ip):
7030 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
7031 errors.ECODE_INVAL)
7032 nic_ip = ip
7034 # TODO: check the ip address for uniqueness
7035 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7036 raise errors.OpPrereqError("Routed nic mode requires an ip address",
7039 # MAC address verification
7040 mac = nic.get("mac", constants.VALUE_AUTO)
7041 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7042 mac = utils.NormalizeAndValidateMac(mac)
7044 try:
7045 self.cfg.ReserveMAC(mac, self.proc.GetECId())
7046 except errors.ReservationError:
7047 raise errors.OpPrereqError("MAC address %s already in use"
7048 " in cluster" % mac,
7049 errors.ECODE_NOTUNIQUE)
7051 # bridge verification
7052 bridge = nic.get("bridge", None)
7053 link = nic.get("link", None)
7055 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7056 " at the same time", errors.ECODE_INVAL)
7057 elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
7058 raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
7065 nicparams[constants.NIC_MODE] = nic_mode_req
7067 nicparams[constants.NIC_LINK] = link
7069 check_params = cluster.SimpleFillNIC(nicparams)
7070 objects.NIC.CheckParameterSyntax(check_params)
7071 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7073 # disk checks/pre-build
7075 for disk in self.op.disks:
7076 mode = disk.get("mode", constants.DISK_RDWR)
7077 if mode not in constants.DISK_ACCESS_SET:
7078 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7079 mode, errors.ECODE_INVAL)
7080 size = disk.get("size", None)
7082 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7085 except (TypeError, ValueError):
7086 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7088 new_disk = {"size": size, "mode": mode}
7090 new_disk["adopt"] = disk["adopt"]
7091 self.disks.append(new_disk)
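# Illustrative result (values assumed): a plain read-write disk yields
# {"size": 10240, "mode": "rw"}, plus an "adopt": "<existing LV name>" key
# when adoption is requested; sizes are always stored as integers (in MB).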
7093 if self.op.mode == constants.INSTANCE_IMPORT:
7095 # Check that the new instance doesn't have less disks than the export
7096 instance_disks = len(self.disks)
7097 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7098 if instance_disks < export_disks:
7099 raise errors.OpPrereqError("Not enough disks to import."
7100 " (instance: %d, export: %d)" %
7101 (instance_disks, export_disks),
7105 for idx in range(export_disks):
7106 option = 'disk%d_dump' % idx
7107 if export_info.has_option(constants.INISECT_INS, option):
7108 # FIXME: are the old os-es, disk sizes, etc. useful?
7109 export_name = export_info.get(constants.INISECT_INS, option)
7110 image = utils.PathJoin(self.op.src_path, export_name)
7111 disk_images.append(image)
7113 disk_images.append(False)
7115 self.src_images = disk_images
7117 old_name = export_info.get(constants.INISECT_INS, 'name')
7119 exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7120 except (TypeError, ValueError), err:
7121 raise errors.OpPrereqError("Invalid export file, nic_count is not"
7122 " an integer: %s" % str(err),
7124 if self.op.instance_name == old_name:
7125 for idx, nic in enumerate(self.nics):
7126 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7127 nic_mac_ini = 'nic%d_mac' % idx
7128 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7130 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7132 # ip ping checks (we use the same ip that was resolved in ExpandNames)
7133 if self.op.ip_check:
7134 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7135 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7136 (self.check_ip, self.op.instance_name),
7137 errors.ECODE_NOTUNIQUE)
7139 #### mac address generation
7140 # By generating here the mac address both the allocator and the hooks get
7141 # the real final mac address rather than the 'auto' or 'generate' value.
7142 # There is a race condition between the generation and the instance object
7143 # creation, which means that we know the mac is valid now, but we're not
7144 # sure it will be when we actually add the instance. If things go bad
7145 # adding the instance will abort because of a duplicate mac, and the
7146 # creation job will fail.
7147 for nic in self.nics:
7148 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7149 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7153 if self.op.iallocator is not None:
7154 self._RunAllocator()
7156 #### node related checks
7158 # check primary node
7159 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7160 assert self.pnode is not None, \
7161 "Cannot retrieve locked node %s" % self.op.pnode
7163 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7164 pnode.name, errors.ECODE_STATE)
7166 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7167 pnode.name, errors.ECODE_STATE)
7169 self.secondaries = []
7171 # mirror node verification
7172 if self.op.disk_template in constants.DTS_NET_MIRROR:
7173 if self.op.snode == pnode.name:
7174 raise errors.OpPrereqError("The secondary node cannot be the"
7175 " primary node.", errors.ECODE_INVAL)
7176 _CheckNodeOnline(self, self.op.snode)
7177 _CheckNodeNotDrained(self, self.op.snode)
7178 self.secondaries.append(self.op.snode)
7180 nodenames = [pnode.name] + self.secondaries
7182 req_size = _ComputeDiskSize(self.op.disk_template,
7183 self.disks)
7185 # Check lv size requirements, if not adopting
7186 if req_size is not None and not self.adopt_disks:
7187 _CheckNodesFreeDisk(self, nodenames, req_size)
7189 if self.adopt_disks: # instead, we must check the adoption data
7190 all_lvs = set([i["adopt"] for i in self.disks])
7191 if len(all_lvs) != len(self.disks):
7192 raise errors.OpPrereqError("Duplicate volume names given for adoption",
7194 for lv_name in all_lvs:
7196 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7197 except errors.ReservationError:
7198 raise errors.OpPrereqError("LV named %s used by another instance" %
7199 lv_name, errors.ECODE_NOTUNIQUE)
7201 node_lvs = self.rpc.call_lv_list([pnode.name],
7202 self.cfg.GetVGName())[pnode.name]
7203 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7204 node_lvs = node_lvs.payload
7205 delta = all_lvs.difference(node_lvs.keys())
7207 raise errors.OpPrereqError("Missing logical volume(s): %s" %
7208 utils.CommaJoin(delta),
7210 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7212 raise errors.OpPrereqError("Online logical volumes found, cannot"
7213 " adopt: %s" % utils.CommaJoin(online_lvs),
7215 # update the size of disk based on what is found
7216 for dsk in self.disks:
7217 dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
7219 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7221 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7222 # check OS parameters (remotely)
7223 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7225 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7227 # memory check on primary node
7229 _CheckNodeFreeMemory(self, self.pnode.name,
7230 "creating instance %s" % self.op.instance_name,
7231 self.be_full[constants.BE_MEMORY],
7234 self.dry_run_result = list(nodenames)
7236 def Exec(self, feedback_fn):
7237 """Create and add the instance to the cluster.
7240 instance = self.op.instance_name
7241 pnode_name = self.pnode.name
7243 ht_kind = self.op.hypervisor
7244 if ht_kind in constants.HTS_REQ_PORT:
7245 network_port = self.cfg.AllocatePort()
7246 else:
7247 network_port = None
7249 if constants.ENABLE_FILE_STORAGE:
7250 # this is needed because os.path.join does not accept None arguments
7251 if self.op.file_storage_dir is None:
7252 string_file_storage_dir = ""
7254 string_file_storage_dir = self.op.file_storage_dir
7256 # build the full file storage dir path
7257 file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
7258 string_file_storage_dir, instance)
7260 file_storage_dir = ""
7262 disks = _GenerateDiskTemplate(self,
7263 self.op.disk_template,
7264 instance, pnode_name,
7268 self.op.file_driver,
7271 iobj = objects.Instance(name=instance, os=self.op.os_type,
7272 primary_node=pnode_name,
7273 nics=self.nics, disks=disks,
7274 disk_template=self.op.disk_template,
7276 network_port=network_port,
7277 beparams=self.op.beparams,
7278 hvparams=self.op.hvparams,
7279 hypervisor=self.op.hypervisor,
7280 osparams=self.op.osparams,
7283 if self.adopt_disks:
7284 # rename LVs to the newly-generated names; we need to construct
7285 # 'fake' LV disks with the old data, plus the new unique_id
7286 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7288 for t_dsk, a_dsk in zip (tmp_disks, self.disks):
7289 rename_to.append(t_dsk.logical_id)
7290 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7291 self.cfg.SetDiskID(t_dsk, pnode_name)
7292 result = self.rpc.call_blockdev_rename(pnode_name,
7293 zip(tmp_disks, rename_to))
7294 result.Raise("Failed to rename adoped LVs")
7296 feedback_fn("* creating instance disks...")
7298 _CreateDisks(self, iobj)
7299 except errors.OpExecError:
7300 self.LogWarning("Device creation failed, reverting...")
7302 _RemoveDisks(self, iobj)
7304 self.cfg.ReleaseDRBDMinors(instance)
7307 feedback_fn("adding instance %s to cluster config" % instance)
7309 self.cfg.AddInstance(iobj, self.proc.GetECId())
7311 # Declare that we don't want to remove the instance lock anymore, as we've
7312 # added the instance to the config
7313 del self.remove_locks[locking.LEVEL_INSTANCE]
7314 # Unlock all the nodes
7315 if self.op.mode == constants.INSTANCE_IMPORT:
7316 nodes_keep = [self.op.src_node]
7317 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7318 if node != self.op.src_node]
7319 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7320 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7322 self.context.glm.release(locking.LEVEL_NODE)
7323 del self.acquired_locks[locking.LEVEL_NODE]
7325 if self.op.wait_for_sync:
7326 disk_abort = not _WaitForSync(self, iobj)
7327 elif iobj.disk_template in constants.DTS_NET_MIRROR:
7328 # make sure the disks are not degraded (still sync-ing is ok)
7330 feedback_fn("* checking mirrors status")
7331 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
7336 _RemoveDisks(self, iobj)
7337 self.cfg.RemoveInstance(iobj.name)
7338 # Make sure the instance lock gets removed
7339 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
7340 raise errors.OpExecError("There are some degraded disks for"
7343 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
7344 if self.op.mode == constants.INSTANCE_CREATE:
7345 if not self.op.no_install:
7346 feedback_fn("* running the instance OS create scripts...")
7347 # FIXME: pass debug option from opcode to backend
7348 result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
7349 self.op.debug_level)
7350 result.Raise("Could not add os for instance %s"
7351 " on node %s" % (instance, pnode_name))
7353 elif self.op.mode == constants.INSTANCE_IMPORT:
7354 feedback_fn("* running the instance OS import scripts...")
7356 transfers = []
7358 for idx, image in enumerate(self.src_images):
7359 if not image:
7360 continue
7362 # FIXME: pass debug option from opcode to backend
7363 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
7364 constants.IEIO_FILE, (image, ),
7365 constants.IEIO_SCRIPT,
7366 (iobj.disks[idx], idx),
7367 None)
7368 transfers.append(dt)
7370 import_result = \
7371 masterd.instance.TransferInstanceData(self, feedback_fn,
7372 self.op.src_node, pnode_name,
7373 self.pnode.secondary_ip,
7374 iobj, transfers)
7375 if not compat.all(import_result):
7376 self.LogWarning("Some disks for instance %s on node %s were not"
7377 " imported successfully" % (instance, pnode_name))
7379 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7380 feedback_fn("* preparing remote import...")
7381 connect_timeout = constants.RIE_CONNECT_TIMEOUT
7382 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
7384 disk_results = masterd.instance.RemoteImport(self, feedback_fn, iobj,
7385 self.source_x509_ca,
7386 self._cds, timeouts)
7387 if not compat.all(disk_results):
7388 # TODO: Should the instance still be started, even if some disks
7389 # failed to import (valid for local imports, too)?
7390 self.LogWarning("Some disks for instance %s on node %s were not"
7391 " imported successfully" % (instance, pnode_name))
7393 # Run rename script on newly imported instance
7394 assert iobj.name == instance
7395 feedback_fn("Running rename script for %s" % instance)
7396 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
7397 self.source_instance_name,
7398 self.op.debug_level)
7400 self.LogWarning("Failed to run rename script for %s on node"
7401 " %s: %s" % (instance, pnode_name, result.fail_msg))
7403 else:
7404 # also checked in the prereq part
7405 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
7406 % self.op.mode)
7408 if self.op.start:
7409 iobj.admin_up = True
7410 self.cfg.Update(iobj, feedback_fn)
7411 logging.info("Starting instance %s on node %s", instance, pnode_name)
7412 feedback_fn("* starting instance...")
7413 result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
7414 result.Raise("Could not start instance")
7416 return list(iobj.all_nodes)
7419 class LUConnectConsole(NoHooksLU):
7420 """Connect to an instance's console.
7422 This is somewhat special in that it returns the command line that
7423 you need to run on the master node in order to connect to the
7424 console.
7432 def ExpandNames(self):
7433 self._ExpandAndLockInstance()
7435 def CheckPrereq(self):
7436 """Check prerequisites.
7438 This checks that the instance is in the cluster.
7441 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7442 assert self.instance is not None, \
7443 "Cannot retrieve locked instance %s" % self.op.instance_name
7444 _CheckNodeOnline(self, self.instance.primary_node)
7446 def Exec(self, feedback_fn):
7447 """Connect to the console of an instance
7450 instance = self.instance
7451 node = instance.primary_node
7453 node_insts = self.rpc.call_instance_list([node],
7454 [instance.hypervisor])[node]
7455 node_insts.Raise("Can't get node information from %s" % node)
7457 if instance.name not in node_insts.payload:
7458 raise errors.OpExecError("Instance %s is not running." % instance.name)
7460 logging.debug("Connecting to console of %s on %s", instance.name, node)
7462 hyper = hypervisor.GetHypervisor(instance.hypervisor)
7463 cluster = self.cfg.GetClusterInfo()
7464 # beparams and hvparams are passed separately, to avoid editing the
7465 # instance and then saving the defaults in the instance itself.
7466 hvparams = cluster.FillHV(instance)
7467 beparams = cluster.FillBE(instance)
7468 console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
7471 return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
7474 class LUReplaceDisks(LogicalUnit):
7475 """Replace the disks of an instance.
7478 HPATH = "mirrors-replace"
7479 HTYPE = constants.HTYPE_INSTANCE
7482 ("mode", _NoDefault, _TElemOf(constants.REPLACE_MODES)),
7483 ("disks", _EmptyList, _TListOf(_TPositiveInt)),
7484 ("remote_node", None, _TMaybeString),
7485 ("iallocator", None, _TMaybeString),
7486 ("early_release", False, _TBool),
7490 def CheckArguments(self):
7491 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
7494 def ExpandNames(self):
7495 self._ExpandAndLockInstance()
7497 if self.op.iallocator is not None:
7498 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7500 elif self.op.remote_node is not None:
7501 remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7502 self.op.remote_node = remote_node
7504 # Warning: do not remove the locking of the new secondary here
7505 # unless DRBD8.AddChildren is changed to work in parallel;
7506 # currently it doesn't since parallel invocations of
7507 # FindUnusedMinor will conflict
7508 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
7509 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7512 self.needed_locks[locking.LEVEL_NODE] = []
7513 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7515 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
7516 self.op.iallocator, self.op.remote_node,
7517 self.op.disks, False, self.op.early_release)
7519 self.tasklets = [self.replacer]
7521 def DeclareLocks(self, level):
7522 # If we're not already locking all nodes in the set we have to declare the
7523 # instance's primary/secondary nodes.
7524 if (level == locking.LEVEL_NODE and
7525 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7526 self._LockInstancesNodes()
7528 def BuildHooksEnv(self):
7531 This runs on the master, the primary and all the secondaries.
7534 instance = self.replacer.instance
7536 "MODE": self.op.mode,
7537 "NEW_SECONDARY": self.op.remote_node,
7538 "OLD_SECONDARY": instance.secondary_nodes[0],
7540 env.update(_BuildInstanceHookEnvByObject(self, instance))
7542 self.cfg.GetMasterNode(),
7543 instance.primary_node,
7545 if self.op.remote_node is not None:
7546 nl.append(self.op.remote_node)
7550 class TLReplaceDisks(Tasklet):
7551 """Replaces disks for an instance.
7553 Note: Locking is not within the scope of this class.
7556 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7557 disks, delay_iallocator, early_release):
7558 """Initializes this class.
7561 Tasklet.__init__(self, lu)
7564 self.instance_name = instance_name
7566 self.iallocator_name = iallocator_name
7567 self.remote_node = remote_node
7569 self.delay_iallocator = delay_iallocator
7570 self.early_release = early_release
7573 self.instance = None
7574 self.new_node = None
7575 self.target_node = None
7576 self.other_node = None
7577 self.remote_node_info = None
7578 self.node_secondary_ip = None
7581 def CheckArguments(mode, remote_node, iallocator):
7582 """Helper function for users of this class.
7585 # check for valid parameter combination
7586 if mode == constants.REPLACE_DISK_CHG:
7587 if remote_node is None and iallocator is None:
7588 raise errors.OpPrereqError("When changing the secondary either an"
7589 " iallocator script must be used or the"
7590 " new node given", errors.ECODE_INVAL)
7592 if remote_node is not None and iallocator is not None:
7593 raise errors.OpPrereqError("Give either the iallocator or the new"
7594 " secondary, not both", errors.ECODE_INVAL)
7596 elif remote_node is not None or iallocator is not None:
7597 # Not replacing the secondary
7598 raise errors.OpPrereqError("The iallocator and new node options can"
7599 " only be used when changing the"
7600 " secondary node", errors.ECODE_INVAL)
7603 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
7604 """Compute a new secondary node using an IAllocator.
7607 ial = IAllocator(lu.cfg, lu.rpc,
7608 mode=constants.IALLOCATOR_MODE_RELOC,
7609 name=instance_name,
7610 relocate_from=relocate_from)
7612 ial.Run(iallocator_name)
7614 if not ial.success:
7615 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7616 " %s" % (iallocator_name, ial.info),
7619 if len(ial.result) != ial.required_nodes:
7620 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7621 " of nodes (%s), required %s" %
7623 len(ial.result), ial.required_nodes),
7626 remote_node_name = ial.result[0]
7628 lu.LogInfo("Selected new secondary for instance '%s': %s",
7629 instance_name, remote_node_name)
7631 return remote_node_name
7633 def _FindFaultyDisks(self, node_name):
7634 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
7637 def CheckPrereq(self):
7638 """Check prerequisites.
7640 This checks that the instance is in the cluster.
7643 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
7644 assert instance is not None, \
7645 "Cannot retrieve locked instance %s" % self.instance_name
7647 if instance.disk_template != constants.DT_DRBD8:
7648 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
7649 " instances", errors.ECODE_INVAL)
7651 if len(instance.secondary_nodes) != 1:
7652 raise errors.OpPrereqError("The instance has a strange layout,"
7653 " expected one secondary but found %d" %
7654 len(instance.secondary_nodes),
7657 if not self.delay_iallocator:
7658 self._CheckPrereq2()
7660 def _CheckPrereq2(self):
7661 """Check prerequisites, second part.
7663 This function should always be part of CheckPrereq. It was separated and is
7664 now called from Exec because during node evacuation iallocator was only
7665 called with an unmodified cluster model, not taking planned changes into
7669 instance = self.instance
7670 secondary_node = instance.secondary_nodes[0]
7672 if self.iallocator_name is None:
7673 remote_node = self.remote_node
7675 remote_node = self._RunAllocator(self.lu, self.iallocator_name,
7676 instance.name, instance.secondary_nodes)
7678 if remote_node is not None:
7679 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
7680 assert self.remote_node_info is not None, \
7681 "Cannot retrieve locked node %s" % remote_node
7683 self.remote_node_info = None
7685 if remote_node == self.instance.primary_node:
7686 raise errors.OpPrereqError("The specified node is the primary node of"
7687 " the instance.", errors.ECODE_INVAL)
7689 if remote_node == secondary_node:
7690 raise errors.OpPrereqError("The specified node is already the"
7691 " secondary node of the instance.",
7694 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7695 constants.REPLACE_DISK_CHG):
7696 raise errors.OpPrereqError("Cannot specify disks to be replaced",
7699 if self.mode == constants.REPLACE_DISK_AUTO:
7700 faulty_primary = self._FindFaultyDisks(instance.primary_node)
7701 faulty_secondary = self._FindFaultyDisks(secondary_node)
7703 if faulty_primary and faulty_secondary:
7704 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7705 " one node and can not be repaired"
7706 " automatically" % self.instance_name,
7710 self.disks = faulty_primary
7711 self.target_node = instance.primary_node
7712 self.other_node = secondary_node
7713 check_nodes = [self.target_node, self.other_node]
7714 elif faulty_secondary:
7715 self.disks = faulty_secondary
7716 self.target_node = secondary_node
7717 self.other_node = instance.primary_node
7718 check_nodes = [self.target_node, self.other_node]
7724 # Non-automatic modes
7725 if self.mode == constants.REPLACE_DISK_PRI:
7726 self.target_node = instance.primary_node
7727 self.other_node = secondary_node
7728 check_nodes = [self.target_node, self.other_node]
7730 elif self.mode == constants.REPLACE_DISK_SEC:
7731 self.target_node = secondary_node
7732 self.other_node = instance.primary_node
7733 check_nodes = [self.target_node, self.other_node]
7735 elif self.mode == constants.REPLACE_DISK_CHG:
7736 self.new_node = remote_node
7737 self.other_node = instance.primary_node
7738 self.target_node = secondary_node
7739 check_nodes = [self.new_node, self.other_node]
7741 _CheckNodeNotDrained(self.lu, remote_node)
7743 old_node_info = self.cfg.GetNodeInfo(secondary_node)
7744 assert old_node_info is not None
7745 if old_node_info.offline and not self.early_release:
7746 # doesn't make sense to delay the release
7747 self.early_release = True
7748 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7749 " early-release mode", secondary_node)
7752 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7755 # If not specified all disks should be replaced
7757 self.disks = range(len(self.instance.disks))
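    # For reference, the node roles assigned above are:
    #   REPLACE_DISK_PRI:  target_node = primary,   other_node = secondary
    #   REPLACE_DISK_SEC:  target_node = secondary, other_node = primary
    #   REPLACE_DISK_CHG:  target_node = old secondary, other_node = primary,
    #                      new_node = the newly chosen secondary
    #   REPLACE_DISK_AUTO: target_node = the node holding the faulty disks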
7759 for node in check_nodes:
7760 _CheckNodeOnline(self.lu, node)
7762 # Check whether disks are valid
7763 for disk_idx in self.disks:
7764 instance.FindDisk(disk_idx)
7766 # Get secondary node IP addresses
7769 for node_name in [self.target_node, self.other_node, self.new_node]:
7770 if node_name is not None:
7771 node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
7773 self.node_secondary_ip = node_2nd_ip
7775 def Exec(self, feedback_fn):
7776 """Execute disk replacement.
7778 This dispatches the disk replacement to the appropriate handler.
7781 if self.delay_iallocator:
7782 self._CheckPrereq2()
7785 feedback_fn("No disks need replacement")
7788 feedback_fn("Replacing disk(s) %s for %s" %
7789 (utils.CommaJoin(self.disks), self.instance.name))
7791 activate_disks = (not self.instance.admin_up)
7793 # Activate the instance disks if we're replacing them on a down instance
7795 _StartInstanceDisks(self.lu, self.instance, True)
7798 # Should we replace the secondary node?
7799 if self.new_node is not None:
7800 fn = self._ExecDrbd8Secondary
7802 fn = self._ExecDrbd8DiskOnly
7804 return fn(feedback_fn)
7807 # Deactivate the instance disks if we're replacing them on a
7810 _SafeShutdownInstanceDisks(self.lu, self.instance)
7812 def _CheckVolumeGroup(self, nodes):
7813 self.lu.LogInfo("Checking volume groups")
7815 vgname = self.cfg.GetVGName()
7817 # Make sure volume group exists on all involved nodes
7818 results = self.rpc.call_vg_list(nodes)
7820 raise errors.OpExecError("Can't list volume groups on the nodes")
7824 res.Raise("Error checking node %s" % node)
7825 if vgname not in res.payload:
7826 raise errors.OpExecError("Volume group '%s' not found on node %s" %
7829 def _CheckDisksExistence(self, nodes):
7830 # Check disk existence
7831 for idx, dev in enumerate(self.instance.disks):
7832 if idx not in self.disks:
7836 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
7837 self.cfg.SetDiskID(dev, node)
7839 result = self.rpc.call_blockdev_find(node, dev)
7841 msg = result.fail_msg
7842 if msg or not result.payload:
7844 msg = "disk not found"
7845 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
7848 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
7849 for idx, dev in enumerate(self.instance.disks):
7850 if idx not in self.disks:
7853 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
7856 if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
7858 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
7859 " replace disks for instance %s" %
7860 (node_name, self.instance.name))
7862 def _CreateNewStorage(self, node_name):
7863 vgname = self.cfg.GetVGName()
7866 for idx, dev in enumerate(self.instance.disks):
7867 if idx not in self.disks:
7870 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
7872 self.cfg.SetDiskID(dev, node_name)
7874 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
7875 names = _GenerateUniqueNames(self.lu, lv_names)
7877 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
7878 logical_id=(vgname, names[0]))
7879 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7880 logical_id=(vgname, names[1]))
7882 new_lvs = [lv_data, lv_meta]
7883 old_lvs = dev.children
7884 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
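      # iv_names maps the DRBD device name (e.g. "disk/0") to a tuple of
      # (drbd device, old LV children, newly created LVs); it is consumed
      # later by _CheckDevices and _RemoveOldStorage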
7886 # we pass force_create=True to force the LVM creation
7887 for new_lv in new_lvs:
7888 _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
7889 _GetInstanceInfoText(self.instance), False)
7893 def _CheckDevices(self, node_name, iv_names):
7894 for name, (dev, _, _) in iv_names.iteritems():
7895 self.cfg.SetDiskID(dev, node_name)
7897 result = self.rpc.call_blockdev_find(node_name, dev)
7899 msg = result.fail_msg
7900 if msg or not result.payload:
7902 msg = "disk not found"
7903 raise errors.OpExecError("Can't find DRBD device %s: %s" %
7906 if result.payload.is_degraded:
7907 raise errors.OpExecError("DRBD device %s is degraded!" % name)
7909 def _RemoveOldStorage(self, node_name, iv_names):
7910 for name, (_, old_lvs, _) in iv_names.iteritems():
7911 self.lu.LogInfo("Remove logical volumes for %s" % name)
7914 self.cfg.SetDiskID(lv, node_name)
7916 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
7918 self.lu.LogWarning("Can't remove old LV: %s" % msg,
7919 hint="remove unused LVs manually")
7921 def _ReleaseNodeLock(self, node_name):
7922 """Releases the lock for a given node."""
7923 self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
7925 def _ExecDrbd8DiskOnly(self, feedback_fn):
7926 """Replace a disk on the primary or secondary for DRBD 8.
7928 The algorithm for replace is quite complicated:
7930 1. for each disk to be replaced:
7932 1. create new LVs on the target node with unique names
7933 1. detach old LVs from the drbd device
7934 1. rename old LVs to name_replaced.<time_t>
7935 1. rename new LVs to old LVs
7936 1. attach the new LVs (with the old names now) to the drbd device
7938 1. wait for sync across all devices
7940 1. for each modified disk:
7942       1. remove old LVs (which have the name name_replaced.<time_t>)
7944 Failures are not very well handled.
7949 # Step: check device activation
7950 self.lu.LogStep(1, steps_total, "Check device existence")
7951 self._CheckDisksExistence([self.other_node, self.target_node])
7952 self._CheckVolumeGroup([self.target_node, self.other_node])
7954 # Step: check other node consistency
7955 self.lu.LogStep(2, steps_total, "Check peer consistency")
7956 self._CheckDisksConsistency(self.other_node,
7957 self.other_node == self.instance.primary_node,
7960 # Step: create new storage
7961 self.lu.LogStep(3, steps_total, "Allocate new storage")
7962 iv_names = self._CreateNewStorage(self.target_node)
7964 # Step: for each lv, detach+rename*2+attach
7965 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7966 for dev, old_lvs, new_lvs in iv_names.itervalues():
7967 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
7969 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
7971 result.Raise("Can't detach drbd from local storage on node"
7972 " %s for device %s" % (self.target_node, dev.iv_name))
7974 #cfg.Update(instance)
7976 # ok, we created the new LVs, so now we know we have the needed
7977 # storage; as such, we proceed on the target node to rename
7978 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
7979 # using the assumption that logical_id == physical_id (which in
7980 # turn is the unique_id on that node)
7982 # FIXME(iustin): use a better name for the replaced LVs
7983 temp_suffix = int(time.time())
7984 ren_fn = lambda d, suff: (d.physical_id[0],
7985 d.physical_id[1] + "_replaced-%s" % suff)
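      # e.g. (hypothetical names) an old data LV with physical_id
      # ('xenvg', '11111.disk0_data') is renamed by ren_fn to
      # ('xenvg', '11111.disk0_data_replaced-1357924680')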
7987 # Build the rename list based on what LVs exist on the node
7988 rename_old_to_new = []
7989 for to_ren in old_lvs:
7990 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
7991 if not result.fail_msg and result.payload:
7993 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
7995 self.lu.LogInfo("Renaming the old LVs on the target node")
7996 result = self.rpc.call_blockdev_rename(self.target_node,
7998 result.Raise("Can't rename old LVs on node %s" % self.target_node)
8000 # Now we rename the new LVs to the old LVs
8001 self.lu.LogInfo("Renaming the new LVs on the target node")
8002 rename_new_to_old = [(new, old.physical_id)
8003 for old, new in zip(old_lvs, new_lvs)]
8004 result = self.rpc.call_blockdev_rename(self.target_node,
8006 result.Raise("Can't rename new LVs on node %s" % self.target_node)
8008 for old, new in zip(old_lvs, new_lvs):
8009 new.logical_id = old.logical_id
8010 self.cfg.SetDiskID(new, self.target_node)
8012 for disk in old_lvs:
8013 disk.logical_id = ren_fn(disk, temp_suffix)
8014 self.cfg.SetDiskID(disk, self.target_node)
8016 # Now that the new lvs have the old name, we can add them to the device
8017 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
8018 result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
8020 msg = result.fail_msg
8022 for new_lv in new_lvs:
8023 msg2 = self.rpc.call_blockdev_remove(self.target_node,
8026 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
8027 hint=("cleanup manually the unused logical"
8029 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
8031 dev.children = new_lvs
8033 self.cfg.Update(self.instance, feedback_fn)
8036 if self.early_release:
8037 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8039 self._RemoveOldStorage(self.target_node, iv_names)
8040 # WARNING: we release both node locks here, do not do other RPCs
8041 # than WaitForSync to the primary node
8042 self._ReleaseNodeLock([self.target_node, self.other_node])
8045 # This can fail as the old devices are degraded and _WaitForSync
8046 # does a combined result over all disks, so we don't check its return value
8047 self.lu.LogStep(cstep, steps_total, "Sync devices")
8049 _WaitForSync(self.lu, self.instance)
8051 # Check all devices manually
8052 self._CheckDevices(self.instance.primary_node, iv_names)
8054 # Step: remove old storage
8055 if not self.early_release:
8056 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8058 self._RemoveOldStorage(self.target_node, iv_names)
8060 def _ExecDrbd8Secondary(self, feedback_fn):
8061 """Replace the secondary node for DRBD 8.
8063 The algorithm for replace is quite complicated:
8064 - for all disks of the instance:
8065 - create new LVs on the new node with same names
8066 - shutdown the drbd device on the old secondary
8067 - disconnect the drbd network on the primary
8068 - create the drbd device on the new secondary
8069 - network attach the drbd on the primary, using an artifice:
8070 the drbd code for Attach() will connect to the network if it
8071 finds a device which is connected to the good local disks but
8073 - wait for sync across all devices
8074 - remove all disks from the old secondary
8076 Failures are not very well handled.
8081 # Step: check device activation
8082 self.lu.LogStep(1, steps_total, "Check device existence")
8083 self._CheckDisksExistence([self.instance.primary_node])
8084 self._CheckVolumeGroup([self.instance.primary_node])
8086 # Step: check other node consistency
8087 self.lu.LogStep(2, steps_total, "Check peer consistency")
8088 self._CheckDisksConsistency(self.instance.primary_node, True, True)
8090 # Step: create new storage
8091 self.lu.LogStep(3, steps_total, "Allocate new storage")
8092 for idx, dev in enumerate(self.instance.disks):
8093 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8094 (self.new_node, idx))
8095 # we pass force_create=True to force LVM creation
8096 for new_lv in dev.children:
8097 _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8098 _GetInstanceInfoText(self.instance), False)
8100     # Step 4: drbd minors and drbd setup changes
8101 # after this, we must manually remove the drbd minors on both the
8102 # error and the success paths
8103 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8104 minors = self.cfg.AllocateDRBDMinor([self.new_node
8105 for dev in self.instance.disks],
8107 logging.debug("Allocated minors %r", minors)
8110 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8111         self.lu.LogInfo("Activating a new drbd on %s for disk/%d" %
8112 (self.new_node, idx))
8113 # create new devices on new_node; note that we create two IDs:
8114 # one without port, so the drbd will be activated without
8115 # networking information on the new node at this stage, and one
8116         # with network, for the later activation in step 4
8117 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8118 if self.instance.primary_node == o_node1:
8121 assert self.instance.primary_node == o_node2, "Three-node instance?"
8124 new_alone_id = (self.instance.primary_node, self.new_node, None,
8125 p_minor, new_minor, o_secret)
8126 new_net_id = (self.instance.primary_node, self.new_node, o_port,
8127 p_minor, new_minor, o_secret)
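      # Illustrative shape of the two IDs (values hypothetical):
      #   new_alone_id = (primary, new_node, None,  p_minor, new_minor, secret)
      #   new_net_id   = (primary, new_node, 11000, p_minor, new_minor, secret)
      # i.e. identical except that only the second carries the DRBD port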
8129 iv_names[idx] = (dev, dev.children, new_net_id)
8130 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8132 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8133 logical_id=new_alone_id,
8134 children=dev.children,
8137 _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8138 _GetInstanceInfoText(self.instance), False)
8139 except errors.GenericError:
8140 self.cfg.ReleaseDRBDMinors(self.instance.name)
8143 # We have new devices, shutdown the drbd on the old secondary
8144 for idx, dev in enumerate(self.instance.disks):
8145 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8146 self.cfg.SetDiskID(dev, self.target_node)
8147 msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8149 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
8150                            " node: %s" % (idx, msg),
8151 hint=("Please cleanup this device manually as"
8152 " soon as possible"))
8154 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8155 result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8156 self.node_secondary_ip,
8157 self.instance.disks)\
8158 [self.instance.primary_node]
8160 msg = result.fail_msg
8162 # detaches didn't succeed (unlikely)
8163 self.cfg.ReleaseDRBDMinors(self.instance.name)
8164 raise errors.OpExecError("Can't detach the disks from the network on"
8165 " old node: %s" % (msg,))
8167 # if we managed to detach at least one, we update all the disks of
8168 # the instance to point to the new secondary
8169 self.lu.LogInfo("Updating instance configuration")
8170 for dev, _, new_logical_id in iv_names.itervalues():
8171 dev.logical_id = new_logical_id
8172 self.cfg.SetDiskID(dev, self.instance.primary_node)
8174 self.cfg.Update(self.instance, feedback_fn)
8176 # and now perform the drbd attach
8177 self.lu.LogInfo("Attaching primary drbds to new secondary"
8178 " (standalone => connected)")
8179 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8181 self.node_secondary_ip,
8182 self.instance.disks,
8185 for to_node, to_result in result.items():
8186 msg = to_result.fail_msg
8188 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8190 hint=("please do a gnt-instance info to see the"
8191 " status of disks"))
8193 if self.early_release:
8194 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8196 self._RemoveOldStorage(self.target_node, iv_names)
8197 # WARNING: we release all node locks here, do not do other RPCs
8198 # than WaitForSync to the primary node
8199 self._ReleaseNodeLock([self.instance.primary_node,
8204 # This can fail as the old devices are degraded and _WaitForSync
8205 # does a combined result over all disks, so we don't check its return value
8206 self.lu.LogStep(cstep, steps_total, "Sync devices")
8208 _WaitForSync(self.lu, self.instance)
8210 # Check all devices manually
8211 self._CheckDevices(self.instance.primary_node, iv_names)
8213 # Step: remove old storage
8214 if not self.early_release:
8215 self.lu.LogStep(cstep, steps_total, "Removing old storage")
8216 self._RemoveOldStorage(self.target_node, iv_names)
8219 class LURepairNodeStorage(NoHooksLU):
8220 """Repairs the volume group on a node.
8225 ("storage_type", _NoDefault, _CheckStorageType),
8226 ("name", _NoDefault, _TNonEmptyString),
8227 ("ignore_consistency", False, _TBool),
8231 def CheckArguments(self):
8232 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8234 storage_type = self.op.storage_type
8236 if (constants.SO_FIX_CONSISTENCY not in
8237 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
8238 raise errors.OpPrereqError("Storage units of type '%s' can not be"
8239 " repaired" % storage_type,
8242 def ExpandNames(self):
8243 self.needed_locks = {
8244 locking.LEVEL_NODE: [self.op.node_name],
8247 def _CheckFaultyDisks(self, instance, node_name):
8248 """Ensure faulty disks abort the opcode or at least warn."""
8250 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
8252 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
8253 " node '%s'" % (instance.name, node_name),
8255 except errors.OpPrereqError, err:
8256 if self.op.ignore_consistency:
8257 self.proc.LogWarning(str(err.args[0]))
8261 def CheckPrereq(self):
8262 """Check prerequisites.
8265 # Check whether any instance on this node has faulty disks
8266 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
8267 if not inst.admin_up:
8269 check_nodes = set(inst.all_nodes)
8270 check_nodes.discard(self.op.node_name)
8271 for inst_node_name in check_nodes:
8272 self._CheckFaultyDisks(inst, inst_node_name)
8274 def Exec(self, feedback_fn):
8275 feedback_fn("Repairing storage unit '%s' on %s ..." %
8276 (self.op.name, self.op.node_name))
8278 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
8279 result = self.rpc.call_storage_execute(self.op.node_name,
8280 self.op.storage_type, st_args,
8282 constants.SO_FIX_CONSISTENCY)
8283 result.Raise("Failed to repair storage unit '%s' on %s" %
8284 (self.op.name, self.op.node_name))
8287 class LUNodeEvacuationStrategy(NoHooksLU):
8288 """Computes the node evacuation strategy.
8292 ("nodes", _NoDefault, _TListOf(_TNonEmptyString)),
8293 ("remote_node", None, _TMaybeString),
8294 ("iallocator", None, _TMaybeString),
8298 def CheckArguments(self):
8299 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
8301 def ExpandNames(self):
8302 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
8303 self.needed_locks = locks = {}
8304 if self.op.remote_node is None:
8305 locks[locking.LEVEL_NODE] = locking.ALL_SET
8307 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8308 locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
8310 def Exec(self, feedback_fn):
8311 if self.op.remote_node is not None:
8313 for node in self.op.nodes:
8314 instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
8317 if i.primary_node == self.op.remote_node:
8318 raise errors.OpPrereqError("Node %s is the primary node of"
8319 " instance %s, cannot use it as"
8321 (self.op.remote_node, i.name),
8323 result.append([i.name, self.op.remote_node])
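        # each entry in 'result' is an [instance_name, new_secondary_node]
        # pair, e.g. ["inst1.example.com", "node3.example.com"] (illustrative)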
8325 ial = IAllocator(self.cfg, self.rpc,
8326 mode=constants.IALLOCATOR_MODE_MEVAC,
8327 evac_nodes=self.op.nodes)
8328 ial.Run(self.op.iallocator, validate=True)
8330 raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
8336 class LUGrowDisk(LogicalUnit):
8337 """Grow a disk of an instance.
8341 HTYPE = constants.HTYPE_INSTANCE
8344 ("disk", _NoDefault, _TInt),
8345 ("amount", _NoDefault, _TInt),
8346 ("wait_for_sync", True, _TBool),
8350 def ExpandNames(self):
8351 self._ExpandAndLockInstance()
8352 self.needed_locks[locking.LEVEL_NODE] = []
8353 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8355 def DeclareLocks(self, level):
8356 if level == locking.LEVEL_NODE:
8357 self._LockInstancesNodes()
8359 def BuildHooksEnv(self):
8362 This runs on the master, the primary and all the secondaries.
8366 "DISK": self.op.disk,
8367 "AMOUNT": self.op.amount,
8369 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8370 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8373 def CheckPrereq(self):
8374 """Check prerequisites.
8376 This checks that the instance is in the cluster.
8379 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8380 assert instance is not None, \
8381 "Cannot retrieve locked instance %s" % self.op.instance_name
8382 nodenames = list(instance.all_nodes)
8383 for node in nodenames:
8384 _CheckNodeOnline(self, node)
8386 self.instance = instance
8388 if instance.disk_template not in constants.DTS_GROWABLE:
8389 raise errors.OpPrereqError("Instance's disk layout does not support"
8390 " growing.", errors.ECODE_INVAL)
8392 self.disk = instance.FindDisk(self.op.disk)
8394 if instance.disk_template != constants.DT_FILE:
8395 # TODO: check the free disk space for file, when that feature will be
8397 _CheckNodesFreeDisk(self, nodenames, self.op.amount)
8399 def Exec(self, feedback_fn):
8400 """Execute disk grow.
8403 instance = self.instance
8406 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
8408 raise errors.OpExecError("Cannot activate block device to grow")
8410 for node in instance.all_nodes:
8411 self.cfg.SetDiskID(disk, node)
8412 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
8413 result.Raise("Grow request failed to node %s" % node)
8415 # TODO: Rewrite code to work properly
8416 # DRBD goes into sync mode for a short amount of time after executing the
8417 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
8418 # calling "resize" in sync mode fails. Sleeping for a short amount of
8419 # time is a work-around.
8422 disk.RecordGrow(self.op.amount)
8423 self.cfg.Update(instance, feedback_fn)
8424 if self.op.wait_for_sync:
8425 disk_abort = not _WaitForSync(self, instance, disks=[disk])
8427 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
8428 " status.\nPlease check the instance.")
8429 if not instance.admin_up:
8430 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
8431 elif not instance.admin_up:
8432 self.proc.LogWarning("Not shutting down the disk even if the instance is"
8433 " not supposed to be running because no wait for"
8434 " sync mode was requested.")
8437 class LUQueryInstanceData(NoHooksLU):
8438 """Query runtime instance data.
8442 ("instances", _EmptyList, _TListOf(_TNonEmptyString)),
8443 ("static", False, _TBool),
8447 def ExpandNames(self):
8448 self.needed_locks = {}
8449 self.share_locks = dict.fromkeys(locking.LEVELS, 1)
8451 if self.op.instances:
8452 self.wanted_names = []
8453 for name in self.op.instances:
8454 full_name = _ExpandInstanceName(self.cfg, name)
8455 self.wanted_names.append(full_name)
8456 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
8458 self.wanted_names = None
8459 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
8461 self.needed_locks[locking.LEVEL_NODE] = []
8462 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8464 def DeclareLocks(self, level):
8465 if level == locking.LEVEL_NODE:
8466 self._LockInstancesNodes()
8468 def CheckPrereq(self):
8469 """Check prerequisites.
8471 This only checks the optional instance list against the existing names.
8474 if self.wanted_names is None:
8475 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
8477 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
8478 in self.wanted_names]
8480 def _ComputeBlockdevStatus(self, node, instance_name, dev):
8481 """Returns the status of a block device
8484 if self.op.static or not node:
8487 self.cfg.SetDiskID(dev, node)
8489 result = self.rpc.call_blockdev_find(node, dev)
8493 result.Raise("Can't compute disk status for %s" % instance_name)
8495 status = result.payload
8499 return (status.dev_path, status.major, status.minor,
8500 status.sync_percent, status.estimated_time,
8501 status.is_degraded, status.ldisk_status)
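    # i.e. the returned tuple is (dev_path, major, minor, sync_percent,
    # estimated_time, is_degraded, ldisk_status), for example
    # ("/dev/drbd0", 147, 0, 90.5, 120, False, None) (values illustrative)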
8503 def _ComputeDiskStatus(self, instance, snode, dev):
8504 """Compute block device status.
8507 if dev.dev_type in constants.LDS_DRBD:
8508 # we change the snode then (otherwise we use the one passed in)
8509 if dev.logical_id[0] == instance.primary_node:
8510 snode = dev.logical_id[1]
8512 snode = dev.logical_id[0]
8514 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
8516 dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
8519 dev_children = [self._ComputeDiskStatus(instance, snode, child)
8520 for child in dev.children]
8525 "iv_name": dev.iv_name,
8526 "dev_type": dev.dev_type,
8527 "logical_id": dev.logical_id,
8528 "physical_id": dev.physical_id,
8529 "pstatus": dev_pstatus,
8530 "sstatus": dev_sstatus,
8531 "children": dev_children,
8538 def Exec(self, feedback_fn):
8539 """Gather and return data"""
8542 cluster = self.cfg.GetClusterInfo()
8544 for instance in self.wanted_instances:
8545 if not self.op.static:
8546 remote_info = self.rpc.call_instance_info(instance.primary_node,
8548 instance.hypervisor)
8549 remote_info.Raise("Error checking node %s" % instance.primary_node)
8550 remote_info = remote_info.payload
8551 if remote_info and "state" in remote_info:
8554 remote_state = "down"
8557 if instance.admin_up:
8560 config_state = "down"
8562 disks = [self._ComputeDiskStatus(instance, None, device)
8563 for device in instance.disks]
8566 "name": instance.name,
8567 "config_state": config_state,
8568 "run_state": remote_state,
8569 "pnode": instance.primary_node,
8570 "snodes": instance.secondary_nodes,
8572 # this happens to be the same format used for hooks
8573 "nics": _NICListToTuple(self, instance.nics),
8574 "disk_template": instance.disk_template,
8576 "hypervisor": instance.hypervisor,
8577 "network_port": instance.network_port,
8578 "hv_instance": instance.hvparams,
8579 "hv_actual": cluster.FillHV(instance, skip_globals=True),
8580 "be_instance": instance.beparams,
8581 "be_actual": cluster.FillBE(instance),
8582 "os_instance": instance.osparams,
8583 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
8584 "serial_no": instance.serial_no,
8585 "mtime": instance.mtime,
8586 "ctime": instance.ctime,
8587 "uuid": instance.uuid,
8590 result[instance.name] = idict
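      # Illustrative shape of the final mapping (values hypothetical):
      #   {"inst1.example.com": {"name": "inst1.example.com",
      #                          "config_state": "up", "run_state": "up",
      #                          "pnode": "node1.example.com", ...}}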
8595 class LUSetInstanceParams(LogicalUnit):
8596   """Modifies an instance's parameters.
8599 HPATH = "instance-modify"
8600 HTYPE = constants.HTYPE_INSTANCE
8603 ("nics", _EmptyList, _TList),
8604 ("disks", _EmptyList, _TList),
8605 ("beparams", _EmptyDict, _TDict),
8606 ("hvparams", _EmptyDict, _TDict),
8607 ("disk_template", None, _TMaybeString),
8608 ("remote_node", None, _TMaybeString),
8609 ("os_name", None, _TMaybeString),
8610 ("force_variant", False, _TBool),
8611 ("osparams", None, _TOr(_TDict, _TNone)),
8616 def CheckArguments(self):
8617 if not (self.op.nics or self.op.disks or self.op.disk_template or
8618 self.op.hvparams or self.op.beparams or self.op.os_name):
8619 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
8621 if self.op.hvparams:
8622 _CheckGlobalHvParams(self.op.hvparams)
8626 for disk_op, disk_dict in self.op.disks:
8627 utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
8628 if disk_op == constants.DDM_REMOVE:
8631 elif disk_op == constants.DDM_ADD:
8634 if not isinstance(disk_op, int):
8635 raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
8636 if not isinstance(disk_dict, dict):
8637 msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
8638 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8640 if disk_op == constants.DDM_ADD:
8641 mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
8642 if mode not in constants.DISK_ACCESS_SET:
8643 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
8645 size = disk_dict.get('size', None)
8647 raise errors.OpPrereqError("Required disk parameter size missing",
8651 except (TypeError, ValueError), err:
8652 raise errors.OpPrereqError("Invalid disk size parameter: %s" %
8653 str(err), errors.ECODE_INVAL)
8654 disk_dict['size'] = size
8656 # modification of disk
8657 if 'size' in disk_dict:
8658 raise errors.OpPrereqError("Disk size change not possible, use"
8659 " grow-disk", errors.ECODE_INVAL)
8661 if disk_addremove > 1:
8662 raise errors.OpPrereqError("Only one disk add or remove operation"
8663 " supported at a time", errors.ECODE_INVAL)
8665 if self.op.disks and self.op.disk_template is not None:
8666 raise errors.OpPrereqError("Disk template conversion and other disk"
8667 " changes not supported at the same time",
8670 if self.op.disk_template:
8671 _CheckDiskTemplate(self.op.disk_template)
8672 if (self.op.disk_template in constants.DTS_NET_MIRROR and
8673 self.op.remote_node is None):
8674 raise errors.OpPrereqError("Changing the disk template to a mirrored"
8675 " one requires specifying a secondary node",
8680 for nic_op, nic_dict in self.op.nics:
8681 utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
8682 if nic_op == constants.DDM_REMOVE:
8685 elif nic_op == constants.DDM_ADD:
8688 if not isinstance(nic_op, int):
8689 raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
8690 if not isinstance(nic_dict, dict):
8691 msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
8692 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8694 # nic_dict should be a dict
8695 nic_ip = nic_dict.get('ip', None)
8696 if nic_ip is not None:
8697 if nic_ip.lower() == constants.VALUE_NONE:
8698 nic_dict['ip'] = None
8700 if not netutils.IPAddress.IsValid(nic_ip):
8701 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
8704 nic_bridge = nic_dict.get('bridge', None)
8705 nic_link = nic_dict.get('link', None)
8706 if nic_bridge and nic_link:
8707 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
8708 " at the same time", errors.ECODE_INVAL)
8709 elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
8710 nic_dict['bridge'] = None
8711 elif nic_link and nic_link.lower() == constants.VALUE_NONE:
8712 nic_dict['link'] = None
8714 if nic_op == constants.DDM_ADD:
8715 nic_mac = nic_dict.get('mac', None)
8717 nic_dict['mac'] = constants.VALUE_AUTO
8719 if 'mac' in nic_dict:
8720 nic_mac = nic_dict['mac']
8721 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8722 nic_mac = utils.NormalizeAndValidateMac(nic_mac)
8724 if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
8725 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
8726 " modifying an existing nic",
8729 if nic_addremove > 1:
8730 raise errors.OpPrereqError("Only one NIC add or remove operation"
8731 " supported at a time", errors.ECODE_INVAL)
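  # For illustration only, a valid set of modification lists looks like:
  #   disks=[(constants.DDM_ADD, {"size": 1024, "mode": constants.DISK_RDWR})]
  #   nics=[(0, {"ip": "192.0.2.10"}), (constants.DDM_REMOVE, {})]
  # where an integer entry is the index of an existing disk/NIC to modify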
8733 def ExpandNames(self):
8734 self._ExpandAndLockInstance()
8735 self.needed_locks[locking.LEVEL_NODE] = []
8736 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8738 def DeclareLocks(self, level):
8739 if level == locking.LEVEL_NODE:
8740 self._LockInstancesNodes()
8741 if self.op.disk_template and self.op.remote_node:
8742 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8743 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
8745 def BuildHooksEnv(self):
8748 This runs on the master, primary and secondaries.
8752 if constants.BE_MEMORY in self.be_new:
8753 args['memory'] = self.be_new[constants.BE_MEMORY]
8754 if constants.BE_VCPUS in self.be_new:
8755 args['vcpus'] = self.be_new[constants.BE_VCPUS]
8756 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
8757 # information at all.
8760 nic_override = dict(self.op.nics)
8761 for idx, nic in enumerate(self.instance.nics):
8762 if idx in nic_override:
8763 this_nic_override = nic_override[idx]
8765 this_nic_override = {}
8766 if 'ip' in this_nic_override:
8767 ip = this_nic_override['ip']
8770 if 'mac' in this_nic_override:
8771 mac = this_nic_override['mac']
8774 if idx in self.nic_pnew:
8775 nicparams = self.nic_pnew[idx]
8777 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
8778 mode = nicparams[constants.NIC_MODE]
8779 link = nicparams[constants.NIC_LINK]
8780 args['nics'].append((ip, mac, mode, link))
8781 if constants.DDM_ADD in nic_override:
8782 ip = nic_override[constants.DDM_ADD].get('ip', None)
8783 mac = nic_override[constants.DDM_ADD]['mac']
8784 nicparams = self.nic_pnew[constants.DDM_ADD]
8785 mode = nicparams[constants.NIC_MODE]
8786 link = nicparams[constants.NIC_LINK]
8787 args['nics'].append((ip, mac, mode, link))
8788 elif constants.DDM_REMOVE in nic_override:
8789 del args['nics'][-1]
8791 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
8792 if self.op.disk_template:
8793 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
8794 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8797 def CheckPrereq(self):
8798 """Check prerequisites.
8800 This only checks the instance list against the existing names.
8803 # checking the new params on the primary/secondary nodes
8805 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8806 cluster = self.cluster = self.cfg.GetClusterInfo()
8807 assert self.instance is not None, \
8808 "Cannot retrieve locked instance %s" % self.op.instance_name
8809 pnode = instance.primary_node
8810 nodelist = list(instance.all_nodes)
8813 if self.op.os_name and not self.op.force:
8814 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
8815 self.op.force_variant)
8816 instance_os = self.op.os_name
8818 instance_os = instance.os
8820 if self.op.disk_template:
8821 if instance.disk_template == self.op.disk_template:
8822 raise errors.OpPrereqError("Instance already has disk template %s" %
8823 instance.disk_template, errors.ECODE_INVAL)
8825 if (instance.disk_template,
8826 self.op.disk_template) not in self._DISK_CONVERSIONS:
8827 raise errors.OpPrereqError("Unsupported disk template conversion from"
8828 " %s to %s" % (instance.disk_template,
8829 self.op.disk_template),
8831 _CheckInstanceDown(self, instance, "cannot change disk template")
8832 if self.op.disk_template in constants.DTS_NET_MIRROR:
8833 if self.op.remote_node == pnode:
8834 raise errors.OpPrereqError("Given new secondary node %s is the same"
8835 " as the primary node of the instance" %
8836 self.op.remote_node, errors.ECODE_STATE)
8837 _CheckNodeOnline(self, self.op.remote_node)
8838 _CheckNodeNotDrained(self, self.op.remote_node)
8839 disks = [{"size": d.size} for d in instance.disks]
8840 required = _ComputeDiskSize(self.op.disk_template, disks)
8841 _CheckNodesFreeDisk(self, [self.op.remote_node], required)
8843 # hvparams processing
8844 if self.op.hvparams:
8845 hv_type = instance.hypervisor
8846 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
8847 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
8848 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
8851 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
8852 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
8853 self.hv_new = hv_new # the new actual values
8854 self.hv_inst = i_hvdict # the new dict (without defaults)
8856 self.hv_new = self.hv_inst = {}
8858 # beparams processing
8859 if self.op.beparams:
8860 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
8862 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
8863 be_new = cluster.SimpleFillBE(i_bedict)
8864 self.be_new = be_new # the new actual values
8865 self.be_inst = i_bedict # the new dict (without defaults)
8867 self.be_new = self.be_inst = {}
8869 # osparams processing
8870 if self.op.osparams:
8871 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
8872 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
8873 self.os_new = cluster.SimpleFillOS(instance_os, i_osdict)
8874 self.os_inst = i_osdict # the new dict (without defaults)
8876 self.os_new = self.os_inst = {}
8880 if constants.BE_MEMORY in self.op.beparams and not self.op.force:
8881 mem_check_list = [pnode]
8882 if be_new[constants.BE_AUTO_BALANCE]:
8883 # either we changed auto_balance to yes or it was from before
8884 mem_check_list.extend(instance.secondary_nodes)
8885 instance_info = self.rpc.call_instance_info(pnode, instance.name,
8886 instance.hypervisor)
8887 nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
8888 instance.hypervisor)
8889 pninfo = nodeinfo[pnode]
8890 msg = pninfo.fail_msg
8892 # Assume the primary node is unreachable and go ahead
8893 self.warn.append("Can't get info from primary node %s: %s" %
8895 elif not isinstance(pninfo.payload.get('memory_free', None), int):
8896 self.warn.append("Node data from primary node %s doesn't contain"
8897 " free memory information" % pnode)
8898 elif instance_info.fail_msg:
8899 self.warn.append("Can't get instance runtime information: %s" %
8900 instance_info.fail_msg)
8902 if instance_info.payload:
8903 current_mem = int(instance_info.payload['memory'])
8905 # Assume instance not running
8906 # (there is a slight race condition here, but it's not very probable,
8907 # and we have no other way to check)
8909 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
8910 pninfo.payload['memory_free'])
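        # worked example (numbers illustrative): requesting 2048 MB while the
        # instance currently uses 512 MB and the primary node has 1024 MB
        # free gives miss_mem = 2048 - 512 - 1024 = 512 > 0, so the change
        # is refused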
8912 raise errors.OpPrereqError("This change will prevent the instance"
8913 " from starting, due to %d MB of memory"
8914 " missing on its primary node" % miss_mem,
8917 if be_new[constants.BE_AUTO_BALANCE]:
8918 for node, nres in nodeinfo.items():
8919 if node not in instance.secondary_nodes:
8923 self.warn.append("Can't get info from secondary node %s: %s" %
8925 elif not isinstance(nres.payload.get('memory_free', None), int):
8926 self.warn.append("Secondary node %s didn't return free"
8927 " memory information" % node)
8928 elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
8929 self.warn.append("Not enough memory to failover instance to"
8930 " secondary node %s" % node)
8935 for nic_op, nic_dict in self.op.nics:
8936 if nic_op == constants.DDM_REMOVE:
8937 if not instance.nics:
8938 raise errors.OpPrereqError("Instance has no NICs, cannot remove",
8941 if nic_op != constants.DDM_ADD:
8943 if not instance.nics:
8944 raise errors.OpPrereqError("Invalid NIC index %s, instance has"
8945 " no NICs" % nic_op,
8947 if nic_op < 0 or nic_op >= len(instance.nics):
8948 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
8950 (nic_op, len(instance.nics) - 1),
8952 old_nic_params = instance.nics[nic_op].nicparams
8953 old_nic_ip = instance.nics[nic_op].ip
8958 update_params_dict = dict([(key, nic_dict[key])
8959 for key in constants.NICS_PARAMETERS
8960 if key in nic_dict])
8962 if 'bridge' in nic_dict:
8963 update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
8965 new_nic_params = _GetUpdatedParams(old_nic_params,
8967 utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
8968 new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
8969 objects.NIC.CheckParameterSyntax(new_filled_nic_params)
8970 self.nic_pinst[nic_op] = new_nic_params
8971 self.nic_pnew[nic_op] = new_filled_nic_params
8972 new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
8974 if new_nic_mode == constants.NIC_MODE_BRIDGED:
8975 nic_bridge = new_filled_nic_params[constants.NIC_LINK]
8976 msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
8978 msg = "Error checking bridges on node %s: %s" % (pnode, msg)
8980 self.warn.append(msg)
8982 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
8983 if new_nic_mode == constants.NIC_MODE_ROUTED:
8984 if 'ip' in nic_dict:
8985 nic_ip = nic_dict['ip']
8989 raise errors.OpPrereqError('Cannot set the nic ip to None'
8990 ' on a routed nic', errors.ECODE_INVAL)
8991 if 'mac' in nic_dict:
8992 nic_mac = nic_dict['mac']
8994 raise errors.OpPrereqError('Cannot set the nic mac to None',
8996 elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8997 # otherwise generate the mac
8998 nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
9000 # or validate/reserve the current one
9002 self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
9003 except errors.ReservationError:
9004 raise errors.OpPrereqError("MAC address %s already in use"
9005 " in cluster" % nic_mac,
9006 errors.ECODE_NOTUNIQUE)
9009 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
9010 raise errors.OpPrereqError("Disk operations not supported for"
9011 " diskless instances",
9013 for disk_op, _ in self.op.disks:
9014 if disk_op == constants.DDM_REMOVE:
9015 if len(instance.disks) == 1:
9016 raise errors.OpPrereqError("Cannot remove the last disk of"
9017 " an instance", errors.ECODE_INVAL)
9018 _CheckInstanceDown(self, instance, "cannot remove disks")
9020 if (disk_op == constants.DDM_ADD and
9021 len(instance.nics) >= constants.MAX_DISKS):
9022 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
9023 " add more" % constants.MAX_DISKS,
9025 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
9027 if disk_op < 0 or disk_op >= len(instance.disks):
9028 raise errors.OpPrereqError("Invalid disk index %s, valid values"
9030 (disk_op, len(instance.disks)),
9035 def _ConvertPlainToDrbd(self, feedback_fn):
9036 """Converts an instance from plain to drbd.
9039 feedback_fn("Converting template to drbd")
9040 instance = self.instance
9041 pnode = instance.primary_node
9042 snode = self.op.remote_node
9044 # create a fake disk info for _GenerateDiskTemplate
9045 disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
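    # e.g. disk_info == [{"size": 10240, "mode": "rw"}] for a single
    # 10 GB plain disk (values illustrative)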
9046 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9047 instance.name, pnode, [snode],
9048 disk_info, None, None, 0)
9049 info = _GetInstanceInfoText(instance)
9050     feedback_fn("Creating additional volumes...")
9051 # first, create the missing data and meta devices
9052 for disk in new_disks:
9053 # unfortunately this is... not too nice
9054 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9056 for child in disk.children:
9057 _CreateSingleBlockDev(self, snode, instance, child, info, True)
9058 # at this stage, all new LVs have been created, we can rename the
9060 feedback_fn("Renaming original volumes...")
9061 rename_list = [(o, n.children[0].logical_id)
9062 for (o, n) in zip(instance.disks, new_disks)]
9063 result = self.rpc.call_blockdev_rename(pnode, rename_list)
9064 result.Raise("Failed to rename original LVs")
9066 feedback_fn("Initializing DRBD devices...")
9067 # all child devices are in place, we can now create the DRBD devices
9068 for disk in new_disks:
9069 for node in [pnode, snode]:
9070 f_create = node == pnode
9071 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9073 # at this point, the instance has been modified
9074 instance.disk_template = constants.DT_DRBD8
9075 instance.disks = new_disks
9076 self.cfg.Update(instance, feedback_fn)
9078 # disks are created, waiting for sync
9079 disk_abort = not _WaitForSync(self, instance)
9081 raise errors.OpExecError("There are some degraded disks for"
9082 " this instance, please cleanup manually")
9084 def _ConvertDrbdToPlain(self, feedback_fn):
9085 """Converts an instance from drbd to plain.
9088 instance = self.instance
9089 assert len(instance.secondary_nodes) == 1
9090 pnode = instance.primary_node
9091 snode = instance.secondary_nodes[0]
9092 feedback_fn("Converting template to plain")
9094 old_disks = instance.disks
9095 new_disks = [d.children[0] for d in old_disks]
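    # for DRBD8 disks, children[0] is the data LV and children[1] the
    # metadata LV, so keeping children[0] preserves the instance's data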
9097 # copy over size and mode
9098 for parent, child in zip(old_disks, new_disks):
9099 child.size = parent.size
9100 child.mode = parent.mode
9102 # update instance structure
9103 instance.disks = new_disks
9104 instance.disk_template = constants.DT_PLAIN
9105 self.cfg.Update(instance, feedback_fn)
9107 feedback_fn("Removing volumes on the secondary node...")
9108 for disk in old_disks:
9109 self.cfg.SetDiskID(disk, snode)
9110 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9112 self.LogWarning("Could not remove block device %s on node %s,"
9113 " continuing anyway: %s", disk.iv_name, snode, msg)
9115 feedback_fn("Removing unneeded volumes on the primary node...")
9116 for idx, disk in enumerate(old_disks):
9117 meta = disk.children[1]
9118 self.cfg.SetDiskID(meta, pnode)
9119 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9121 self.LogWarning("Could not remove metadata for disk %d on node %s,"
9122 " continuing anyway: %s", idx, pnode, msg)
9125 def Exec(self, feedback_fn):
9126 """Modifies an instance.
9128 All parameters take effect only at the next restart of the instance.
9131 # Process here the warnings from CheckPrereq, as we don't have a
9132 # feedback_fn there.
9133 for warn in self.warn:
9134 feedback_fn("WARNING: %s" % warn)
9137 instance = self.instance
9139 for disk_op, disk_dict in self.op.disks:
9140 if disk_op == constants.DDM_REMOVE:
9141 # remove the last disk
9142 device = instance.disks.pop()
9143 device_idx = len(instance.disks)
9144 for node, disk in device.ComputeNodeTree(instance.primary_node):
9145 self.cfg.SetDiskID(disk, node)
9146 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9148 self.LogWarning("Could not remove disk/%d on node %s: %s,"
9149 " continuing anyway", device_idx, node, msg)
9150 result.append(("disk/%d" % device_idx, "remove"))
9151 elif disk_op == constants.DDM_ADD:
9153 if instance.disk_template == constants.DT_FILE:
9154 file_driver, file_path = instance.disks[0].logical_id
9155 file_path = os.path.dirname(file_path)
9157 file_driver = file_path = None
9158 disk_idx_base = len(instance.disks)
9159 new_disk = _GenerateDiskTemplate(self,
9160 instance.disk_template,
9161 instance.name, instance.primary_node,
9162 instance.secondary_nodes,
9167 instance.disks.append(new_disk)
9168 info = _GetInstanceInfoText(instance)
9170 logging.info("Creating volume %s for instance %s",
9171 new_disk.iv_name, instance.name)
9172 # Note: this needs to be kept in sync with _CreateDisks
9174 for node in instance.all_nodes:
9175 f_create = node == instance.primary_node
9177 _CreateBlockDev(self, node, instance, new_disk,
9178 f_create, info, f_create)
9179 except errors.OpExecError, err:
9180 self.LogWarning("Failed to create volume %s (%s) on"
9182 new_disk.iv_name, new_disk, node, err)
9183 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9184 (new_disk.size, new_disk.mode)))
9186 # change a given disk
9187 instance.disks[disk_op].mode = disk_dict['mode']
9188 result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9190 if self.op.disk_template:
9191 r_shut = _ShutdownInstanceDisks(self, instance)
9193       raise errors.OpExecError("Cannot shutdown instance disks, unable to"
9194 " proceed with disk template conversion")
9195 mode = (instance.disk_template, self.op.disk_template)
9197 self._DISK_CONVERSIONS[mode](self, feedback_fn)
9199 self.cfg.ReleaseDRBDMinors(instance.name)
9201 result.append(("disk_template", self.op.disk_template))
9204 for nic_op, nic_dict in self.op.nics:
9205 if nic_op == constants.DDM_REMOVE:
9206 # remove the last nic
9207 del instance.nics[-1]
9208 result.append(("nic.%d" % len(instance.nics), "remove"))
9209 elif nic_op == constants.DDM_ADD:
9210         # mac and bridge should be set by now
9211 mac = nic_dict['mac']
9212 ip = nic_dict.get('ip', None)
9213 nicparams = self.nic_pinst[constants.DDM_ADD]
9214 new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9215 instance.nics.append(new_nic)
9216 result.append(("nic.%d" % (len(instance.nics) - 1),
9217 "add:mac=%s,ip=%s,mode=%s,link=%s" %
9218 (new_nic.mac, new_nic.ip,
9219 self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9220 self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9223 for key in 'mac', 'ip':
9225 setattr(instance.nics[nic_op], key, nic_dict[key])
9226 if nic_op in self.nic_pinst:
9227 instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9228 for key, val in nic_dict.iteritems():
9229 result.append(("nic.%s/%d" % (key, nic_op), val))
9232 if self.op.hvparams:
9233 instance.hvparams = self.hv_inst
9234 for key, val in self.op.hvparams.iteritems():
9235 result.append(("hv/%s" % key, val))
9238 if self.op.beparams:
9239 instance.beparams = self.be_inst
9240 for key, val in self.op.beparams.iteritems():
9241 result.append(("be/%s" % key, val))
9245 instance.os = self.op.os_name
9248 if self.op.osparams:
9249 instance.osparams = self.os_inst
9250 for key, val in self.op.osparams.iteritems():
9251 result.append(("os/%s" % key, val))
9253 self.cfg.Update(instance, feedback_fn)
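    # 'result' accumulates (parameter, new value) pairs for the caller,
    # e.g. [("disk/1", "add:size=1024,mode=rw"), ("be/memory", 512)]
    # (values illustrative)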
9257 _DISK_CONVERSIONS = {
9258 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
9259 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
9263 class LUQueryExports(NoHooksLU):
9264 """Query the exports list
9268 ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
9269 ("use_locking", False, _TBool),
9273 def ExpandNames(self):
9274 self.needed_locks = {}
9275 self.share_locks[locking.LEVEL_NODE] = 1
9276 if not self.op.nodes:
9277 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9279 self.needed_locks[locking.LEVEL_NODE] = \
9280 _GetWantedNodes(self, self.op.nodes)
9282 def Exec(self, feedback_fn):
9283 """Compute the list of all the exported system images.
9286 @return: a dictionary with the structure node->(export-list)
9287 where export-list is a list of the instances exported on
9291 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9292 rpcresult = self.rpc.call_export_list(self.nodes)
9294 for node in rpcresult:
9295 if rpcresult[node].fail_msg:
9296 result[node] = False
9298 result[node] = rpcresult[node].payload
9303 class LUPrepareExport(NoHooksLU):
9304 """Prepares an instance for an export and returns useful information.
9309 ("mode", _NoDefault, _TElemOf(constants.EXPORT_MODES)),
9313 def ExpandNames(self):
9314 self._ExpandAndLockInstance()
9316 def CheckPrereq(self):
9317 """Check prerequisites.
9320 instance_name = self.op.instance_name
9322 self.instance = self.cfg.GetInstanceInfo(instance_name)
9323 assert self.instance is not None, \
9324 "Cannot retrieve locked instance %s" % self.op.instance_name
9325 _CheckNodeOnline(self, self.instance.primary_node)
9327 self._cds = _GetClusterDomainSecret()
9329 def Exec(self, feedback_fn):
9330 """Prepares an instance for an export.
9333 instance = self.instance
9335 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9336 salt = utils.GenerateSecret(8)
9338 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
9339 result = self.rpc.call_x509_cert_create(instance.primary_node,
9340 constants.RIE_CERT_VALIDITY)
9341 result.Raise("Can't create X509 key and certificate on %s" % result.node)
9343 (name, cert_pem) = result.payload
9345 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
9349 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
9350 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
9352 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
9358 class LUExportInstance(LogicalUnit):
9359 """Export an instance to an image in the cluster.
9362 HPATH = "instance-export"
9363 HTYPE = constants.HTYPE_INSTANCE
9366 ("target_node", _NoDefault, _TOr(_TNonEmptyString, _TList)),
9367 ("shutdown", True, _TBool),
9369 ("remove_instance", False, _TBool),
9370 ("ignore_remove_failures", False, _TBool),
9371 ("mode", constants.EXPORT_MODE_LOCAL, _TElemOf(constants.EXPORT_MODES)),
9372 ("x509_key_name", None, _TOr(_TList, _TNone)),
9373 ("destination_x509_ca", None, _TMaybeString),
9377 def CheckArguments(self):
9378 """Check the arguments.
9381 self.x509_key_name = self.op.x509_key_name
9382 self.dest_x509_ca_pem = self.op.destination_x509_ca
9384 if self.op.remove_instance and not self.op.shutdown:
9385 raise errors.OpPrereqError("Can not remove instance without shutting it"
9388 if self.op.mode == constants.EXPORT_MODE_REMOTE:
9389 if not self.x509_key_name:
9390 raise errors.OpPrereqError("Missing X509 key name for encryption",
9393 if not self.dest_x509_ca_pem:
9394 raise errors.OpPrereqError("Missing destination X509 CA",
9397 def ExpandNames(self):
9398 self._ExpandAndLockInstance()
9400 # Lock all nodes for local exports
9401 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9402 # FIXME: lock only instance primary and destination node
9404     # Sad but true, for now we have to lock all nodes, as we don't know where
9405 # the previous export might be, and in this LU we search for it and
9406 # remove it from its current node. In the future we could fix this by:
9407 # - making a tasklet to search (share-lock all), then create the
9408 # new one, then one to remove, after
9409 # - removing the removal operation altogether
9410 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9412 def DeclareLocks(self, level):
9413 """Last minute lock declaration."""
9414 # All nodes are locked anyway, so nothing to do here.
9416 def BuildHooksEnv(self):
9419 This will run on the master, primary node and target node.
9423 "EXPORT_MODE": self.op.mode,
9424 "EXPORT_NODE": self.op.target_node,
9425 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
9426 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
9427 # TODO: Generic function for boolean env variables
9428 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
9431 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9433 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
9435 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9436 nl.append(self.op.target_node)
9440 def CheckPrereq(self):
9441 """Check prerequisites.
9443 This checks that the instance and node names are valid.
9446 instance_name = self.op.instance_name
9448 self.instance = self.cfg.GetInstanceInfo(instance_name)
9449 assert self.instance is not None, \
9450 "Cannot retrieve locked instance %s" % self.op.instance_name
9451 _CheckNodeOnline(self, self.instance.primary_node)
9453 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9454 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
9455 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
9456 assert self.dst_node is not None
9458 _CheckNodeOnline(self, self.dst_node.name)
9459 _CheckNodeNotDrained(self, self.dst_node.name)
9462 self.dest_disk_info = None
9463 self.dest_x509_ca = None
9465 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9466 self.dst_node = None
9468 if len(self.op.target_node) != len(self.instance.disks):
9469 raise errors.OpPrereqError(("Received destination information for %s"
9470 " disks, but instance %s has %s disks") %
9471 (len(self.op.target_node), instance_name,
9472 len(self.instance.disks)),
9475 cds = _GetClusterDomainSecret()
9477 # Check X509 key name
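# The key name is expected to be a (name, hmac, salt) triple; an illustrative
# example (values are assumptions, not taken from this module) would be
#   ("x509-key-1", utils.Sha1Hmac(cds, "x509-key-1", salt="1234"), "1234")
# and the HMAC is re-verified below using the cluster domain secret.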
9479 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
9480 except (TypeError, ValueError), err:
9481 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
9483 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
9484 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
9487 # Load and verify CA
9489 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
9490 except OpenSSL.crypto.Error, err:
9491 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
9492 (err, ), errors.ECODE_INVAL)
9494 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9495 if errcode is not None:
9496 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
9497 (msg, ), errors.ECODE_INVAL)
9499 self.dest_x509_ca = cert
9501 # Verify target information
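# For remote exports "target_node" is not a node name but a list with one
# entry per instance disk (see the length check above); each entry is decoded
# below into a (host, port, magic) tuple.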
9503 for idx, disk_data in enumerate(self.op.target_node):
9505 (host, port, magic) = \
9506 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
9507 except errors.GenericError, err:
9508 raise errors.OpPrereqError("Target info for disk %s: %s" %
9509 (idx, err), errors.ECODE_INVAL)
9511 disk_info.append((host, port, magic))
9513 assert len(disk_info) == len(self.op.target_node)
9514 self.dest_disk_info = disk_info
9517 raise errors.ProgrammerError("Unhandled export mode %r" %
9520 # instance disk type verification
9521 # TODO: Implement export support for file-based disks
9522 for disk in self.instance.disks:
9523 if disk.dev_type == constants.LD_FILE:
9524 raise errors.OpPrereqError("Export not supported for instances with"
9525 " file-based disks", errors.ECODE_INVAL)
9527 def _CleanupExports(self, feedback_fn):
9528 """Removes exports of current instance from all other nodes.
9530 If an instance in a cluster with nodes A..D was exported to node C, its
9531 exports will be removed from the nodes A, B and D.
9534 assert self.op.mode != constants.EXPORT_MODE_REMOTE
9536 nodelist = self.cfg.GetNodeList()
9537 nodelist.remove(self.dst_node.name)
9539 # on one-node clusters nodelist will be empty after the removal;
9540 # if we proceeded, the backup would be removed because OpQueryExports
9541 # substitutes an empty list with the full cluster node list.
9542 iname = self.instance.name
9544 feedback_fn("Removing old exports for instance %s" % iname)
9545 exportlist = self.rpc.call_export_list(nodelist)
9546 for node in exportlist:
9547 if exportlist[node].fail_msg:
9549 if iname in exportlist[node].payload:
9550 msg = self.rpc.call_export_remove(node, iname).fail_msg
9552 self.LogWarning("Could not remove older export for instance %s"
9553 " on node %s: %s", iname, node, msg)
9555 def Exec(self, feedback_fn):
9556 """Export an instance to an image in the cluster.
9559 assert self.op.mode in constants.EXPORT_MODES
9561 instance = self.instance
9562 src_node = instance.primary_node
9564 if self.op.shutdown:
9565 # shutdown the instance, but not the disks
9566 feedback_fn("Shutting down instance %s" % instance.name)
9567 result = self.rpc.call_instance_shutdown(src_node, instance,
9568 self.op.shutdown_timeout)
9569 # TODO: Maybe ignore failures if ignore_remove_failures is set
9570 result.Raise("Could not shut down instance %s on"
9571 " node %s" % (instance.name, src_node))
9573 # set the disks ID correctly since call_instance_start needs the
9574 # correct drbd minor to create the symlinks
9575 for disk in instance.disks:
9576 self.cfg.SetDiskID(disk, src_node)
9578 activate_disks = (not instance.admin_up)
9581 # Activate the instance disks if we're exporting a stopped instance
9582 feedback_fn("Activating disks for %s" % instance.name)
9583 _StartInstanceDisks(self, instance, None)
9586 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
9589 helper.CreateSnapshots()
9591 if (self.op.shutdown and instance.admin_up and
9592 not self.op.remove_instance):
9593 assert not activate_disks
9594 feedback_fn("Starting instance %s" % instance.name)
9595 result = self.rpc.call_instance_start(src_node, instance, None, None)
9596 msg = result.fail_msg
9598 feedback_fn("Failed to start instance: %s" % msg)
9599 _ShutdownInstanceDisks(self, instance)
9600 raise errors.OpExecError("Could not start instance: %s" % msg)
9602 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9603 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
9604 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9605 connect_timeout = constants.RIE_CONNECT_TIMEOUT
9606 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
9608 (key_name, _, _) = self.x509_key_name
9611 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
9614 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
9615 key_name, dest_ca_pem,
9620 # Check for backwards compatibility
9621 assert len(dresults) == len(instance.disks)
9622 assert compat.all(isinstance(i, bool) for i in dresults), \
9623 "Not all results are boolean: %r" % dresults
9627 feedback_fn("Deactivating disks for %s" % instance.name)
9628 _ShutdownInstanceDisks(self, instance)
9630 if not (compat.all(dresults) and fin_resu):
9633 failures.append("export finalization")
9634 if not compat.all(dresults):
9635 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
9637 failures.append("disk export: disk(s) %s" % fdsk)
9639 raise errors.OpExecError("Export failed, errors in %s" %
9640 utils.CommaJoin(failures))
9642 # At this point the export was successful; we can clean up and finish
9644 # Remove instance if requested
9645 if self.op.remove_instance:
9646 feedback_fn("Removing instance %s" % instance.name)
9647 _RemoveInstance(self, feedback_fn, instance,
9648 self.op.ignore_remove_failures)
9650 if self.op.mode == constants.EXPORT_MODE_LOCAL:
9651 self._CleanupExports(feedback_fn)
9653 return fin_resu, dresults
9656 class LURemoveExport(NoHooksLU):
9657 """Remove exports related to the named instance.
9665 def ExpandNames(self):
9666 self.needed_locks = {}
9667 # We need all nodes to be locked in order for RemoveExport to work, but we
9668 # don't need to lock the instance itself, as nothing will happen to it (and
9669 # we can remove exports even for an already-removed instance)
9670 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9672 def Exec(self, feedback_fn):
9673 """Remove any export.
9676 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
9677 # If the instance was not found we'll try with the name that was passed in.
9678 # This will only work if it was an FQDN, though.
9680 if not instance_name:
9682 instance_name = self.op.instance_name
9684 locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
9685 exportlist = self.rpc.call_export_list(locked_nodes)
9687 for node in exportlist:
9688 msg = exportlist[node].fail_msg
9690 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
9692 if instance_name in exportlist[node].payload:
9694 result = self.rpc.call_export_remove(node, instance_name)
9695 msg = result.fail_msg
9697 logging.error("Could not remove export for instance %s"
9698 " on node %s: %s", instance_name, node, msg)
9700 if fqdn_warn and not found:
9701 feedback_fn("Export not found. If trying to remove an export belonging"
9702 " to a deleted instance please use its Fully Qualified"
9706 class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
9709 This is an abstract class which is the parent of all the other tag LUs.
9713 def ExpandNames(self):
9714 self.needed_locks = {}
9715 if self.op.kind == constants.TAG_NODE:
9716 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
9717 self.needed_locks[locking.LEVEL_NODE] = self.op.name
9718 elif self.op.kind == constants.TAG_INSTANCE:
9719 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
9720 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
9722 def CheckPrereq(self):
9723 """Check prerequisites.
9726 if self.op.kind == constants.TAG_CLUSTER:
9727 self.target = self.cfg.GetClusterInfo()
9728 elif self.op.kind == constants.TAG_NODE:
9729 self.target = self.cfg.GetNodeInfo(self.op.name)
9730 elif self.op.kind == constants.TAG_INSTANCE:
9731 self.target = self.cfg.GetInstanceInfo(self.op.name)
9733 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
9734 str(self.op.kind), errors.ECODE_INVAL)
9737 class LUGetTags(TagsLU):
9738 """Returns the tags of a given object.
9742 ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
9743 # Name is only meaningful for nodes and instances
9744 ("name", _NoDefault, _TMaybeString),
9748 def Exec(self, feedback_fn):
9749 """Returns the tag list.
9752 return list(self.target.GetTags())
9755 class LUSearchTags(NoHooksLU):
9756 """Searches the tags for a given pattern.
9760 ("pattern", _NoDefault, _TNonEmptyString),
9764 def ExpandNames(self):
9765 self.needed_locks = {}
9767 def CheckPrereq(self):
9768 """Check prerequisites.
9770 This checks the pattern passed for validity by compiling it.
9774 self.re = re.compile(self.op.pattern)
9775 except re.error, err:
9776 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
9777 (self.op.pattern, err), errors.ECODE_INVAL)
9779 def Exec(self, feedback_fn):
9780 """Returns the tag list.
9784 tgts = [("/cluster", cfg.GetClusterInfo())]
9785 ilist = cfg.GetAllInstancesInfo().values()
9786 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
9787 nlist = cfg.GetAllNodesInfo().values()
9788 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
9790 for path, target in tgts:
9791 for tag in target.GetTags():
9792 if self.re.search(tag):
9793 results.append((path, tag))
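# The returned value is a list of (path, tag) tuples, for example
# (illustrative): [("/instances/inst1.example.com", "webserver"),
# ("/nodes/node2.example.com", "rack1")].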
9797 class LUAddTags(TagsLU):
9798 """Sets a tag on a given object.
9802 ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
9803 # Name is only meaningful for nodes and instances
9804 ("name", _NoDefault, _TMaybeString),
9805 ("tags", _NoDefault, _TListOf(_TNonEmptyString)),
9809 def CheckPrereq(self):
9810 """Check prerequisites.
9812 This checks the type and length of the tag name and value.
9815 TagsLU.CheckPrereq(self)
9816 for tag in self.op.tags:
9817 objects.TaggableObject.ValidateTag(tag)
9819 def Exec(self, feedback_fn):
9824 for tag in self.op.tags:
9825 self.target.AddTag(tag)
9826 except errors.TagError, err:
9827 raise errors.OpExecError("Error while setting tag: %s" % str(err))
9828 self.cfg.Update(self.target, feedback_fn)
9831 class LUDelTags(TagsLU):
9832 """Delete a list of tags from a given object.
9836 ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
9837 # Name is only meaningful for nodes and instances
9838 ("name", _NoDefault, _TMaybeString),
9839 ("tags", _NoDefault, _TListOf(_TNonEmptyString)),
9843 def CheckPrereq(self):
9844 """Check prerequisites.
9846 This checks that we have the given tag.
9849 TagsLU.CheckPrereq(self)
9850 for tag in self.op.tags:
9851 objects.TaggableObject.ValidateTag(tag)
9852 del_tags = frozenset(self.op.tags)
9853 cur_tags = self.target.GetTags()
9854 if not del_tags <= cur_tags:
9855 diff_tags = del_tags - cur_tags
9856 diff_names = ["'%s'" % tag for tag in diff_tags]
9858 raise errors.OpPrereqError("Tag(s) %s not found" %
9859 (",".join(diff_names)), errors.ECODE_NOENT)
9861 def Exec(self, feedback_fn):
9862 """Remove the tag from the object.
9865 for tag in self.op.tags:
9866 self.target.RemoveTag(tag)
9867 self.cfg.Update(self.target, feedback_fn)
9870 class LUTestDelay(NoHooksLU):
9871 """Sleep for a specified amount of time.
9873 This LU sleeps on the master and/or nodes for a specified amount of
9878 ("duration", _NoDefault, _TFloat),
9879 ("on_master", True, _TBool),
9880 ("on_nodes", _EmptyList, _TListOf(_TNonEmptyString)),
9881 ("repeat", 0, _TPositiveInt)
9885 def ExpandNames(self):
9886 """Expand names and set required locks.
9888 This expands the node list, if any.
9891 self.needed_locks = {}
9892 if self.op.on_nodes:
9893 # _GetWantedNodes can be used here, but is not always appropriate to use
9894 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
9896 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
9897 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
9899 def _TestDelay(self):
9900 """Do the actual sleep.
9903 if self.op.on_master:
9904 if not utils.TestDelay(self.op.duration):
9905 raise errors.OpExecError("Error during master delay test")
9906 if self.op.on_nodes:
9907 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
9908 for node, node_result in result.items():
9909 node_result.Raise("Failure during rpc call to node %s" % node)
9911 def Exec(self, feedback_fn):
9912 """Execute the test delay opcode, with the wanted repetitions.
9915 if self.op.repeat == 0:
9918 top_value = self.op.repeat - 1
9919 for i in range(self.op.repeat):
9920 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
9924 class LUTestJobqueue(NoHooksLU):
9925 """Utility LU to test some aspects of the job queue.
9929 ("notify_waitlock", False, _TBool),
9930 ("notify_exec", False, _TBool),
9931 ("log_messages", _EmptyList, _TListOf(_TString)),
9932 ("fail", False, _TBool),
9936 # Must be lower than default timeout for WaitForJobChange to see whether it
9937 # notices changed jobs
9938 _CLIENT_CONNECT_TIMEOUT = 20.0
9939 _CLIENT_CONFIRM_TIMEOUT = 60.0
9942 def _NotifyUsingSocket(cls, cb, errcls):
9943 """Opens a Unix socket and waits for another program to connect.
9946 @param cb: Callback to send socket name to client
9948 @param errcls: Exception class to use for errors
9951 # Using a temporary directory as there's no easy way to create temporary
9952 # sockets without writing a custom loop around tempfile.mktemp and
9954 tmpdir = tempfile.mkdtemp()
9956 tmpsock = utils.PathJoin(tmpdir, "sock")
9958 logging.debug("Creating temporary socket at %s", tmpsock)
9959 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
9964 # Send details to client
9967 # Wait for client to connect before continuing
9968 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
9970 (conn, _) = sock.accept()
9971 except socket.error, err:
9972 raise errcls("Client didn't connect in time (%s)" % err)
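# The peer is expected to behave roughly like this minimal sketch (an
# assumption, not part of this module):
#   s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#   s.connect(sockname)   # unblocks sock.accept() above
#   s.close()             # later signals confirmation, see below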
9976 # Remove as soon as client is connected
9977 shutil.rmtree(tmpdir)
9979 # Wait for client to close
9982 # pylint: disable-msg=E1101
9983 # Instance of '_socketobject' has no ... member
9984 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
9986 except socket.error, err:
9987 raise errcls("Client failed to confirm notification (%s)" % err)
9991 def _SendNotification(self, test, arg, sockname):
9992 """Sends a notification to the client.
9995 @param test: Test name
9996 @param arg: Test argument (depends on test)
9997 @type sockname: string
9998 @param sockname: Socket path
10001 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
10003 def _Notify(self, prereq, test, arg):
10004 """Notifies the client of a test.
10007 @param prereq: Whether this is a prereq-phase test
10009 @param test: Test name
10010 @param arg: Test argument (depends on test)
10014 errcls = errors.OpPrereqError
10016 errcls = errors.OpExecError
10018 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
10022 def CheckArguments(self):
10023 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
10024 self.expandnames_calls = 0
10026 def ExpandNames(self):
10027 checkargs_calls = getattr(self, "checkargs_calls", 0)
10028 if checkargs_calls < 1:
10029 raise errors.ProgrammerError("CheckArguments was not called")
10031 self.expandnames_calls += 1
10033 if self.op.notify_waitlock:
10034 self._Notify(True, constants.JQT_EXPANDNAMES, None)
10036 self.LogInfo("Expanding names")
10038 # Get lock on master node (just to get a lock, not for a particular reason)
10039 self.needed_locks = {
10040 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
10043 def Exec(self, feedback_fn):
10044 if self.expandnames_calls < 1:
10045 raise errors.ProgrammerError("ExpandNames was not called")
10047 if self.op.notify_exec:
10048 self._Notify(False, constants.JQT_EXEC, None)
10050 self.LogInfo("Executing")
10052 if self.op.log_messages:
10053 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
10054 for idx, msg in enumerate(self.op.log_messages):
10055 self.LogInfo("Sending log message %s", idx + 1)
10056 feedback_fn(constants.JQT_MSGPREFIX + msg)
10057 # Report how many test messages have been sent
10058 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
10061 raise errors.OpExecError("Opcode failure was requested")
10066 class IAllocator(object):
10067 """IAllocator framework.
10069 An IAllocator instance has several sets of attributes:
10070 - cfg that is needed to query the cluster
10071 - input data (all members of the _KEYS class attribute are required)
10072 - four buffer attributes (in|out_data|text), that represent the
10073 input (to the external script) in text and data structure format,
10074 and the output from it, again in two formats
10075 - the result variables from the script (success, info, result) for
10079 # pylint: disable-msg=R0902
10080 # lots of instance attributes
10082 "name", "mem_size", "disks", "disk_template",
10083 "os", "tags", "nics", "vcpus", "hypervisor",
10086 "name", "relocate_from",
10092 def __init__(self, cfg, rpc, mode, **kwargs):
10095 # init buffer variables
10096 self.in_text = self.out_text = self.in_data = self.out_data = None
10097 # init all input fields so that pylint is happy
10099 self.mem_size = self.disks = self.disk_template = None
10100 self.os = self.tags = self.nics = self.vcpus = None
10101 self.hypervisor = None
10102 self.relocate_from = None
10104 self.evac_nodes = None
10106 self.required_nodes = None
10107 # init result fields
10108 self.success = self.info = self.result = None
10109 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10110 keyset = self._ALLO_KEYS
10111 fn = self._AddNewInstance
10112 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10113 keyset = self._RELO_KEYS
10114 fn = self._AddRelocateInstance
10115 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10116 keyset = self._EVAC_KEYS
10117 fn = self._AddEvacuateNodes
10119 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
10120 " IAllocator" % self.mode)
10122 if key not in keyset:
10123 raise errors.ProgrammerError("Invalid input parameter '%s' to"
10124 " IAllocator" % key)
10125 setattr(self, key, kwargs[key])
10128 if key not in kwargs:
10129 raise errors.ProgrammerError("Missing input parameter '%s' to"
10130 " IAllocator" % key)
10131 self._BuildInputData(fn)
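# Typical use is a sketch like the following (the keyword arguments and the
# allocator name are illustrative assumptions based on the keysets above):
#   ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_RELOC,
#                    name="inst1.example.com", relocate_from=["node3"])
#   ial.Run(allocator_name)
#   if not ial.success:
#     ...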
10133 def _ComputeClusterData(self):
10134 """Compute the generic allocator input data.
10136 This is the data that is independent of the actual operation.
10140 cluster_info = cfg.GetClusterInfo()
10143 "version": constants.IALLOCATOR_VERSION,
10144 "cluster_name": cfg.GetClusterName(),
10145 "cluster_tags": list(cluster_info.GetTags()),
10146 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
10147 # we don't have job IDs
10149 iinfo = cfg.GetAllInstancesInfo().values()
10150 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
10154 node_list = cfg.GetNodeList()
10156 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10157 hypervisor_name = self.hypervisor
10158 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10159 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
10160 elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10161 hypervisor_name = cluster_info.enabled_hypervisors[0]
10163 node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
10166 self.rpc.call_all_instances_info(node_list,
10167 cluster_info.enabled_hypervisors)
10168 for nname, nresult in node_data.items():
10169 # first fill in static (config-based) values
10170 ninfo = cfg.GetNodeInfo(nname)
10172 "tags": list(ninfo.GetTags()),
10173 "primary_ip": ninfo.primary_ip,
10174 "secondary_ip": ninfo.secondary_ip,
10175 "offline": ninfo.offline,
10176 "drained": ninfo.drained,
10177 "master_candidate": ninfo.master_candidate,
10180 if not (ninfo.offline or ninfo.drained):
10181 nresult.Raise("Can't get data for node %s" % nname)
10182 node_iinfo[nname].Raise("Can't get node instance info from node %s" %
10184 remote_info = nresult.payload
10186 for attr in ['memory_total', 'memory_free', 'memory_dom0',
10187 'vg_size', 'vg_free', 'cpu_total']:
10188 if attr not in remote_info:
10189 raise errors.OpExecError("Node '%s' didn't return attribute"
10190 " '%s'" % (nname, attr))
10191 if not isinstance(remote_info[attr], int):
10192 raise errors.OpExecError("Node '%s' returned invalid value"
10194 (nname, attr, remote_info[attr]))
10195 # compute memory used by primary instances
10196 i_p_mem = i_p_up_mem = 0
10197 for iinfo, beinfo in i_list:
10198 if iinfo.primary_node == nname:
10199 i_p_mem += beinfo[constants.BE_MEMORY]
10200 if iinfo.name not in node_iinfo[nname].payload:
10203 i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
10204 i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
10205 remote_info['memory_free'] -= max(0, i_mem_diff)
10208 i_p_up_mem += beinfo[constants.BE_MEMORY]
10210 # compute memory used by instances
10212 "total_memory": remote_info['memory_total'],
10213 "reserved_memory": remote_info['memory_dom0'],
10214 "free_memory": remote_info['memory_free'],
10215 "total_disk": remote_info['vg_size'],
10216 "free_disk": remote_info['vg_free'],
10217 "total_cpus": remote_info['cpu_total'],
10218 "i_pri_memory": i_p_mem,
10219 "i_pri_up_memory": i_p_up_mem,
10221 pnr.update(pnr_dyn)
10223 node_results[nname] = pnr
10224 data["nodes"] = node_results
10228 for iinfo, beinfo in i_list:
10230 for nic in iinfo.nics:
10231 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
10232 nic_dict = {"mac": nic.mac,
10234 "mode": filled_params[constants.NIC_MODE],
10235 "link": filled_params[constants.NIC_LINK],
10237 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
10238 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
10239 nic_data.append(nic_dict)
10241 "tags": list(iinfo.GetTags()),
10242 "admin_up": iinfo.admin_up,
10243 "vcpus": beinfo[constants.BE_VCPUS],
10244 "memory": beinfo[constants.BE_MEMORY],
10246 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
10248 "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
10249 "disk_template": iinfo.disk_template,
10250 "hypervisor": iinfo.hypervisor,
10252 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
10254 instance_data[iinfo.name] = pir
10256 data["instances"] = instance_data
10258 self.in_data = data
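# Rough shape of the computed structure (illustrative):
#   {"version": constants.IALLOCATOR_VERSION, "cluster_name": ...,
#    "nodes": {<node name>: {...}}, "instances": {<instance name>: {...}}}
# The mode-specific "request" member is added later in _BuildInputData.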
10260 def _AddNewInstance(self):
10261 """Add new instance data to allocator structure.
10263 This in combination with _AllocatorGetClusterData will create the
10264 correct structure needed as input for the allocator.
10266 The checks for the completeness of the opcode must have already been
10270 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
10272 if self.disk_template in constants.DTS_NET_MIRROR:
10273 self.required_nodes = 2
10275 self.required_nodes = 1
10278 "disk_template": self.disk_template,
10281 "vcpus": self.vcpus,
10282 "memory": self.mem_size,
10283 "disks": self.disks,
10284 "disk_space_total": disk_space,
10286 "required_nodes": self.required_nodes,
10290 def _AddRelocateInstance(self):
10291 """Add relocate instance data to allocator structure.
10293 This in combination with _ComputeClusterData will create the
10294 correct structure needed as input for the allocator.
10296 The checks for the completeness of the opcode must have already been
10300 instance = self.cfg.GetInstanceInfo(self.name)
10301 if instance is None:
10302 raise errors.ProgrammerError("Unknown instance '%s' passed to"
10303 " IAllocator" % self.name)
10305 if instance.disk_template not in constants.DTS_NET_MIRROR:
10306 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
10307 errors.ECODE_INVAL)
10309 if len(instance.secondary_nodes) != 1:
10310 raise errors.OpPrereqError("Instance has not exactly one secondary node",
10311 errors.ECODE_STATE)
10313 self.required_nodes = 1
10314 disk_sizes = [{'size': disk.size} for disk in instance.disks]
10315 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
10319 "disk_space_total": disk_space,
10320 "required_nodes": self.required_nodes,
10321 "relocate_from": self.relocate_from,
10325 def _AddEvacuateNodes(self):
10326 """Add evacuate nodes data to allocator structure.
10330 "evac_nodes": self.evac_nodes
10334 def _BuildInputData(self, fn):
10335 """Build input data structures.
10338 self._ComputeClusterData()
10341 request["type"] = self.mode
10342 self.in_data["request"] = request
10344 self.in_text = serializer.Dump(self.in_data)
10346 def Run(self, name, validate=True, call_fn=None):
10347 """Run an instance allocator and return the results.
10350 if call_fn is None:
10351 call_fn = self.rpc.call_iallocator_runner
10353 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
10354 result.Raise("Failure while running the iallocator script")
10356 self.out_text = result.payload
10358 self._ValidateResult()
10360 def _ValidateResult(self):
10361 """Process the allocator results.
10363 This will process and, if successful, save the result in
10364 self.out_data and the other parameters.
10368 rdict = serializer.Load(self.out_text)
10369 except Exception, err:
10370 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
10372 if not isinstance(rdict, dict):
10373 raise errors.OpExecError("Can't parse iallocator results: not a dict")
10375 # TODO: remove backwards compatibility in later versions
10376 if "nodes" in rdict and "result" not in rdict:
10377 rdict["result"] = rdict["nodes"]
10380 for key in "success", "info", "result":
10381 if key not in rdict:
10382 raise errors.OpExecError("Can't parse iallocator results:"
10383 " missing key '%s'" % key)
10384 setattr(self, key, rdict[key])
10386 if not isinstance(rdict["result"], list):
10387 raise errors.OpExecError("Can't parse iallocator results: 'result' key"
10389 self.out_data = rdict
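# After validation the deserialized reply contains at least the keys
# "success", "info" and "result", e.g. (illustrative):
#   {"success": True, "info": "allocation successful",
#    "result": ["node2.example.com"]}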
10392 class LUTestAllocator(NoHooksLU):
10393 """Run allocator tests.
10395 This LU runs the allocator tests
10399 ("direction", _NoDefault, _TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS)),
10400 ("mode", _NoDefault, _TElemOf(constants.VALID_IALLOCATOR_MODES)),
10401 ("name", _NoDefault, _TNonEmptyString),
10402 ("nics", _NoDefault, _TOr(_TNone, _TListOf(
10403 _TDictOf(_TElemOf(["mac", "ip", "bridge"]),
10404 _TOr(_TNone, _TNonEmptyString))))),
10405 ("disks", _NoDefault, _TOr(_TNone, _TList)),
10406 ("hypervisor", None, _TMaybeString),
10407 ("allocator", None, _TMaybeString),
10408 ("tags", _EmptyList, _TListOf(_TNonEmptyString)),
10409 ("mem_size", None, _TOr(_TNone, _TPositiveInt)),
10410 ("vcpus", None, _TOr(_TNone, _TPositiveInt)),
10411 ("os", None, _TMaybeString),
10412 ("disk_template", None, _TMaybeString),
10413 ("evac_nodes", None, _TOr(_TNone, _TListOf(_TNonEmptyString))),
10416 def CheckPrereq(self):
10417 """Check prerequisites.
10419 This checks the opcode parameters depending on the direction and mode of the test.
10422 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
10423 for attr in ["mem_size", "disks", "disk_template",
10424 "os", "tags", "nics", "vcpus"]:
10425 if not hasattr(self.op, attr):
10426 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
10427 attr, errors.ECODE_INVAL)
10428 iname = self.cfg.ExpandInstanceName(self.op.name)
10429 if iname is not None:
10430 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
10431 iname, errors.ECODE_EXISTS)
10432 if not isinstance(self.op.nics, list):
10433 raise errors.OpPrereqError("Invalid parameter 'nics'",
10434 errors.ECODE_INVAL)
10435 if not isinstance(self.op.disks, list):
10436 raise errors.OpPrereqError("Invalid parameter 'disks'",
10437 errors.ECODE_INVAL)
10438 for row in self.op.disks:
10439 if (not isinstance(row, dict) or
10440 "size" not in row or
10441 not isinstance(row["size"], int) or
10442 "mode" not in row or
10443 row["mode"] not in ['r', 'w']):
10444 raise errors.OpPrereqError("Invalid contents of the 'disks'"
10445 " parameter", errors.ECODE_INVAL)
10446 if self.op.hypervisor is None:
10447 self.op.hypervisor = self.cfg.GetHypervisorType()
10448 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
10449 fname = _ExpandInstanceName(self.cfg, self.op.name)
10450 self.op.name = fname
10451 self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
10452 elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
10453 if not hasattr(self.op, "evac_nodes"):
10454 raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
10455 " opcode input", errors.ECODE_INVAL)
10457 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
10458 self.op.mode, errors.ECODE_INVAL)
10460 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
10461 if self.op.allocator is None:
10462 raise errors.OpPrereqError("Missing allocator name",
10463 errors.ECODE_INVAL)
10464 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
10465 raise errors.OpPrereqError("Wrong allocator test '%s'" %
10466 self.op.direction, errors.ECODE_INVAL)
10468 def Exec(self, feedback_fn):
10469 """Run the allocator test.
10472 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
10473 ial = IAllocator(self.cfg, self.rpc,
10476 mem_size=self.op.mem_size,
10477 disks=self.op.disks,
10478 disk_template=self.op.disk_template,
10482 vcpus=self.op.vcpus,
10483 hypervisor=self.op.hypervisor,
10485 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
10486 ial = IAllocator(self.cfg, self.rpc,
10489 relocate_from=list(self.relocate_from),
10491 elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
10492 ial = IAllocator(self.cfg, self.rpc,
10494 evac_nodes=self.op.evac_nodes)
10496 raise errors.ProgrammerError("Unhandled mode %s in"
10497 " LUTestAllocator.Exec", self.op.mode)
10499 if self.op.direction == constants.IALLOCATOR_DIR_IN:
10500 result = ial.in_text
10502 ial.Run(self.op.allocator, validate=False)
10503 result = ial.out_text