# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0613,W0201

import os
import time
import re
import logging

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.
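
  As an illustration only (the class name and opcode requirements are
  hypothetical), a minimal LU could look like::

    class LUHypotheticalNoop(NoHooksLU):
      _OP_REQP = []

      def ExpandNames(self):
        self.needed_locks = {}

      def CheckPrereq(self):
        pass

      def Exec(self, feedback_fn):
        feedback_fn("doing nothing")

  """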

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.
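
    A purely illustrative implementation (the "retries" parameter is
    hypothetical) could be::

      def CheckArguments(self):
        if not hasattr(self.op, "retries"):
          self.op.retries = 0
        if not isinstance(self.op.retries, int) or self.op.retries < 0:
          raise errors.OpPrereqError("Invalid retry count")

    """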

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::
      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS
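
    A typical implementation, mirroring the usage example documented for
    _LockInstancesNodes below, would be::

      def DeclareLocks(self, level):
        if level == locking.LEVEL_NODE:
          self._LockInstancesNodes()

    """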

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can override it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name
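
  # Example usage of _ExpandAndLockInstance from an LU's ExpandNames
  # (a sketch; the LOCKS_REPLACE recalculation shown here is the pattern
  # consumed by _LockInstancesNodes below):
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE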

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instances' nodes,
    or to just lock primary or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)
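
# Example (sketch): an LU whose opcode carries a "nodes" list can expand
# and validate it from ExpandNames with
#   self.wanted = _GetWantedNodes(self, self.op.nodes)
# which raises errors.OpPrereqError for unknown names and returns the
# expanded names sorted through utils.NiceSort.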


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = static.Copy()
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks):
  """Builds instance related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @rtype: dict
  @return: the hook environment for this instance

  """
492 "INSTANCE_NAME": name,
493 "INSTANCE_PRIMARY": primary_node,
494 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
495 "INSTANCE_OS_TYPE": os_type,
496 "INSTANCE_STATUS": str_status,
497 "INSTANCE_MEMORY": memory,
498 "INSTANCE_VCPUS": vcpus,
499 "INSTANCE_DISK_TEMPLATE": disk_template,
503 nic_count = len(nics)
504 for idx, (ip, bridge, mac) in enumerate(nics):
507 env["INSTANCE_NIC%d_IP" % idx] = ip
508 env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
509 env["INSTANCE_NIC%d_MAC" % idx] = mac
513 env["INSTANCE_NIC_COUNT"] = nic_count
516 disk_count = len(disks)
517 for idx, (size, mode) in enumerate(disks):
518 env["INSTANCE_DISK%d_SIZE" % idx] = size
519 env["INSTANCE_DISK%d_MODE" % idx] = mode
523 env["INSTANCE_DISK_COUNT"] = disk_count


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
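
# Sketch of typical use from an instance LU's BuildHooksEnv:
#   env = _BuildInstanceHookEnvByObject(self, self.instance)
# an optional override dict can force individual values, e.g.
#   _BuildInstanceHookEnvByObject(self, self.instance,
#                                 override={"status": False})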


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used DRBD minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                    (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                      (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                      (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                      (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
           not instance in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                    (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                      (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                      (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    n_drained = [] # List of nodes being drained
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)
962 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
963 node_verify_param = {
964 constants.NV_FILELIST: file_names,
965 constants.NV_NODELIST: [node.name for node in nodeinfo
966 if not node.offline],
967 constants.NV_HYPERVISOR: hypervisors,
968 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
969 node.secondary_ip) for node in nodeinfo
970 if not node.offline],
971 constants.NV_INSTANCELIST: hypervisors,
972 constants.NV_VERSION: None,
973 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
975 if vg_name is not None:
976 node_verify_param[constants.NV_VGLIST] = None
977 node_verify_param[constants.NV_LVLIST] = vg_name
978 node_verify_param[constants.NV_DRBDLIST] = None
979 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
980 self.cfg.GetClusterName())
982 cluster = self.cfg.GetClusterInfo()
983 master_node = self.cfg.GetMasterNode()
984 all_drbd_map = self.cfg.ComputeDRBDMap()

    for node_i in nodeinfo:
      node = node_i.name
      nresult = all_nvinfo[node].data

      if node_i.offline:
        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained.append(node)
      else:
        ntype = "regular"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        if instance not in instanceinfo:
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
                      instance)
          # ghost instance should not be running, but otherwise we
          # don't give double warnings (both ghost instance and
          # unallocated minor in use)
          node_drbd[minor] = (instance, False)
        else:
          instance = instanceinfo[instance]
          node_drbd[minor] = (instance.name, instance.admin_up)
      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files,
                                node_drbd, vg_name)
      bad = bad or result

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if vg_name is None:
        node_volume[node] = {}
      elif isinstance(lvdata, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, utils.SafeEncode(lvdata)))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue
      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
          }
        # FIXME: devise a free space model for file based instances as well
        if vg_name is not None:
          if (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST]):
            feedback_fn("  - ERROR: node %s didn't return data for the"
                        " volume group '%s' - it is either missing or broken" %
                        (node, vg_name))
            bad = True
            continue
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
      except (ValueError, KeyError):
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
                    " from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn, n_offline)
      bad = bad or result
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      elif pnode not in n_offline:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        elif snode not in n_offline:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))
          bad = True
        if snode in n_offline:
          inst_nodes_offline.append(snode)

      if inst_nodes_offline:
        # warn that the instance lives on offline nodes, and set bad=True
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
                    ", ".join(inst_nodes_offline))
        bad = True
1147 feedback_fn("* Verifying orphan volumes")
1148 result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1152 feedback_fn("* Verifying remaining instances")
1153 result = self._VerifyOrphanInstances(instancelist, node_instance,
1157 if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1158 feedback_fn("* Verifying N+1 Memory redundancy")
1159 result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1162 feedback_fn("* Other Notes")
1164 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
1165 % len(i_non_redundant))
1167 if i_non_a_balanced:
1168 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
1169 % len(i_non_a_balanced))
1172 feedback_fn(" - NOTICE: %d offline node(s) found." % len(n_offline))
1175 feedback_fn(" - NOTICE: %d drained node(s) found." % len(n_drained))

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass
1252 """Verify integrity of cluster disks.
1255 result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
1257 vg_name = self.cfg.GetVGName()
1258 nodes = utils.NiceSort(self.cfg.GetNodeList())
1259 instances = [self.cfg.GetInstanceInfo(name)
1260 for name in self.cfg.GetInstanceList()]
1263 for inst in instances:
1265 if (not inst.admin_up or
1266 inst.disk_template not in constants.DTS_NET_MIRROR):
1268 inst.MapLVsByNode(inst_lvs)
1269 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1270 for node, vol_list in inst_lvs.iteritems():
1271 for vol in vol_list:
1272 nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        msg = to_result.RemoteFailMsg()
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV
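
# For example, a DRBD disk whose children are LV-backed devices is detected
# as lvm-based through the recursion above, while a disk with no children
# and a non-LV dev_type is not.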


class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)


def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetNodeList()
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)
  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                   ])
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    dist_files.update(hv_class.GetAncillaryFiles())
  # 3. Perform the files upload
  for fname in dist_files:
    if os.path.exists(fname):
      result = lu.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.items():
        msg = to_result.RemoteFailMsg()
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (fname, to_node, msg))
          lu.proc.LogWarning(msg)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary.

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].failed]
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in good_nodes:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
      - the node exists in the configuration
      - it does not have primary or secondary instances
      - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass
1929 """Computes the list of nodes and their attributes.
1932 all_info = self.cfg.GetAllNodesInfo()
1934 nodenames = self.acquired_locks[locking.LEVEL_NODE]
1935 elif self.wanted != locking.ALL_SET:
1936 nodenames = self.wanted
1937 missing = set(nodenames).difference(all_info.keys())
1939 raise errors.OpExecError(
1940 "Some nodes were removed before retrieving their data: %s" % missing)
1942 nodenames = all_info.keys()
1944 nodenames = utils.NiceSort(nodenames)
1945 nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
1995 for node in nodelist:
1997 for field in self.op.output_fields:
2000 elif field == "pinst_list":
2001 val = list(node_to_primary[node.name])
2002 elif field == "sinst_list":
2003 val = list(node_to_secondary[node.name])
2004 elif field == "pinst_cnt":
2005 val = len(node_to_primary[node.name])
2006 elif field == "sinst_cnt":
2007 val = len(node_to_secondary[node.name])
2008 elif field == "pip":
2009 val = node.primary_ip
2010 elif field == "sip":
2011 val = node.secondary_ip
2012 elif field == "tags":
2013 val = list(node.GetTags())
2014 elif field == "serial_no":
2015 val = node.serial_no
2016 elif field == "master_candidate":
2017 val = node.master_candidate
2018 elif field == "master":
2019 val = node.name == master_node
2020 elif field == "offline":
2022 elif field == "drained":
2024 elif self._FIELDS_DYNAMIC.Matches(field):
2025 val = live_data[node.name].get(field, None)
2027 raise errors.ParameterError(field)
2028 node_output.append(val)
2029 output.append(node_output)
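# Illustrative sketch (editor's note, not in the original source): for
# self.op.output_fields = ["name", "pinst_cnt", "mfree"], each row
# appended above looks like
#
#   ["node1.example.com", 2, 3512]
#
# where "mfree" comes from live_data and may be None if the node RPC
# failed or returned no data.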
2034 class LUQueryNodeVolumes(NoHooksLU):
2035 """Logical unit for getting volumes on node(s).
2038 _OP_REQP = ["nodes", "output_fields"]
2040 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2041 _FIELDS_STATIC = utils.FieldSet("node")
2043 def ExpandNames(self):
2044 _CheckOutputFields(static=self._FIELDS_STATIC,
2045 dynamic=self._FIELDS_DYNAMIC,
2046 selected=self.op.output_fields)
2048 self.needed_locks = {}
2049 self.share_locks[locking.LEVEL_NODE] = 1
2050 if not self.op.nodes:
2051 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2053 self.needed_locks[locking.LEVEL_NODE] = \
2054 _GetWantedNodes(self, self.op.nodes)
2056 def CheckPrereq(self):
2057 """Check prerequisites.
2059 This checks that the fields required are valid output fields.
2062 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2064 def Exec(self, feedback_fn):
2065 """Computes the list of nodes and their attributes.
2068 nodenames = self.nodes
2069 volumes = self.rpc.call_node_volumes(nodenames)
2071 ilist = [self.cfg.GetInstanceInfo(iname) for iname
2072 in self.cfg.GetInstanceList()]
2074 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2077 for node in nodenames:
2078 if node not in volumes or volumes[node].failed or not volumes[node].data:
2081 node_vols = volumes[node].data[:]
2082 node_vols.sort(key=lambda vol: vol['dev'])
2084 for vol in node_vols:
2086 for field in self.op.output_fields:
2089 elif field == "phys":
2093 elif field == "name":
2095 elif field == "size":
2096 val = int(float(vol['size']))
2097 elif field == "instance":
2099 if node not in lv_by_node[inst]:
2101 if vol['name'] in lv_by_node[inst][node]:
2107 raise errors.ParameterError(field)
2108 node_output.append(str(val))
2110 output.append(node_output)
2115 class LUAddNode(LogicalUnit):
2116 """Logical unit for adding node to the cluster.
2120 HTYPE = constants.HTYPE_NODE
2121 _OP_REQP = ["node_name"]
2123 def BuildHooksEnv(self):
2126 This will run on all nodes before, and on all nodes + the new node after.
2130 "OP_TARGET": self.op.node_name,
2131 "NODE_NAME": self.op.node_name,
2132 "NODE_PIP": self.op.primary_ip,
2133 "NODE_SIP": self.op.secondary_ip,
2135 nodes_0 = self.cfg.GetNodeList()
2136 nodes_1 = nodes_0 + [self.op.node_name, ]
2137 return env, nodes_0, nodes_1
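# A sketch of the resulting hooks environment (hypothetical values,
# editor's illustration only):
#
#   env = {"OP_TARGET": "node4.example.com",
#          "NODE_NAME": "node4.example.com",
#          "NODE_PIP": "192.0.2.14",
#          "NODE_SIP": "198.51.100.14"}
#
# nodes_0 (all current nodes) gets the pre-hooks; nodes_1 additionally
# includes the node being added and gets the post-hooks.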
2139 def CheckPrereq(self):
2140 """Check prerequisites.
2143 - the new node is not already in the config
2145 - its parameters (single/dual homed) match the cluster
2147 Any errors are signalled by raising errors.OpPrereqError.
2150 node_name = self.op.node_name
2153 dns_data = utils.HostInfo(node_name)
2155 node = dns_data.name
2156 primary_ip = self.op.primary_ip = dns_data.ip
2157 secondary_ip = getattr(self.op, "secondary_ip", None)
2158 if secondary_ip is None:
2159 secondary_ip = primary_ip
2160 if not utils.IsValidIP(secondary_ip):
2161 raise errors.OpPrereqError("Invalid secondary IP given")
2162 self.op.secondary_ip = secondary_ip
2164 node_list = cfg.GetNodeList()
2165 if not self.op.readd and node in node_list:
2166 raise errors.OpPrereqError("Node %s is already in the configuration" %
2168 elif self.op.readd and node not in node_list:
2169 raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2171 for existing_node_name in node_list:
2172 existing_node = cfg.GetNodeInfo(existing_node_name)
2174 if self.op.readd and node == existing_node_name:
2175 if (existing_node.primary_ip != primary_ip or
2176 existing_node.secondary_ip != secondary_ip):
2177 raise errors.OpPrereqError("Readded node doesn't have the same IP"
2178 " address configuration as before")
2181 if (existing_node.primary_ip == primary_ip or
2182 existing_node.secondary_ip == primary_ip or
2183 existing_node.primary_ip == secondary_ip or
2184 existing_node.secondary_ip == secondary_ip):
2185 raise errors.OpPrereqError("New node ip address(es) conflict with"
2186 " existing node %s" % existing_node.name)
2188 # check that the type of the node (single versus dual homed) is the
2189 # same as for the master
2190 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2191 master_singlehomed = myself.secondary_ip == myself.primary_ip
2192 newbie_singlehomed = secondary_ip == primary_ip
2193 if master_singlehomed != newbie_singlehomed:
2194 if master_singlehomed:
2195 raise errors.OpPrereqError("The master has no private ip but the"
2196 " new node has one")
2198 raise errors.OpPrereqError("The master has a private ip but the"
2199 " new node doesn't have one")
2201 # check reachability
2202 if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2203 raise errors.OpPrereqError("Node not reachable by ping")
2205 if not newbie_singlehomed:
2206 # check reachability from my secondary ip to newbie's secondary ip
2207 if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2208 source=myself.secondary_ip):
2209 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2210 " based ping to noded port")
2212 cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2213 mc_now, _ = self.cfg.GetMasterCandidateStats()
2214 master_candidate = mc_now < cp_size
2216 self.new_node = objects.Node(name=node,
2217 primary_ip=primary_ip,
2218 secondary_ip=secondary_ip,
2219 master_candidate=master_candidate,
2220 offline=False, drained=False)
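# Worked example of the auto-promotion rule above (editor's
# illustration): with candidate_pool_size = 10 and mc_now = 9, the new
# node joins as a master candidate; with mc_now = 10 the pool is full
# and it joins as a plain node.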
2222 def Exec(self, feedback_fn):
2223 """Adds the new node to the cluster.
2226 new_node = self.new_node
2227 node = new_node.name
2229 # check connectivity
2230 result = self.rpc.call_version([node])[node]
2233 if constants.PROTOCOL_VERSION == result.data:
2234 logging.info("Communication to node %s fine, sw version %s match",
2237 raise errors.OpExecError("Version mismatch master version %s,"
2238 " node version %s" %
2239 (constants.PROTOCOL_VERSION, result.data))
2241 raise errors.OpExecError("Cannot get version from the new node")
2244 logging.info("Copy ssh key to node %s", node)
2245 priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2247 keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2248 constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2254 keyarray.append(f.read())
2258 result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2260 keyarray[3], keyarray[4], keyarray[5])
2262 msg = result.RemoteFailMsg()
2264 raise errors.OpExecError("Cannot transfer ssh keys to the"
2265 " new node: %s" % msg)
2267 # Add node to our /etc/hosts, and add key to known_hosts
2268 if self.cfg.GetClusterInfo().modify_etc_hosts:
2269 utils.AddHostToEtcHosts(new_node.name)
2271 if new_node.secondary_ip != new_node.primary_ip:
2272 result = self.rpc.call_node_has_ip_address(new_node.name,
2273 new_node.secondary_ip)
2274 if result.failed or not result.data:
2275 raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2276 " you gave (%s). Please fix and re-run this"
2277 " command." % new_node.secondary_ip)
2279 node_verify_list = [self.cfg.GetMasterNode()]
2280 node_verify_param = {
2282 # TODO: do a node-net-test as well?
2285 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2286 self.cfg.GetClusterName())
2287 for verifier in node_verify_list:
2288 if result[verifier].failed or not result[verifier].data:
2289 raise errors.OpExecError("Cannot communicate with %s's node daemon"
2290 " for remote verification" % verifier)
2291 if result[verifier].data['nodelist']:
2292 for failed in result[verifier].data['nodelist']:
2293 feedback_fn("ssh/hostname verification failed %s -> %s" %
2294 (verifier, result[verifier].data['nodelist'][failed]))
2295 raise errors.OpExecError("ssh/hostname verification failed.")
2298 _RedistributeAncillaryFiles(self)
2299 self.context.ReaddNode(new_node)
2301 _RedistributeAncillaryFiles(self, additional_nodes=node)
2302 self.context.AddNode(new_node)
2305 class LUSetNodeParams(LogicalUnit):
2306 """Modifies the parameters of a node.
2309 HPATH = "node-modify"
2310 HTYPE = constants.HTYPE_NODE
2311 _OP_REQP = ["node_name"]
2314 def CheckArguments(self):
2315 node_name = self.cfg.ExpandNodeName(self.op.node_name)
2316 if node_name is None:
2317 raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2318 self.op.node_name = node_name
2319 _CheckBooleanOpField(self.op, 'master_candidate')
2320 _CheckBooleanOpField(self.op, 'offline')
2321 _CheckBooleanOpField(self.op, 'drained')
2322 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2323 if all_mods.count(None) == 3:
2324 raise errors.OpPrereqError("Please pass at least one modification")
2325 if all_mods.count(True) > 1:
2326 raise errors.OpPrereqError("Can't set the node into more than one"
2327 " state at the same time")
2329 def ExpandNames(self):
2330 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2332 def BuildHooksEnv(self):
2335 This runs on the master node.
2339 "OP_TARGET": self.op.node_name,
2340 "MASTER_CANDIDATE": str(self.op.master_candidate),
2341 "OFFLINE": str(self.op.offline),
2342 "DRAINED": str(self.op.drained),
2344 nl = [self.cfg.GetMasterNode(),
2348 def CheckPrereq(self):
2349 """Check prerequisites.
2351 This checks the node state and the master-candidate pool constraints.
2354 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2356 if ((self.op.master_candidate == False or self.op.offline == True or
2357 self.op.drained == True) and node.master_candidate):
2358 # we will demote the node from master_candidate
2359 if self.op.node_name == self.cfg.GetMasterNode():
2360 raise errors.OpPrereqError("The master node has to be a"
2361 " master candidate, online and not drained")
2362 cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2363 num_candidates, _ = self.cfg.GetMasterCandidateStats()
2364 if num_candidates <= cp_size:
2365 msg = ("Not enough master candidates (desired"
2366 " %d, new value will be %d)" % (cp_size, num_candidates-1))
2368 self.LogWarning(msg)
2370 raise errors.OpPrereqError(msg)
2372 if (self.op.master_candidate == True and
2373 ((node.offline and not self.op.offline == False) or
2374 (node.drained and not self.op.drained == False))):
2375 raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
2376 " to master_candidate" % node.name)
2380 def Exec(self, feedback_fn):
2389 if self.op.offline is not None:
2390 node.offline = self.op.offline
2391 result.append(("offline", str(self.op.offline)))
2392 if self.op.offline == True:
2393 if node.master_candidate:
2394 node.master_candidate = False
2396 result.append(("master_candidate", "auto-demotion due to offline"))
2398 node.drained = False
2399 result.append(("drained", "clear drained status due to offline"))
2401 if self.op.master_candidate is not None:
2402 node.master_candidate = self.op.master_candidate
2404 result.append(("master_candidate", str(self.op.master_candidate)))
2405 if self.op.master_candidate == False:
2406 rrc = self.rpc.call_node_demote_from_mc(node.name)
2407 msg = rrc.RemoteFailMsg()
2409 self.LogWarning("Node failed to demote itself: %s" % msg)
2411 if self.op.drained is not None:
2412 node.drained = self.op.drained
2413 result.append(("drained", str(self.op.drained)))
2414 if self.op.drained == True:
2415 if node.master_candidate:
2416 node.master_candidate = False
2418 result.append(("master_candidate", "auto-demotion due to drain"))
2420 node.offline = False
2421 result.append(("offline", "clear offline status due to drain"))
2423 # this will trigger configuration file update, if needed
2424 self.cfg.Update(node)
2425 # this will trigger job queue propagation or cleanup
2427 self.context.ReaddNode(node)
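# The accumulated result is a list of (parameter, new value) pairs for
# caller feedback; e.g. offlining a master candidate would yield
# something like (illustrative values only):
#
#   [("offline", "True"),
#    ("master_candidate", "auto-demotion due to offline")]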
2432 class LUPowercycleNode(NoHooksLU):
2433 """Powercycles a node.
2436 _OP_REQP = ["node_name", "force"]
2439 def CheckArguments(self):
2440 node_name = self.cfg.ExpandNodeName(self.op.node_name)
2441 if node_name is None:
2442 raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2443 self.op.node_name = node_name
2444 if node_name == self.cfg.GetMasterNode() and not self.op.force:
2445 raise errors.OpPrereqError("The node is the master and the force"
2446 " parameter was not set")
2448 def ExpandNames(self):
2449 """Locking for PowercycleNode.
2451 This is a last-resort option and shouldn't block on other
2452 jobs. Therefore, we grab no locks.
2455 self.needed_locks = {}
2457 def CheckPrereq(self):
2458 """Check prerequisites.
2460 This LU has no prereqs.
2465 def Exec(self, feedback_fn):
2469 result = self.rpc.call_node_powercycle(self.op.node_name,
2470 self.cfg.GetHypervisorType())
2471 msg = result.RemoteFailMsg()
2473 raise errors.OpExecError("Failed to schedule the reboot: %s" % msg)
2474 return result.payload
2477 class LUQueryClusterInfo(NoHooksLU):
2478 """Query cluster configuration.
2484 def ExpandNames(self):
2485 self.needed_locks = {}
2487 def CheckPrereq(self):
2488 """No prerequsites needed for this LU.
2493 def Exec(self, feedback_fn):
2494 """Return cluster config.
2497 cluster = self.cfg.GetClusterInfo()
2499 "software_version": constants.RELEASE_VERSION,
2500 "protocol_version": constants.PROTOCOL_VERSION,
2501 "config_version": constants.CONFIG_VERSION,
2502 "os_api_version": constants.OS_API_VERSION,
2503 "export_version": constants.EXPORT_VERSION,
2504 "architecture": (platform.architecture()[0], platform.machine()),
2505 "name": cluster.cluster_name,
2506 "master": cluster.master_node,
2507 "default_hypervisor": cluster.default_hypervisor,
2508 "enabled_hypervisors": cluster.enabled_hypervisors,
2509 "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
2510 for hypervisor in cluster.enabled_hypervisors]),
2511 "beparams": cluster.beparams,
2512 "candidate_pool_size": cluster.candidate_pool_size,
2513 "default_bridge": cluster.default_bridge,
2514 "master_netdev": cluster.master_netdev,
2515 "volume_group_name": cluster.volume_group_name,
2516 "file_storage_dir": cluster.file_storage_dir,
2522 class LUQueryConfigValues(NoHooksLU):
2523 """Return configuration values.
2528 _FIELDS_DYNAMIC = utils.FieldSet()
2529 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2531 def ExpandNames(self):
2532 self.needed_locks = {}
2534 _CheckOutputFields(static=self._FIELDS_STATIC,
2535 dynamic=self._FIELDS_DYNAMIC,
2536 selected=self.op.output_fields)
2538 def CheckPrereq(self):
2539 """No prerequisites.
2544 def Exec(self, feedback_fn):
2545 """Dump a representation of the cluster config to the standard output.
2549 for field in self.op.output_fields:
2550 if field == "cluster_name":
2551 entry = self.cfg.GetClusterName()
2552 elif field == "master_node":
2553 entry = self.cfg.GetMasterNode()
2554 elif field == "drain_flag":
2555 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2557 raise errors.ParameterError(field)
2558 values.append(entry)
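# Sketch (editor's illustration): for output_fields = ["cluster_name",
# "drain_flag"] the returned list would be e.g.
# ["cluster.example.com", False], with values in the same order as the
# requested fields.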
2562 class LUActivateInstanceDisks(NoHooksLU):
2563 """Bring up an instance's disks.
2566 _OP_REQP = ["instance_name"]
2569 def ExpandNames(self):
2570 self._ExpandAndLockInstance()
2571 self.needed_locks[locking.LEVEL_NODE] = []
2572 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2574 def DeclareLocks(self, level):
2575 if level == locking.LEVEL_NODE:
2576 self._LockInstancesNodes()
2578 def CheckPrereq(self):
2579 """Check prerequisites.
2581 This checks that the instance is in the cluster.
2584 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2585 assert self.instance is not None, \
2586 "Cannot retrieve locked instance %s" % self.op.instance_name
2587 _CheckNodeOnline(self, self.instance.primary_node)
2589 def Exec(self, feedback_fn):
2590 """Activate the disks.
2593 disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2595 raise errors.OpExecError("Cannot activate block devices")
2600 def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2601 """Prepare the block devices for an instance.
2603 This sets up the block devices on all nodes.
2605 @type lu: L{LogicalUnit}
2606 @param lu: the logical unit on whose behalf we execute
2607 @type instance: L{objects.Instance}
2608 @param instance: the instance for whose disks we assemble
2609 @type ignore_secondaries: boolean
2610 @param ignore_secondaries: if true, errors on secondary nodes
2611 won't result in an error return from the function
2612 @return: a tuple (disks_ok, device_info); disks_ok is False if the
2613 operation failed, and device_info is a list of (host,
2614 instance_visible_name, node_visible_name) tuples mapping node devices to instance devices
2619 iname = instance.name
2620 # With the two-pass mechanism we try to reduce the window of
2621 # opportunity for the race condition of switching DRBD to primary
2622 # before the handshake has occurred, but we do not eliminate it
2624 # The proper fix would be to wait (with some limits) until the
2625 # connection has been made and drbd transitions from WFConnection
2626 # into any other network-connected state (Connected, SyncTarget,
2629 # 1st pass, assemble on all nodes in secondary mode
2630 for inst_disk in instance.disks:
2631 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2632 lu.cfg.SetDiskID(node_disk, node)
2633 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2634 msg = result.RemoteFailMsg()
2636 lu.proc.LogWarning("Could not prepare block device %s on node %s"
2637 " (is_primary=False, pass=1): %s",
2638 inst_disk.iv_name, node, msg)
2639 if not ignore_secondaries:
2642 # FIXME: race condition on drbd migration to primary
2644 # 2nd pass, do only the primary node
2645 for inst_disk in instance.disks:
2646 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2647 if node != instance.primary_node:
2649 lu.cfg.SetDiskID(node_disk, node)
2650 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2651 msg = result.RemoteFailMsg()
2653 lu.proc.LogWarning("Could not prepare block device %s on node %s"
2654 " (is_primary=True, pass=2): %s",
2655 inst_disk.iv_name, node, msg)
2657 device_info.append((instance.primary_node, inst_disk.iv_name,
2660 # leave the disks configured for the primary node
2661 # this is a workaround that would be fixed better by
2662 # improving the logical/physical id handling
2663 for disk in instance.disks:
2664 lu.cfg.SetDiskID(disk, instance.primary_node)
2666 return disks_ok, device_info
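# Minimal usage sketch (editor's note), mirroring LUActivateInstanceDisks
# above: callers check disks_ok and roll back on failure, e.g.
#
#   disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     _ShutdownInstanceDisks(lu, instance)
#     raise errors.OpExecError("Cannot activate block devices")
#
# device_info describes the assembled devices on the primary node and
# is suitable for user feedback.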
2669 def _StartInstanceDisks(lu, instance, force):
2670 """Start the disks of an instance.
2673 disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2674 ignore_secondaries=force)
2676 _ShutdownInstanceDisks(lu, instance)
2677 if force is not None and not force:
2678 lu.proc.LogWarning("", hint="If the message above refers to a"
2680 " you can retry the operation using '--force'.")
2681 raise errors.OpExecError("Disk consistency error")
2684 class LUDeactivateInstanceDisks(NoHooksLU):
2685 """Shutdown an instance's disks.
2688 _OP_REQP = ["instance_name"]
2691 def ExpandNames(self):
2692 self._ExpandAndLockInstance()
2693 self.needed_locks[locking.LEVEL_NODE] = []
2694 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2696 def DeclareLocks(self, level):
2697 if level == locking.LEVEL_NODE:
2698 self._LockInstancesNodes()
2700 def CheckPrereq(self):
2701 """Check prerequisites.
2703 This checks that the instance is in the cluster.
2706 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2707 assert self.instance is not None, \
2708 "Cannot retrieve locked instance %s" % self.op.instance_name
2710 def Exec(self, feedback_fn):
2711 """Deactivate the disks
2714 instance = self.instance
2715 _SafeShutdownInstanceDisks(self, instance)
2718 def _SafeShutdownInstanceDisks(lu, instance):
2719 """Shutdown block devices of an instance.
2721 This function checks if an instance is running before calling
2722 _ShutdownInstanceDisks.
2725 ins_l = lu.rpc.call_instance_list([instance.primary_node],
2726 [instance.hypervisor])
2727 ins_l = ins_l[instance.primary_node]
2728 if ins_l.failed or not isinstance(ins_l.data, list):
2729 raise errors.OpExecError("Can't contact node '%s'" %
2730 instance.primary_node)
2732 if instance.name in ins_l.data:
2733 raise errors.OpExecError("Instance is running, can't shutdown"
2736 _ShutdownInstanceDisks(lu, instance)
2739 def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2740 """Shutdown block devices of an instance.
2742 This does the shutdown on all nodes of the instance.
2744 If ignore_primary is true, errors on the primary node are ignored.
2749 for disk in instance.disks:
2750 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2751 lu.cfg.SetDiskID(top_disk, node)
2752 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2753 msg = result.RemoteFailMsg()
2755 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
2756 disk.iv_name, node, msg)
2757 if not ignore_primary or node != instance.primary_node:
2762 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2763 """Checks if a node has enough free memory.
2765 This function checks if a given node has the needed amount of free
2766 memory. In case the node has less memory or we cannot get the
2767 information from the node, this function raises an OpPrereqError
2770 @type lu: L{LogicalUnit}
2771 @param lu: a logical unit from which we get configuration data
2773 @param node: the node to check
2774 @type reason: C{str}
2775 @param reason: string to use in the error message
2776 @type requested: C{int}
2777 @param requested: the amount of memory in MiB to check for
2778 @type hypervisor_name: C{str}
2779 @param hypervisor_name: the hypervisor to ask for memory stats
2780 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2781 we cannot check the node
2784 nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2785 nodeinfo[node].Raise()
2786 free_mem = nodeinfo[node].data.get('memory_free')
2787 if not isinstance(free_mem, int):
2788 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2789 " was '%s'" % (node, free_mem))
2790 if requested > free_mem:
2791 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2792 " needed %s MiB, available %s MiB" %
2793 (node, reason, requested, free_mem))
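# Usage example, mirroring the call sites in this module: before
# starting an instance one checks its primary node, e.g.
#
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
#
# which raises OpPrereqError unless the node reports at least that much
# free memory.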
2796 class LUStartupInstance(LogicalUnit):
2797 """Starts an instance.
2800 HPATH = "instance-start"
2801 HTYPE = constants.HTYPE_INSTANCE
2802 _OP_REQP = ["instance_name", "force"]
2805 def ExpandNames(self):
2806 self._ExpandAndLockInstance()
2808 def BuildHooksEnv(self):
2811 This runs on master, primary and secondary nodes of the instance.
2815 "FORCE": self.op.force,
2817 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2818 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2821 def CheckPrereq(self):
2822 """Check prerequisites.
2824 This checks that the instance is in the cluster.
2827 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2828 assert self.instance is not None, \
2829 "Cannot retrieve locked instance %s" % self.op.instance_name
2832 self.beparams = getattr(self.op, "beparams", {})
2834 if not isinstance(self.beparams, dict):
2835 raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
2836 " dict" % (type(self.beparams), ))
2837 # fill the beparams dict
2838 utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
2839 self.op.beparams = self.beparams
2842 self.hvparams = getattr(self.op, "hvparams", {})
2844 if not isinstance(self.hvparams, dict):
2845 raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
2846 " dict" % (type(self.hvparams), ))
2848 # check hypervisor parameter syntax (locally)
2849 cluster = self.cfg.GetClusterInfo()
2850 utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
2851 filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
2853 filled_hvp.update(self.hvparams)
2854 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
2855 hv_type.CheckParameterSyntax(filled_hvp)
2856 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
2857 self.op.hvparams = self.hvparams
2859 _CheckNodeOnline(self, instance.primary_node)
2861 bep = self.cfg.GetClusterInfo().FillBE(instance)
2862 # check bridge existence
2863 _CheckInstanceBridgesExist(self, instance)
2865 remote_info = self.rpc.call_instance_info(instance.primary_node,
2867 instance.hypervisor)
2869 if not remote_info.data:
2870 _CheckNodeFreeMemory(self, instance.primary_node,
2871 "starting instance %s" % instance.name,
2872 bep[constants.BE_MEMORY], instance.hypervisor)
2874 def Exec(self, feedback_fn):
2875 """Start the instance.
2878 instance = self.instance
2879 force = self.op.force
2881 self.cfg.MarkInstanceUp(instance.name)
2883 node_current = instance.primary_node
2885 _StartInstanceDisks(self, instance, force)
2887 result = self.rpc.call_instance_start(node_current, instance,
2888 self.hvparams, self.beparams)
2889 msg = result.RemoteFailMsg()
2891 _ShutdownInstanceDisks(self, instance)
2892 raise errors.OpExecError("Could not start instance: %s" % msg)
2895 class LURebootInstance(LogicalUnit):
2896 """Reboot an instance.
2899 HPATH = "instance-reboot"
2900 HTYPE = constants.HTYPE_INSTANCE
2901 _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2904 def ExpandNames(self):
2905 if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2906 constants.INSTANCE_REBOOT_HARD,
2907 constants.INSTANCE_REBOOT_FULL]:
2908 raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2909 (constants.INSTANCE_REBOOT_SOFT,
2910 constants.INSTANCE_REBOOT_HARD,
2911 constants.INSTANCE_REBOOT_FULL))
2912 self._ExpandAndLockInstance()
2914 def BuildHooksEnv(self):
2917 This runs on master, primary and secondary nodes of the instance.
2921 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2922 "REBOOT_TYPE": self.op.reboot_type,
2924 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2925 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2928 def CheckPrereq(self):
2929 """Check prerequisites.
2931 This checks that the instance is in the cluster.
2934 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2935 assert self.instance is not None, \
2936 "Cannot retrieve locked instance %s" % self.op.instance_name
2938 _CheckNodeOnline(self, instance.primary_node)
2940 # check bridge existence
2941 _CheckInstanceBridgesExist(self, instance)
2943 def Exec(self, feedback_fn):
2944 """Reboot the instance.
2947 instance = self.instance
2948 ignore_secondaries = self.op.ignore_secondaries
2949 reboot_type = self.op.reboot_type
2951 node_current = instance.primary_node
2953 if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2954 constants.INSTANCE_REBOOT_HARD]:
2955 for disk in instance.disks:
2956 self.cfg.SetDiskID(disk, node_current)
2957 result = self.rpc.call_instance_reboot(node_current, instance,
2959 msg = result.RemoteFailMsg()
2961 raise errors.OpExecError("Could not reboot instance: %s" % msg)
2963 result = self.rpc.call_instance_shutdown(node_current, instance)
2964 msg = result.RemoteFailMsg()
2966 raise errors.OpExecError("Could not shutdown instance for"
2967 " full reboot: %s" % msg)
2968 _ShutdownInstanceDisks(self, instance)
2969 _StartInstanceDisks(self, instance, ignore_secondaries)
2970 result = self.rpc.call_instance_start(node_current, instance, None, None)
2971 msg = result.RemoteFailMsg()
2973 _ShutdownInstanceDisks(self, instance)
2974 raise errors.OpExecError("Could not start instance for"
2975 " full reboot: %s" % msg)
2977 self.cfg.MarkInstanceUp(instance.name)
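# Dispatch summary (editor's note): INSTANCE_REBOOT_SOFT and
# INSTANCE_REBOOT_HARD are performed by a single call_instance_reboot
# RPC, while INSTANCE_REBOOT_FULL is emulated as shutdown, disk
# deactivation, disk activation and start, with a disk rollback if the
# final start fails.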
2980 class LUShutdownInstance(LogicalUnit):
2981 """Shutdown an instance.
2984 HPATH = "instance-stop"
2985 HTYPE = constants.HTYPE_INSTANCE
2986 _OP_REQP = ["instance_name"]
2989 def ExpandNames(self):
2990 self._ExpandAndLockInstance()
2992 def BuildHooksEnv(self):
2995 This runs on master, primary and secondary nodes of the instance.
2998 env = _BuildInstanceHookEnvByObject(self, self.instance)
2999 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3002 def CheckPrereq(self):
3003 """Check prerequisites.
3005 This checks that the instance is in the cluster.
3008 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3009 assert self.instance is not None, \
3010 "Cannot retrieve locked instance %s" % self.op.instance_name
3011 _CheckNodeOnline(self, self.instance.primary_node)
3013 def Exec(self, feedback_fn):
3014 """Shutdown the instance.
3017 instance = self.instance
3018 node_current = instance.primary_node
3019 self.cfg.MarkInstanceDown(instance.name)
3020 result = self.rpc.call_instance_shutdown(node_current, instance)
3021 msg = result.RemoteFailMsg()
3023 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3025 _ShutdownInstanceDisks(self, instance)
3028 class LUReinstallInstance(LogicalUnit):
3029 """Reinstall an instance.
3032 HPATH = "instance-reinstall"
3033 HTYPE = constants.HTYPE_INSTANCE
3034 _OP_REQP = ["instance_name"]
3037 def ExpandNames(self):
3038 self._ExpandAndLockInstance()
3040 def BuildHooksEnv(self):
3043 This runs on master, primary and secondary nodes of the instance.
3046 env = _BuildInstanceHookEnvByObject(self, self.instance)
3047 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3050 def CheckPrereq(self):
3051 """Check prerequisites.
3053 This checks that the instance is in the cluster and is not running.
3056 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3057 assert instance is not None, \
3058 "Cannot retrieve locked instance %s" % self.op.instance_name
3059 _CheckNodeOnline(self, instance.primary_node)
3061 if instance.disk_template == constants.DT_DISKLESS:
3062 raise errors.OpPrereqError("Instance '%s' has no disks" %
3063 self.op.instance_name)
3064 if instance.admin_up:
3065 raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3066 self.op.instance_name)
3067 remote_info = self.rpc.call_instance_info(instance.primary_node,
3069 instance.hypervisor)
3071 if remote_info.data:
3072 raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3073 (self.op.instance_name,
3074 instance.primary_node))
3076 self.op.os_type = getattr(self.op, "os_type", None)
3077 if self.op.os_type is not None:
3079 pnode = self.cfg.GetNodeInfo(
3080 self.cfg.ExpandNodeName(instance.primary_node))
3082 raise errors.OpPrereqError("Primary node '%s' is unknown" %
3084 result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3086 if not isinstance(result.data, objects.OS):
3087 raise errors.OpPrereqError("OS '%s' not in supported OS list for"
3088 " primary node" % self.op.os_type)
3090 self.instance = instance
3092 def Exec(self, feedback_fn):
3093 """Reinstall the instance.
3096 inst = self.instance
3098 if self.op.os_type is not None:
3099 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3100 inst.os = self.op.os_type
3101 self.cfg.Update(inst)
3103 _StartInstanceDisks(self, inst, None)
3105 feedback_fn("Running the instance OS create scripts...")
3106 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3107 msg = result.RemoteFailMsg()
3109 raise errors.OpExecError("Could not install OS for instance %s"
3111 (inst.name, inst.primary_node, msg))
3113 _ShutdownInstanceDisks(self, inst)
3116 class LURenameInstance(LogicalUnit):
3117 """Rename an instance.
3120 HPATH = "instance-rename"
3121 HTYPE = constants.HTYPE_INSTANCE
3122 _OP_REQP = ["instance_name", "new_name"]
3124 def BuildHooksEnv(self):
3127 This runs on master, primary and secondary nodes of the instance.
3130 env = _BuildInstanceHookEnvByObject(self, self.instance)
3131 env["INSTANCE_NEW_NAME"] = self.op.new_name
3132 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3135 def CheckPrereq(self):
3136 """Check prerequisites.
3138 This checks that the instance is in the cluster and is not running.
3141 instance = self.cfg.GetInstanceInfo(
3142 self.cfg.ExpandInstanceName(self.op.instance_name))
3143 if instance is None:
3144 raise errors.OpPrereqError("Instance '%s' not known" %
3145 self.op.instance_name)
3146 _CheckNodeOnline(self, instance.primary_node)
3148 if instance.admin_up:
3149 raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3150 self.op.instance_name)
3151 remote_info = self.rpc.call_instance_info(instance.primary_node,
3153 instance.hypervisor)
3155 if remote_info.data:
3156 raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3157 (self.op.instance_name,
3158 instance.primary_node))
3159 self.instance = instance
3161 # new name verification
3162 name_info = utils.HostInfo(self.op.new_name)
3164 self.op.new_name = new_name = name_info.name
3165 instance_list = self.cfg.GetInstanceList()
3166 if new_name in instance_list:
3167 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3170 if not getattr(self.op, "ignore_ip", False):
3171 if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
3172 raise errors.OpPrereqError("IP %s of instance %s already in use" %
3173 (name_info.ip, new_name))
3176 def Exec(self, feedback_fn):
3177 """Reinstall the instance.
3180 inst = self.instance
3181 old_name = inst.name
3183 if inst.disk_template == constants.DT_FILE:
3184 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3186 self.cfg.RenameInstance(inst.name, self.op.new_name)
3187 # Change the instance lock. This is definitely safe while we hold the BGL
3188 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
3189 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
3191 # re-read the instance from the configuration after rename
3192 inst = self.cfg.GetInstanceInfo(self.op.new_name)
3194 if inst.disk_template == constants.DT_FILE:
3195 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3196 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
3197 old_file_storage_dir,
3198 new_file_storage_dir)
3201 raise errors.OpExecError("Could not connect to node '%s' to rename"
3202 " directory '%s' to '%s' (but the instance"
3203 " has been renamed in Ganeti)" % (
3204 inst.primary_node, old_file_storage_dir,
3205 new_file_storage_dir))
3207 if not result.data[0]:
3208 raise errors.OpExecError("Could not rename directory '%s' to '%s'"
3209 " (but the instance has been renamed in"
3210 " Ganeti)" % (old_file_storage_dir,
3211 new_file_storage_dir))
3213 _StartInstanceDisks(self, inst, None)
3215 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
3217 msg = result.RemoteFailMsg()
3219 msg = ("Could not run OS rename script for instance %s on node %s"
3220 " (but the instance has been renamed in Ganeti): %s" %
3221 (inst.name, inst.primary_node, msg))
3222 self.proc.LogWarning(msg)
3224 _ShutdownInstanceDisks(self, inst)
3227 class LURemoveInstance(LogicalUnit):
3228 """Remove an instance.
3231 HPATH = "instance-remove"
3232 HTYPE = constants.HTYPE_INSTANCE
3233 _OP_REQP = ["instance_name", "ignore_failures"]
3236 def ExpandNames(self):
3237 self._ExpandAndLockInstance()
3238 self.needed_locks[locking.LEVEL_NODE] = []
3239 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3241 def DeclareLocks(self, level):
3242 if level == locking.LEVEL_NODE:
3243 self._LockInstancesNodes()
3245 def BuildHooksEnv(self):
3248 This runs on master, primary and secondary nodes of the instance.
3251 env = _BuildInstanceHookEnvByObject(self, self.instance)
3252 nl = [self.cfg.GetMasterNode()]
3255 def CheckPrereq(self):
3256 """Check prerequisites.
3258 This checks that the instance is in the cluster.
3261 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3262 assert self.instance is not None, \
3263 "Cannot retrieve locked instance %s" % self.op.instance_name
3265 def Exec(self, feedback_fn):
3266 """Remove the instance.
3269 instance = self.instance
3270 logging.info("Shutting down instance %s on node %s",
3271 instance.name, instance.primary_node)
3273 result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
3274 msg = result.RemoteFailMsg()
3276 if self.op.ignore_failures:
3277 feedback_fn("Warning: can't shutdown instance: %s" % msg)
3279 raise errors.OpExecError("Could not shutdown instance %s on"
3281 (instance.name, instance.primary_node, msg))
3283 logging.info("Removing block devices for instance %s", instance.name)
3285 if not _RemoveDisks(self, instance):
3286 if self.op.ignore_failures:
3287 feedback_fn("Warning: can't remove instance's disks")
3289 raise errors.OpExecError("Can't remove instance's disks")
3291 logging.info("Removing instance %s out of cluster config", instance.name)
3293 self.cfg.RemoveInstance(instance.name)
3294 self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3297 class LUQueryInstances(NoHooksLU):
3298 """Logical unit for querying instances.
3301 _OP_REQP = ["output_fields", "names", "use_locking"]
3303 _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3305 "disk_template", "ip", "mac", "bridge",
3306 "sda_size", "sdb_size", "vcpus", "tags",
3307 "network_port", "beparams",
3308 r"(disk)\.(size)/([0-9]+)",
3309 r"(disk)\.(sizes)", "disk_usage",
3310 r"(nic)\.(mac|ip|bridge)/([0-9]+)",
3311 r"(nic)\.(macs|ips|bridges)",
3312 r"(disk|nic)\.(count)",
3313 "serial_no", "hypervisor", "hvparams",] +
3315 for name in constants.HVS_PARAMETERS] +
3317 for name in constants.BES_PARAMETERS])
3318 _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
3321 def ExpandNames(self):
3322 _CheckOutputFields(static=self._FIELDS_STATIC,
3323 dynamic=self._FIELDS_DYNAMIC,
3324 selected=self.op.output_fields)
3326 self.needed_locks = {}
3327 self.share_locks[locking.LEVEL_INSTANCE] = 1
3328 self.share_locks[locking.LEVEL_NODE] = 1
3331 self.wanted = _GetWantedInstances(self, self.op.names)
3333 self.wanted = locking.ALL_SET
3335 self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3336 self.do_locking = self.do_node_query and self.op.use_locking
3338 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3339 self.needed_locks[locking.LEVEL_NODE] = []
3340 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3342 def DeclareLocks(self, level):
3343 if level == locking.LEVEL_NODE and self.do_locking:
3344 self._LockInstancesNodes()
3346 def CheckPrereq(self):
3347 """Check prerequisites.
3352 def Exec(self, feedback_fn):
3353 """Computes the list of nodes and their attributes.
3356 all_info = self.cfg.GetAllInstancesInfo()
3357 if self.wanted == locking.ALL_SET:
3358 # caller didn't specify instance names, so ordering is not important
3360 instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3362 instance_names = all_info.keys()
3363 instance_names = utils.NiceSort(instance_names)
3365 # caller did specify names, so we must keep the ordering
3367 tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
3369 tgt_set = all_info.keys()
3370 missing = set(self.wanted).difference(tgt_set)
3372 raise errors.OpExecError("Some instances were removed before"
3373 " retrieving their data: %s" % missing)
3374 instance_names = self.wanted
3376 instance_list = [all_info[iname] for iname in instance_names]
3378 # begin data gathering
3380 nodes = frozenset([inst.primary_node for inst in instance_list])
3381 hv_list = list(set([inst.hypervisor for inst in instance_list]))
3385 if self.do_node_query:
3387 node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3389 result = node_data[name]
3391 # offline nodes will be in both lists
3392 off_nodes.append(name)
3394 bad_nodes.append(name)
3397 live_data.update(result.data)
3398 # else no instance is alive
3400 live_data = dict([(name, {}) for name in instance_names])
3402 # end data gathering
3407 for instance in instance_list:
3409 i_hv = self.cfg.GetClusterInfo().FillHV(instance)
3410 i_be = self.cfg.GetClusterInfo().FillBE(instance)
3411 for field in self.op.output_fields:
3412 st_match = self._FIELDS_STATIC.Matches(field)
3417 elif field == "pnode":
3418 val = instance.primary_node
3419 elif field == "snodes":
3420 val = list(instance.secondary_nodes)
3421 elif field == "admin_state":
3422 val = instance.admin_up
3423 elif field == "oper_state":
3424 if instance.primary_node in bad_nodes:
3427 val = bool(live_data.get(instance.name))
3428 elif field == "status":
3429 if instance.primary_node in off_nodes:
3430 val = "ERROR_nodeoffline"
3431 elif instance.primary_node in bad_nodes:
3432 val = "ERROR_nodedown"
3434 running = bool(live_data.get(instance.name))
3436 if instance.admin_up:
3441 if instance.admin_up:
3445 elif field == "oper_ram":
3446 if instance.primary_node in bad_nodes:
3448 elif instance.name in live_data:
3449 val = live_data[instance.name].get("memory", "?")
3452 elif field == "disk_template":
3453 val = instance.disk_template
3455 val = instance.nics[0].ip
3456 elif field == "bridge":
3457 val = instance.nics[0].bridge
3458 elif field == "mac":
3459 val = instance.nics[0].mac
3460 elif field == "sda_size" or field == "sdb_size":
3461 idx = ord(field[2]) - ord('a')
3463 val = instance.FindDisk(idx).size
3464 except errors.OpPrereqError:
3466 elif field == "disk_usage": # total disk usage per node
3467 disk_sizes = [{'size': disk.size} for disk in instance.disks]
3468 val = _ComputeDiskSize(instance.disk_template, disk_sizes)
3469 elif field == "tags":
3470 val = list(instance.GetTags())
3471 elif field == "serial_no":
3472 val = instance.serial_no
3473 elif field == "network_port":
3474 val = instance.network_port
3475 elif field == "hypervisor":
3476 val = instance.hypervisor
3477 elif field == "hvparams":
3479 elif (field.startswith(HVPREFIX) and
3480 field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3481 val = i_hv.get(field[len(HVPREFIX):], None)
3482 elif field == "beparams":
3484 elif (field.startswith(BEPREFIX) and
3485 field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3486 val = i_be.get(field[len(BEPREFIX):], None)
3487 elif st_match and st_match.groups():
3488 # matches a variable list
3489 st_groups = st_match.groups()
3490 if st_groups and st_groups[0] == "disk":
3491 if st_groups[1] == "count":
3492 val = len(instance.disks)
3493 elif st_groups[1] == "sizes":
3494 val = [disk.size for disk in instance.disks]
3495 elif st_groups[1] == "size":
3497 val = instance.FindDisk(st_groups[2]).size
3498 except errors.OpPrereqError:
3501 assert False, "Unhandled disk parameter"
3502 elif st_groups[0] == "nic":
3503 if st_groups[1] == "count":
3504 val = len(instance.nics)
3505 elif st_groups[1] == "macs":
3506 val = [nic.mac for nic in instance.nics]
3507 elif st_groups[1] == "ips":
3508 val = [nic.ip for nic in instance.nics]
3509 elif st_groups[1] == "bridges":
3510 val = [nic.bridge for nic in instance.nics]
3513 nic_idx = int(st_groups[2])
3514 if nic_idx >= len(instance.nics):
3517 if st_groups[1] == "mac":
3518 val = instance.nics[nic_idx].mac
3519 elif st_groups[1] == "ip":
3520 val = instance.nics[nic_idx].ip
3521 elif st_groups[1] == "bridge":
3522 val = instance.nics[nic_idx].bridge
3524 assert False, "Unhandled NIC parameter"
3526 assert False, "Unhandled variable parameter"
3528 raise errors.ParameterError(field)
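# Sketch of the variable-list fields handled above (editor's note):
# "disk.size/0" yields the size of the first disk, "disk.sizes" the
# list of all disk sizes, "nic.macs" all MAC addresses, and "nic.mac/1"
# the MAC of the second NIC (or None if the index is out of range).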
3535 class LUFailoverInstance(LogicalUnit):
3536 """Failover an instance.
3539 HPATH = "instance-failover"
3540 HTYPE = constants.HTYPE_INSTANCE
3541 _OP_REQP = ["instance_name", "ignore_consistency"]
3544 def ExpandNames(self):
3545 self._ExpandAndLockInstance()
3546 self.needed_locks[locking.LEVEL_NODE] = []
3547 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3549 def DeclareLocks(self, level):
3550 if level == locking.LEVEL_NODE:
3551 self._LockInstancesNodes()
3553 def BuildHooksEnv(self):
3556 This runs on master, primary and secondary nodes of the instance.
3560 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3562 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3563 nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3566 def CheckPrereq(self):
3567 """Check prerequisites.
3569 This checks that the instance is in the cluster.
3572 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3573 assert self.instance is not None, \
3574 "Cannot retrieve locked instance %s" % self.op.instance_name
3576 bep = self.cfg.GetClusterInfo().FillBE(instance)
3577 if instance.disk_template not in constants.DTS_NET_MIRROR:
3578 raise errors.OpPrereqError("Instance's disk layout is not"
3579 " network mirrored, cannot failover.")
3581 secondary_nodes = instance.secondary_nodes
3582 if not secondary_nodes:
3583 raise errors.ProgrammerError("no secondary node but using "
3584 "a mirrored disk template")
3586 target_node = secondary_nodes[0]
3587 _CheckNodeOnline(self, target_node)
3588 _CheckNodeNotDrained(self, target_node)
3589 # check memory requirements on the secondary node
3590 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3591 instance.name, bep[constants.BE_MEMORY],
3592 instance.hypervisor)
3594 # check bridge existence
3595 brlist = [nic.bridge for nic in instance.nics]
3596 result = self.rpc.call_bridges_exist(target_node, brlist)
3599 raise errors.OpPrereqError("One or more target bridges %s does not"
3600 " exist on destination node '%s'" %
3601 (brlist, target_node))
3603 def Exec(self, feedback_fn):
3604 """Failover an instance.
3606 The failover is done by shutting it down on its present node and
3607 starting it on the secondary.
3610 instance = self.instance
3612 source_node = instance.primary_node
3613 target_node = instance.secondary_nodes[0]
3615 feedback_fn("* checking disk consistency between source and target")
3616 for dev in instance.disks:
3617 # for drbd, these are drbd over lvm
3618 if not _CheckDiskConsistency(self, dev, target_node, False):
3619 if instance.admin_up and not self.op.ignore_consistency:
3620 raise errors.OpExecError("Disk %s is degraded on target node,"
3621 " aborting failover." % dev.iv_name)
3623 feedback_fn("* shutting down instance on source node")
3624 logging.info("Shutting down instance %s on node %s",
3625 instance.name, source_node)
3627 result = self.rpc.call_instance_shutdown(source_node, instance)
3628 msg = result.RemoteFailMsg()
3630 if self.op.ignore_consistency:
3631 self.proc.LogWarning("Could not shutdown instance %s on node %s."
3632 " Proceeding anyway. Please make sure node"
3633 " %s is down. Error details: %s",
3634 instance.name, source_node, source_node, msg)
3636 raise errors.OpExecError("Could not shutdown instance %s on"
3638 (instance.name, source_node, msg))
3640 feedback_fn("* deactivating the instance's disks on source node")
3641 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3642 raise errors.OpExecError("Can't shut down the instance's disks.")
3644 instance.primary_node = target_node
3645 # distribute new instance config to the other nodes
3646 self.cfg.Update(instance)
3648 # Only start the instance if it's marked as up
3649 if instance.admin_up:
3650 feedback_fn("* activating the instance's disks on target node")
3651 logging.info("Starting instance %s on node %s",
3652 instance.name, target_node)
3654 disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3655 ignore_secondaries=True)
3657 _ShutdownInstanceDisks(self, instance)
3658 raise errors.OpExecError("Can't activate the instance's disks")
3660 feedback_fn("* starting the instance on the target node")
3661 result = self.rpc.call_instance_start(target_node, instance, None, None)
3662 msg = result.RemoteFailMsg()
3664 _ShutdownInstanceDisks(self, instance)
3665 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
3666 (instance.name, target_node, msg))
3669 class LUMigrateInstance(LogicalUnit):
3670 """Migrate an instance.
3672 This is migration without shutting the instance down; failover,
3673 by contrast, shuts it down first.
3676 HPATH = "instance-migrate"
3677 HTYPE = constants.HTYPE_INSTANCE
3678 _OP_REQP = ["instance_name", "live", "cleanup"]
3682 def ExpandNames(self):
3683 self._ExpandAndLockInstance()
3684 self.needed_locks[locking.LEVEL_NODE] = []
3685 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3687 def DeclareLocks(self, level):
3688 if level == locking.LEVEL_NODE:
3689 self._LockInstancesNodes()
3691 def BuildHooksEnv(self):
3694 This runs on master, primary and secondary nodes of the instance.
3697 env = _BuildInstanceHookEnvByObject(self, self.instance)
3698 env["MIGRATE_LIVE"] = self.op.live
3699 env["MIGRATE_CLEANUP"] = self.op.cleanup
3700 nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3703 def CheckPrereq(self):
3704 """Check prerequisites.
3706 This checks that the instance is in the cluster.
3709 instance = self.cfg.GetInstanceInfo(
3710 self.cfg.ExpandInstanceName(self.op.instance_name))
3711 if instance is None:
3712 raise errors.OpPrereqError("Instance '%s' not known" %
3713 self.op.instance_name)
3715 if instance.disk_template != constants.DT_DRBD8:
3716 raise errors.OpPrereqError("Instance's disk layout is not"
3717 " drbd8, cannot migrate.")
3719 secondary_nodes = instance.secondary_nodes
3720 if not secondary_nodes:
3721 raise errors.ConfigurationError("No secondary node but using"
3722 " drbd8 disk template")
3724 i_be = self.cfg.GetClusterInfo().FillBE(instance)
3726 target_node = secondary_nodes[0]
3727 # check memory requirements on the secondary node
3728 _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3729 instance.name, i_be[constants.BE_MEMORY],
3730 instance.hypervisor)
3732 # check bridge existence
3733 brlist = [nic.bridge for nic in instance.nics]
3734 result = self.rpc.call_bridges_exist(target_node, brlist)
3735 if result.failed or not result.data:
3736 raise errors.OpPrereqError("One or more target bridges %s does not"
3737 " exist on destination node '%s'" %
3738 (brlist, target_node))
3740 if not self.op.cleanup:
3741 _CheckNodeNotDrained(self, target_node)
3742 result = self.rpc.call_instance_migratable(instance.primary_node,
3744 msg = result.RemoteFailMsg()
3746 raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
3749 self.instance = instance
3751 def _WaitUntilSync(self):
3752 """Poll with custom rpc for disk sync.
3754 This uses our own step-based rpc call.
3757 self.feedback_fn("* wait until resync is done")
3761 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3763 self.instance.disks)
3765 for node, nres in result.items():
3766 msg = nres.RemoteFailMsg()
3768 raise errors.OpExecError("Cannot resync disks on node %s: %s" %
3770 node_done, node_percent = nres.payload
3771 all_done = all_done and node_done
3772 if node_percent is not None:
3773 min_percent = min(min_percent, node_percent)
3775 if min_percent < 100:
3776 self.feedback_fn(" - progress: %.1f%%" % min_percent)
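# Aggregation example (editor's illustration): if one node reports
# (done, percent) = (False, 80.0) and the other (True, None), then
# all_done becomes False, min_percent stays 80.0, " - progress: 80.0%"
# is reported and the loop polls again.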
3779 def _EnsureSecondary(self, node):
3780 """Demote a node to secondary.
3783 self.feedback_fn("* switching node %s to secondary mode" % node)
3785 for dev in self.instance.disks:
3786 self.cfg.SetDiskID(dev, node)
3788 result = self.rpc.call_blockdev_close(node, self.instance.name,
3789 self.instance.disks)
3790 msg = result.RemoteFailMsg()
3792 raise errors.OpExecError("Cannot change disk to secondary on node %s,"
3793 " error %s" % (node, msg))
3795 def _GoStandalone(self):
3796 """Disconnect from the network.
3799 self.feedback_fn("* changing into standalone mode")
3800 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3801 self.instance.disks)
3802 for node, nres in result.items():
3803 msg = nres.RemoteFailMsg()
3805 raise errors.OpExecError("Cannot disconnect disks node %s,"
3806 " error %s" % (node, msg))
3808 def _GoReconnect(self, multimaster):
3809 """Reconnect to the network.
3815 msg = "single-master"
3816 self.feedback_fn("* changing disks into %s mode" % msg)
3817 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3818 self.instance.disks,
3819 self.instance.name, multimaster)
3820 for node, nres in result.items():
3821 msg = nres.RemoteFailMsg()
3823 raise errors.OpExecError("Cannot change disks config on node %s,"
3824 " error: %s" % (node, msg))
3826 def _ExecCleanup(self):
3827 """Try to cleanup after a failed migration.
3829 The cleanup is done by:
3830 - check that the instance is running only on one node
3831 (and update the config if needed)
3832 - change disks on its secondary node to secondary
3833 - wait until disks are fully synchronized
3834 - disconnect from the network
3835 - change disks into single-master mode
3836 - wait again until disks are fully synchronized
3839 instance = self.instance
3840 target_node = self.target_node
3841 source_node = self.source_node
3843 # check running on only one node
3844 self.feedback_fn("* checking where the instance actually runs"
3845 " (if this hangs, the hypervisor might be in"
3847 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
3848 for node, result in ins_l.items():
3850 if not isinstance(result.data, list):
3851 raise errors.OpExecError("Can't contact node '%s'" % node)
3853 runningon_source = instance.name in ins_l[source_node].data
3854 runningon_target = instance.name in ins_l[target_node].data
3856 if runningon_source and runningon_target:
3857 raise errors.OpExecError("Instance seems to be running on two nodes,"
3858 " or the hypervisor is confused. You will have"
3859 " to ensure manually that it runs only on one"
3860 " and restart this operation.")
3862 if not (runningon_source or runningon_target):
3863 raise errors.OpExecError("Instance does not seem to be running at all."
3864 " In this case, it's safer to repair by"
3865 " running 'gnt-instance stop' to ensure disk"
3866 " shutdown, and then restarting it.")
3868 if runningon_target:
3869 # the migration has actually succeeded, we need to update the config
3870 self.feedback_fn("* instance running on secondary node (%s),"
3871 " updating config" % target_node)
3872 instance.primary_node = target_node
3873 self.cfg.Update(instance)
3874 demoted_node = source_node
3876 self.feedback_fn("* instance confirmed to be running on its"
3877 " primary node (%s)" % source_node)
3878 demoted_node = target_node
3880 self._EnsureSecondary(demoted_node)
3882 self._WaitUntilSync()
3883 except errors.OpExecError:
3884 # we ignore errors here, since if the device is standalone, it
3885 # won't be able to sync
3887 self._GoStandalone()
3888 self._GoReconnect(False)
3889 self._WaitUntilSync()
3891 self.feedback_fn("* done")
3893 def _RevertDiskStatus(self):
3894 """Try to revert the disk status after a failed migration.
3897 target_node = self.target_node
3899 self._EnsureSecondary(target_node)
3900 self._GoStandalone()
3901 self._GoReconnect(False)
3902 self._WaitUntilSync()
3903 except errors.OpExecError, err:
3904 self.LogWarning("Migration failed and I can't reconnect the"
3905 " drives: error '%s'\n"
3906 "Please look and recover the instance status" %
3909 def _AbortMigration(self):
3910 """Call the hypervisor code to abort a started migration.
3913 instance = self.instance
3914 target_node = self.target_node
3915 migration_info = self.migration_info
3917 abort_result = self.rpc.call_finalize_migration(target_node,
3921 abort_msg = abort_result.RemoteFailMsg()
3923 logging.error("Aborting migration failed on target node %s: %s" %
3924 (target_node, abort_msg))
3925 # Don't raise an exception here, as we still have to try to revert the
3926 # disk status, even if this step failed.
3928 def _ExecMigration(self):
3929 """Migrate an instance.
3931 The migrate is done by:
3932 - change the disks into dual-master mode
3933 - wait until disks are fully synchronized again
3934 - migrate the instance
3935 - change disks on the new secondary node (the old primary) to secondary
3936 - wait until disks are fully synchronized
3937 - change disks into single-master mode (the disk-state transitions are sketched below)
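# The disk-state transitions listed above correspond roughly to the helper
# and RPC calls in this method (an illustrative sketch, not extra code):
#   self._EnsureSecondary(target_node)  # target DRBD becomes secondary
#   self._GoStandalone()                # both peers drop the network
#   self._GoReconnect(True)             # reconnect in dual-master mode
#   self._WaitUntilSync()               # resync before migrating
#   ... self.rpc.call_instance_migrate(...) ...
#   self._EnsureSecondary(source_node)  # demote the old primary
#   self._GoStandalone()
#   self._GoReconnect(False)            # back to single-master
#   self._WaitUntilSync()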
3940 instance = self.instance
3941 target_node = self.target_node
3942 source_node = self.source_node
3944 self.feedback_fn("* checking disk consistency between source and target")
3945 for dev in instance.disks:
3946 if not _CheckDiskConsistency(self, dev, target_node, False):
3947 raise errors.OpExecError("Disk %s is degraded or not fully"
3948 " synchronized on target node,"
3949 " aborting migrate." % dev.iv_name)
3951 # First get the migration information from the remote node
3952 result = self.rpc.call_migration_info(source_node, instance)
3953 msg = result.RemoteFailMsg()
3955 log_err = ("Failed fetching source migration information from %s: %s" %
3957 logging.error(log_err)
3958 raise errors.OpExecError(log_err)
3960 self.migration_info = migration_info = result.payload
3962 # Then switch the disks to master/master mode
3963 self._EnsureSecondary(target_node)
3964 self._GoStandalone()
3965 self._GoReconnect(True)
3966 self._WaitUntilSync()
3968 self.feedback_fn("* preparing %s to accept the instance" % target_node)
3969 result = self.rpc.call_accept_instance(target_node,
3972 self.nodes_ip[target_node])
3974 msg = result.RemoteFailMsg()
3976 logging.error("Instance pre-migration failed, trying to revert"
3977 " disk status: %s", msg)
3978 self._AbortMigration()
3979 self._RevertDiskStatus()
3980 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
3981 (instance.name, msg))
3983 self.feedback_fn("* migrating instance to %s" % target_node)
3985 result = self.rpc.call_instance_migrate(source_node, instance,
3986 self.nodes_ip[target_node],
3988 msg = result.RemoteFailMsg()
3990 logging.error("Instance migration failed, trying to revert"
3991 " disk status: %s", msg)
3992 self._AbortMigration()
3993 self._RevertDiskStatus()
3994 raise errors.OpExecError("Could not migrate instance %s: %s" %
3995 (instance.name, msg))
3998 instance.primary_node = target_node
3999 # distribute new instance config to the other nodes
4000 self.cfg.Update(instance)
4002 result = self.rpc.call_finalize_migration(target_node,
4003 instance,
4004 migration_info,
4005 True)
4006 msg = result.RemoteFailMsg()
4007 if msg:
4008 logging.error("Instance migration succeeded, but finalization failed:"
4009 " %s", msg)
4010 raise errors.OpExecError("Could not finalize instance migration: %s" %
4011 msg)
4013 self._EnsureSecondary(source_node)
4014 self._WaitUntilSync()
4015 self._GoStandalone()
4016 self._GoReconnect(False)
4017 self._WaitUntilSync()
4019 self.feedback_fn("* done")
4021 def Exec(self, feedback_fn):
4022 """Perform the migration.
4025 self.feedback_fn = feedback_fn
4027 self.source_node = self.instance.primary_node
4028 self.target_node = self.instance.secondary_nodes[0]
4029 self.all_nodes = [self.source_node, self.target_node]
4030 self.nodes_ip = {
4031 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
4032 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
4033 }
4034 if self.op.cleanup:
4035 return self._ExecCleanup()
4036 else:
4037 return self._ExecMigration()
4040 def _CreateBlockDev(lu, node, instance, device, force_create,
4042 """Create a tree of block devices on a given node.
4044 If this device type has to be created on secondaries, create it and
4045 all its children.
4047 If not, just recurse to children keeping the same 'force' value.
4049 @param lu: the lu on whose behalf we execute
4050 @param node: the node on which to create the device
4051 @type instance: L{objects.Instance}
4052 @param instance: the instance which owns the device
4053 @type device: L{objects.Disk}
4054 @param device: the device to create
4055 @type force_create: boolean
4056 @param force_create: whether to force creation of this device; this
4057 will be changed to True whenever we find a device whose
4058 CreateOnSecondary() method returns True
4059 @param info: the extra 'metadata' we should attach to the device
4060 (this will be represented as a LVM tag)
4061 @type force_open: boolean
4062 @param force_open: this parameter will be passed to the
4063 L{backend.BlockdevCreate} function where it specifies
4064 whether we run on primary or not, and it affects both
4065 the child assembly and the device's own Open() execution
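# Illustrative sketch (node name and device layout are hypothetical): for
# a disk whose type reports CreateOnSecondary(), a call such as
#   _CreateBlockDev(lu, "node2.example.com", instance, disk,
#                   False, _GetInstanceInfoText(instance), False)
# flips force_create to True, recurses into the children first (so the
# backing LVs exist before their parent), and only then creates the device
# itself via _CreateSingleBlockDev.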
4068 if device.CreateOnSecondary():
4072 for child in device.children:
4073 _CreateBlockDev(lu, node, instance, child, force_create,
4076 if not force_create:
4079 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4082 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4083 """Create a single block device on a given node.
4085 This will not recurse over children of the device, so they must be
4086 created in advance.
4088 @param lu: the lu on whose behalf we execute
4089 @param node: the node on which to create the device
4090 @type instance: L{objects.Instance}
4091 @param instance: the instance which owns the device
4092 @type device: L{objects.Disk}
4093 @param device: the device to create
4094 @param info: the extra 'metadata' we should attach to the device
4095 (this will be represented as a LVM tag)
4096 @type force_open: boolean
4097 @param force_open: this parameter will be passed to the
4098 L{backend.BlockdevCreate} function where it specifies
4099 whether we run on primary or not, and it affects both
4100 the child assembly and the device's own Open() execution
4103 lu.cfg.SetDiskID(device, node)
4104 result = lu.rpc.call_blockdev_create(node, device, device.size,
4105 instance.name, force_open, info)
4106 msg = result.RemoteFailMsg()
4108 raise errors.OpExecError("Can't create block device %s on"
4109 " node %s for instance %s: %s" %
4110 (device, node, instance.name, msg))
4111 if device.physical_id is None:
4112 device.physical_id = result.payload
4115 def _GenerateUniqueNames(lu, exts):
4116 """Generate a suitable LV name.
4118 This will generate a logical volume name for the given instance.
4123 new_id = lu.cfg.GenerateUniqueID()
4124 results.append("%s%s" % (new_id, val))
4125 return results
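# Illustrative example (the UUIDs are made up): a call like
#   _GenerateUniqueNames(lu, [".disk0_data", ".disk0_meta"])
# would return names of the form
#   ["0e3c6a9a-....disk0_data", "1f4d7b8b-....disk0_meta"]
# with a fresh GenerateUniqueID() value per extension, so the LV names
# cannot collide with volumes generated earlier.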
4128 def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
4130 """Generate a drbd8 device complete with its children.
4133 port = lu.cfg.AllocatePort()
4134 vgname = lu.cfg.GetVGName()
4135 shared_secret = lu.cfg.GenerateDRBDSecret()
4136 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4137 logical_id=(vgname, names[0]))
4138 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4139 logical_id=(vgname, names[1]))
4140 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
4141 logical_id=(primary, secondary, port,
4144 children=[dev_data, dev_meta],
4149 def _GenerateDiskTemplate(lu, template_name,
4150 instance_name, primary_node,
4151 secondary_nodes, disk_info,
4152 file_storage_dir, file_driver,
4154 """Generate the entire disk layout for a given template type.
4157 #TODO: compute space requirements
4159 vgname = lu.cfg.GetVGName()
4160 disk_count = len(disk_info)
4161 disks = []
4162 if template_name == constants.DT_DISKLESS:
4163 pass
4164 elif template_name == constants.DT_PLAIN:
4165 if len(secondary_nodes) != 0:
4166 raise errors.ProgrammerError("Wrong template configuration")
4168 names = _GenerateUniqueNames(lu, [".disk%d" % i
4169 for i in range(disk_count)])
4170 for idx, disk in enumerate(disk_info):
4171 disk_index = idx + base_index
4172 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
4173 logical_id=(vgname, names[idx]),
4174 iv_name="disk/%d" % disk_index,
4175 mode=disk["mode"])
4176 disks.append(disk_dev)
4177 elif template_name == constants.DT_DRBD8:
4178 if len(secondary_nodes) != 1:
4179 raise errors.ProgrammerError("Wrong template configuration")
4180 remote_node = secondary_nodes[0]
4181 minors = lu.cfg.AllocateDRBDMinor(
4182 [primary_node, remote_node] * len(disk_info), instance_name)
4184 names = []
4185 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
4186 for i in range(disk_count)]):
4187 names.append(lv_prefix + "_data")
4188 names.append(lv_prefix + "_meta")
4189 for idx, disk in enumerate(disk_info):
4190 disk_index = idx + base_index
4191 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4192 disk["size"], names[idx*2:idx*2+2],
4193 "disk/%d" % disk_index,
4194 minors[idx*2], minors[idx*2+1])
4195 disk_dev.mode = disk["mode"]
4196 disks.append(disk_dev)
4197 elif template_name == constants.DT_FILE:
4198 if len(secondary_nodes) != 0:
4199 raise errors.ProgrammerError("Wrong template configuration")
4201 for idx, disk in enumerate(disk_info):
4202 disk_index = idx + base_index
4203 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4204 iv_name="disk/%d" % disk_index,
4205 logical_id=(file_driver,
4206 "%s/disk%d" % (file_storage_dir,
4207 disk_index)),
4208 mode=disk["mode"])
4209 disks.append(disk_dev)
4211 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
4215 def _GetInstanceInfoText(instance):
4216 """Compute that text that should be added to the disk's metadata.
4219 return "originstname+%s" % instance.name
4222 def _CreateDisks(lu, instance):
4223 """Create all disks for an instance.
4225 This abstracts away some work from AddInstance.
4227 @type lu: L{LogicalUnit}
4228 @param lu: the logical unit on whose behalf we execute
4229 @type instance: L{objects.Instance}
4230 @param instance: the instance whose disks we should create
4232 @return: the success of the creation
4235 info = _GetInstanceInfoText(instance)
4236 pnode = instance.primary_node
4238 if instance.disk_template == constants.DT_FILE:
4239 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4240 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4242 if result.failed or not result.data:
4243 raise errors.OpExecError("Could not connect to node '%s'" % pnode)
4245 if not result.data[0]:
4246 raise errors.OpExecError("Failed to create directory '%s'" %
4249 # Note: this needs to be kept in sync with adding of disks in
4250 # LUSetInstanceParams
4251 for device in instance.disks:
4252 logging.info("Creating volume %s for instance %s",
4253 device.iv_name, instance.name)
4255 for node in instance.all_nodes:
4256 f_create = node == pnode
4257 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4260 def _RemoveDisks(lu, instance):
4261 """Remove all disks for an instance.
4263 This abstracts away some work from `AddInstance()` and
4264 `RemoveInstance()`. Note that in case some of the devices couldn't
4265 be removed, the removal will continue with the other ones (compare
4266 with `_CreateDisks()`).
4268 @type lu: L{LogicalUnit}
4269 @param lu: the logical unit on whose behalf we execute
4270 @type instance: L{objects.Instance}
4271 @param instance: the instance whose disks we should remove
4273 @return: the success of the removal
4276 logging.info("Removing block devices for instance %s", instance.name)
4279 for device in instance.disks:
4280 for node, disk in device.ComputeNodeTree(instance.primary_node):
4281 lu.cfg.SetDiskID(disk, node)
4282 msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
4284 lu.LogWarning("Could not remove block device %s on node %s,"
4285 " continuing anyway: %s", device.iv_name, node, msg)
4288 if instance.disk_template == constants.DT_FILE:
4289 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4290 result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4291 file_storage_dir)
4292 if result.failed or not result.data:
4293 logging.error("Could not remove directory '%s'", file_storage_dir)
4299 def _ComputeDiskSize(disk_template, disks):
4300 """Compute disk size requirements in the volume group
4303 # Required free disk space as a function of the disk template and sizes
4304 req_size_dict = {
4305 constants.DT_DISKLESS: None,
4306 constants.DT_PLAIN: sum(d["size"] for d in disks),
4307 # 128 MB are added for drbd metadata for each disk
4308 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4309 constants.DT_FILE: None,
4310 }
4312 if disk_template not in req_size_dict:
4313 raise errors.ProgrammerError("Disk template '%s' size requirement"
4314 " is unknown" % disk_template)
4316 return req_size_dict[disk_template]
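# Worked example (hypothetical sizes): for a DRBD8 instance with two
# 1024 MB disks,
#   _ComputeDiskSize(constants.DT_DRBD8, [{"size": 1024}, {"size": 1024}])
# returns (1024 + 128) + (1024 + 128) = 2304 MB of required volume group
# space, while DT_DISKLESS and DT_FILE return None since they consume no
# LVM storage.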
4319 def _CheckHVParams(lu, nodenames, hvname, hvparams):
4320 """Hypervisor parameter validation.
4322 This function abstracts the hypervisor parameter validation to be
4323 used in both instance create and instance modify.
4325 @type lu: L{LogicalUnit}
4326 @param lu: the logical unit for which we check
4327 @type nodenames: list
4328 @param nodenames: the list of nodes on which we should check
4329 @type hvname: string
4330 @param hvname: the name of the hypervisor we should use
4331 @type hvparams: dict
4332 @param hvparams: the parameters which we need to check
4333 @raise errors.OpPrereqError: if the parameters are not valid
4336 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4337 hvname,
4338 hvparams)
4339 for node in nodenames:
4340 info = hvinfo[node]
4343 msg = info.RemoteFailMsg()
4344 if msg:
4345 raise errors.OpPrereqError("Hypervisor parameter validation"
4346 " failed on node %s: %s" % (node, msg))
4349 class LUCreateInstance(LogicalUnit):
4350 """Create an instance.
4353 HPATH = "instance-add"
4354 HTYPE = constants.HTYPE_INSTANCE
4355 _OP_REQP = ["instance_name", "disks", "disk_template",
4357 "wait_for_sync", "ip_check", "nics",
4358 "hvparams", "beparams"]
4361 def _ExpandNode(self, node):
4362 """Expands and checks one node name.
4365 node_full = self.cfg.ExpandNodeName(node)
4366 if node_full is None:
4367 raise errors.OpPrereqError("Unknown node %s" % node)
4370 def ExpandNames(self):
4371 """ExpandNames for CreateInstance.
4373 Figure out the right locks for instance creation.
4376 self.needed_locks = {}
4378 # set optional parameters to none if they don't exist
4379 for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4380 if not hasattr(self.op, attr):
4381 setattr(self.op, attr, None)
4383 # cheap checks, mostly valid constants given
4385 # verify creation mode
4386 if self.op.mode not in (constants.INSTANCE_CREATE,
4387 constants.INSTANCE_IMPORT):
4388 raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4391 # disk template and mirror node verification
4392 if self.op.disk_template not in constants.DISK_TEMPLATES:
4393 raise errors.OpPrereqError("Invalid disk template name")
4395 if self.op.hypervisor is None:
4396 self.op.hypervisor = self.cfg.GetHypervisorType()
4398 cluster = self.cfg.GetClusterInfo()
4399 enabled_hvs = cluster.enabled_hypervisors
4400 if self.op.hypervisor not in enabled_hvs:
4401 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4402 " cluster (%s)" % (self.op.hypervisor,
4403 ",".join(enabled_hvs)))
4405 # check hypervisor parameter syntax (locally)
4406 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4407 filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
4408 self.op.hvparams)
4409 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4410 hv_type.CheckParameterSyntax(filled_hvp)
4412 # fill and remember the beparams dict
4413 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4414 self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4415 self.op.beparams)
4417 #### instance parameters check
4419 # instance name verification
4420 hostname1 = utils.HostInfo(self.op.instance_name)
4421 self.op.instance_name = instance_name = hostname1.name
4423 # this is just a preventive check, but someone might still add this
4424 # instance in the meantime, and creation will fail at lock-add time
4425 if instance_name in self.cfg.GetInstanceList():
4426 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4429 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4431 # NIC buildup
4432 self.nics = []
4433 for nic in self.op.nics:
4434 # ip validity checks
4435 ip = nic.get("ip", None)
4436 if ip is None or ip.lower() == "none":
4437 nic_ip = None
4438 elif ip.lower() == constants.VALUE_AUTO:
4439 nic_ip = hostname1.ip
4440 else:
4441 if not utils.IsValidIP(ip):
4442 raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4443 " like a valid IP" % ip)
4444 nic_ip = ip
4446 # MAC address verification
4447 mac = nic.get("mac", constants.VALUE_AUTO)
4448 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4449 if not utils.IsValidMac(mac.lower()):
4450 raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4451 mac)
4452 # bridge verification
4453 bridge = nic.get("bridge", None)
4454 if bridge is None:
4455 bridge = self.cfg.GetDefBridge()
4456 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
4458 # disk checks/pre-build
4459 self.disks = []
4460 for disk in self.op.disks:
4461 mode = disk.get("mode", constants.DISK_RDWR)
4462 if mode not in constants.DISK_ACCESS_SET:
4463 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4465 size = disk.get("size", None)
4467 raise errors.OpPrereqError("Missing disk size")
4471 raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4472 self.disks.append({"size": size, "mode": mode})
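# Illustrative example of the opcode input this loop expects (all values
# hypothetical; "xen-br0" stands in for the cluster's default bridge):
#   self.op.nics  = [{"ip": "auto", "mac": "auto", "bridge": "xen-br0"}]
#   self.op.disks = [{"size": 2048, "mode": "rw"}]
# "auto" for the IP resolves to the instance hostname's address, and an
# "auto" MAC is only generated later, in CheckPrereq, so the hooks and the
# iallocator see the final value.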
4474 # used in CheckPrereq for ip ping check
4475 self.check_ip = hostname1.ip
4477 # file storage checks
4478 if (self.op.file_driver and
4479 not self.op.file_driver in constants.FILE_DRIVER):
4480 raise errors.OpPrereqError("Invalid file driver name '%s'" %
4481 self.op.file_driver)
4483 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4484 raise errors.OpPrereqError("File storage directory path not absolute")
4486 ### Node/iallocator related checks
4487 if [self.op.iallocator, self.op.pnode].count(None) != 1:
4488 raise errors.OpPrereqError("One and only one of iallocator and primary"
4489 " node must be given")
4491 if self.op.iallocator:
4492 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4494 self.op.pnode = self._ExpandNode(self.op.pnode)
4495 nodelist = [self.op.pnode]
4496 if self.op.snode is not None:
4497 self.op.snode = self._ExpandNode(self.op.snode)
4498 nodelist.append(self.op.snode)
4499 self.needed_locks[locking.LEVEL_NODE] = nodelist
4501 # in case of import lock the source node too
4502 if self.op.mode == constants.INSTANCE_IMPORT:
4503 src_node = getattr(self.op, "src_node", None)
4504 src_path = getattr(self.op, "src_path", None)
4506 if src_path is None:
4507 self.op.src_path = src_path = self.op.instance_name
4509 if src_node is None:
4510 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4511 self.op.src_node = None
4512 if os.path.isabs(src_path):
4513 raise errors.OpPrereqError("Importing an instance from an absolute"
4514 " path requires a source node option.")
4516 self.op.src_node = src_node = self._ExpandNode(src_node)
4517 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4518 self.needed_locks[locking.LEVEL_NODE].append(src_node)
4519 if not os.path.isabs(src_path):
4520 self.op.src_path = src_path = \
4521 os.path.join(constants.EXPORT_DIR, src_path)
4523 else: # INSTANCE_CREATE
4524 if getattr(self.op, "os_type", None) is None:
4525 raise errors.OpPrereqError("No guest OS specified")
4527 def _RunAllocator(self):
4528 """Run the allocator based on input opcode.
4531 nics = [n.ToDict() for n in self.nics]
4532 ial = IAllocator(self,
4533 mode=constants.IALLOCATOR_MODE_ALLOC,
4534 name=self.op.instance_name,
4535 disk_template=self.op.disk_template,
4538 vcpus=self.be_full[constants.BE_VCPUS],
4539 mem_size=self.be_full[constants.BE_MEMORY],
4542 hypervisor=self.op.hypervisor,
4545 ial.Run(self.op.iallocator)
4548 raise errors.OpPrereqError("Can't compute nodes using"
4549 " iallocator '%s': %s" % (self.op.iallocator,
4551 if len(ial.nodes) != ial.required_nodes:
4552 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4553 " of nodes (%s), required %s" %
4554 (self.op.iallocator, len(ial.nodes),
4555 ial.required_nodes))
4556 self.op.pnode = ial.nodes[0]
4557 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4558 self.op.instance_name, self.op.iallocator,
4559 ", ".join(ial.nodes))
4560 if ial.required_nodes == 2:
4561 self.op.snode = ial.nodes[1]
4563 def BuildHooksEnv(self):
4564 """Build hooks env.
4566 This runs on master, primary and secondary nodes of the instance.
4570 "ADD_MODE": self.op.mode,
4572 if self.op.mode == constants.INSTANCE_IMPORT:
4573 env["SRC_NODE"] = self.op.src_node
4574 env["SRC_PATH"] = self.op.src_path
4575 env["SRC_IMAGES"] = self.src_images
4577 env.update(_BuildInstanceHookEnv(
4578 name=self.op.instance_name,
4579 primary_node=self.op.pnode,
4580 secondary_nodes=self.secondaries,
4581 status=self.op.start,
4582 os_type=self.op.os_type,
4583 memory=self.be_full[constants.BE_MEMORY],
4584 vcpus=self.be_full[constants.BE_VCPUS],
4585 nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
4586 disk_template=self.op.disk_template,
4587 disks=[(d["size"], d["mode"]) for d in self.disks],
4590 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4591 self.secondaries)
4592 return env, nl, nl
4595 def CheckPrereq(self):
4596 """Check prerequisites.
4599 if (not self.cfg.GetVGName() and
4600 self.op.disk_template not in constants.DTS_NOT_LVM):
4601 raise errors.OpPrereqError("Cluster does not support lvm-based"
4604 if self.op.mode == constants.INSTANCE_IMPORT:
4605 src_node = self.op.src_node
4606 src_path = self.op.src_path
4608 if src_node is None:
4609 exp_list = self.rpc.call_export_list(
4610 self.acquired_locks[locking.LEVEL_NODE])
4611 found = False
4612 for node in exp_list:
4613 if not exp_list[node].failed and src_path in exp_list[node].data:
4614 found = True
4615 self.op.src_node = src_node = node
4616 self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4617 src_path)
4618 break
4619 if not found:
4620 raise errors.OpPrereqError("No export found for relative path %s" %
4621 src_path)
4623 _CheckNodeOnline(self, src_node)
4624 result = self.rpc.call_export_info(src_node, src_path)
4627 raise errors.OpPrereqError("No export found in dir %s" % src_path)
4629 export_info = result.data
4630 if not export_info.has_section(constants.INISECT_EXP):
4631 raise errors.ProgrammerError("Corrupted export config")
4633 ei_version = export_info.get(constants.INISECT_EXP, 'version')
4634 if (int(ei_version) != constants.EXPORT_VERSION):
4635 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4636 (ei_version, constants.EXPORT_VERSION))
4638 # Check that the new instance doesn't have less disks than the export
4639 instance_disks = len(self.disks)
4640 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4641 if instance_disks < export_disks:
4642 raise errors.OpPrereqError("Not enough disks to import."
4643 " (instance: %d, export: %d)" %
4644 (instance_disks, export_disks))
4646 self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4647 disk_images = []
4648 for idx in range(export_disks):
4649 option = 'disk%d_dump' % idx
4650 if export_info.has_option(constants.INISECT_INS, option):
4651 # FIXME: are the old os-es, disk sizes, etc. useful?
4652 export_name = export_info.get(constants.INISECT_INS, option)
4653 image = os.path.join(src_path, export_name)
4654 disk_images.append(image)
4655 else:
4656 disk_images.append(False)
4658 self.src_images = disk_images
4660 old_name = export_info.get(constants.INISECT_INS, 'name')
4661 # FIXME: int() here could throw a ValueError on broken exports
4662 exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4663 if self.op.instance_name == old_name:
4664 for idx, nic in enumerate(self.nics):
4665 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
4666 nic_mac_ini = 'nic%d_mac' % idx
4667 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4669 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4670 # ip ping checks (we use the same ip that was resolved in ExpandNames)
4671 if self.op.start and not self.op.ip_check:
4672 raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4673 " adding an instance in start mode")
4675 if self.op.ip_check:
4676 if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4677 raise errors.OpPrereqError("IP %s of instance %s already in use" %
4678 (self.check_ip, self.op.instance_name))
4680 #### mac address generation
4681 # By generating here the mac address both the allocator and the hooks get
4682 # the real final mac address rather than the 'auto' or 'generate' value.
4683 # There is a race condition between the generation and the instance object
4684 # creation, which means that we know the mac is valid now, but we're not
4685 # sure it will be when we actually add the instance. If things go bad
4686 # adding the instance will abort because of a duplicate mac, and the
4687 # creation job will fail.
4688 for nic in self.nics:
4689 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4690 nic.mac = self.cfg.GenerateMAC()
4694 if self.op.iallocator is not None:
4695 self._RunAllocator()
4697 #### node related checks
4699 # check primary node
4700 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4701 assert self.pnode is not None, \
4702 "Cannot retrieve locked node %s" % self.op.pnode
4704 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4707 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4710 self.secondaries = []
4712 # mirror node verification
4713 if self.op.disk_template in constants.DTS_NET_MIRROR:
4714 if self.op.snode is None:
4715 raise errors.OpPrereqError("The networked disk templates need"
4717 if self.op.snode == pnode.name:
4718 raise errors.OpPrereqError("The secondary node cannot be"
4719 " the primary node.")
4720 _CheckNodeOnline(self, self.op.snode)
4721 _CheckNodeNotDrained(self, self.op.snode)
4722 self.secondaries.append(self.op.snode)
4724 nodenames = [pnode.name] + self.secondaries
4726 req_size = _ComputeDiskSize(self.op.disk_template,
4727 self.disks)
4729 # Check lv size requirements
4730 if req_size is not None:
4731 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4732 self.op.hypervisor)
4733 for node in nodenames:
4734 info = nodeinfo[node]
4738 raise errors.OpPrereqError("Cannot get current information"
4739 " from node '%s'" % node)
4740 vg_free = info.get('vg_free', None)
4741 if not isinstance(vg_free, int):
4742 raise errors.OpPrereqError("Can't compute free disk space on"
4744 if req_size > info['vg_free']:
4745 raise errors.OpPrereqError("Not enough disk space on target node %s."
4746 " %d MB available, %d MB required" %
4747 (node, info['vg_free'], req_size))
4749 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4752 result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4753 result.Raise()
4754 if not isinstance(result.data, objects.OS):
4755 raise errors.OpPrereqError("OS '%s' not in supported os list for"
4756 " primary node" % self.op.os_type)
4758 # bridge check on primary node
4759 bridges = [n.bridge for n in self.nics]
4760 result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
4763 raise errors.OpPrereqError("One of the target bridges '%s' does not"
4764 " exist on destination node '%s'" %
4765 (",".join(bridges), pnode.name))
4767 # memory check on primary node
4769 _CheckNodeFreeMemory(self, self.pnode.name,
4770 "creating instance %s" % self.op.instance_name,
4771 self.be_full[constants.BE_MEMORY],
4772 self.op.hypervisor)
4774 def Exec(self, feedback_fn):
4775 """Create and add the instance to the cluster.
4778 instance = self.op.instance_name
4779 pnode_name = self.pnode.name
4781 ht_kind = self.op.hypervisor
4782 if ht_kind in constants.HTS_REQ_PORT:
4783 network_port = self.cfg.AllocatePort()
4784 else:
4785 network_port = None
4787 ##if self.op.vnc_bind_address is None:
4788 ## self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4790 # this is needed because os.path.join does not accept None arguments
4791 if self.op.file_storage_dir is None:
4792 string_file_storage_dir = ""
4794 string_file_storage_dir = self.op.file_storage_dir
4796 # build the full file storage dir path
4797 file_storage_dir = os.path.normpath(os.path.join(
4798 self.cfg.GetFileStorageDir(),
4799 string_file_storage_dir, instance))
4802 disks = _GenerateDiskTemplate(self,
4803 self.op.disk_template,
4804 instance, pnode_name,
4805 self.secondaries,
4806 self.disks,
4807 file_storage_dir,
4808 self.op.file_driver,
4809 0)
4811 iobj = objects.Instance(name=instance, os=self.op.os_type,
4812 primary_node=pnode_name,
4813 nics=self.nics, disks=disks,
4814 disk_template=self.op.disk_template,
4815 admin_up=False,
4816 network_port=network_port,
4817 beparams=self.op.beparams,
4818 hvparams=self.op.hvparams,
4819 hypervisor=self.op.hypervisor,
4820 )
4822 feedback_fn("* creating instance disks...")
4823 try:
4824 _CreateDisks(self, iobj)
4825 except errors.OpExecError:
4826 self.LogWarning("Device creation failed, reverting...")
4827 try:
4828 _RemoveDisks(self, iobj)
4829 finally:
4830 self.cfg.ReleaseDRBDMinors(instance)
4831 raise
4833 feedback_fn("adding instance %s to cluster config" % instance)
4835 self.cfg.AddInstance(iobj)
4836 # Declare that we don't want to remove the instance lock anymore, as we've
4837 # added the instance to the config
4838 del self.remove_locks[locking.LEVEL_INSTANCE]
4839 # Unlock all the nodes
4840 if self.op.mode == constants.INSTANCE_IMPORT:
4841 nodes_keep = [self.op.src_node]
4842 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4843 if node != self.op.src_node]
4844 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4845 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4847 self.context.glm.release(locking.LEVEL_NODE)
4848 del self.acquired_locks[locking.LEVEL_NODE]
4850 if self.op.wait_for_sync:
4851 disk_abort = not _WaitForSync(self, iobj)
4852 elif iobj.disk_template in constants.DTS_NET_MIRROR:
4853 # make sure the disks are not degraded (still sync-ing is ok)
4855 feedback_fn("* checking mirrors status")
4856 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4857 else:
4858 disk_abort = False
4860 if disk_abort:
4861 _RemoveDisks(self, iobj)
4862 self.cfg.RemoveInstance(iobj.name)
4863 # Make sure the instance lock gets removed
4864 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4865 raise errors.OpExecError("There are some degraded disks for"
4868 feedback_fn("creating os for instance %s on node %s" %
4869 (instance, pnode_name))
4871 if iobj.disk_template != constants.DT_DISKLESS:
4872 if self.op.mode == constants.INSTANCE_CREATE:
4873 feedback_fn("* running the instance OS create scripts...")
4874 result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
4875 msg = result.RemoteFailMsg()
4877 raise errors.OpExecError("Could not add os for instance %s"
4879 (instance, pnode_name, msg))
4881 elif self.op.mode == constants.INSTANCE_IMPORT:
4882 feedback_fn("* running the instance OS import scripts...")
4883 src_node = self.op.src_node
4884 src_images = self.src_images
4885 cluster_name = self.cfg.GetClusterName()
4886 import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4887 src_node, src_images,
4889 import_result.Raise()
4890 for idx, result in enumerate(import_result.data):
4892 self.LogWarning("Could not import the image %s for instance"
4893 " %s, disk %d, on node %s" %
4894 (src_images[idx], instance, idx, pnode_name))
4895 else:
4896 # also checked in the prereq part
4897 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4898 % self.op.mode)
4900 if self.op.start:
4901 iobj.admin_up = True
4902 self.cfg.Update(iobj)
4903 logging.info("Starting instance %s on node %s", instance, pnode_name)
4904 feedback_fn("* starting instance...")
4905 result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
4906 msg = result.RemoteFailMsg()
4908 raise errors.OpExecError("Could not start instance: %s" % msg)
4911 class LUConnectConsole(NoHooksLU):
4912 """Connect to an instance's console.
4914 This is somewhat special in that it returns the command line that
4915 you need to run on the master node in order to connect to the
4916 console.
4919 _OP_REQP = ["instance_name"]
4922 def ExpandNames(self):
4923 self._ExpandAndLockInstance()
4925 def CheckPrereq(self):
4926 """Check prerequisites.
4928 This checks that the instance is in the cluster.
4931 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4932 assert self.instance is not None, \
4933 "Cannot retrieve locked instance %s" % self.op.instance_name
4934 _CheckNodeOnline(self, self.instance.primary_node)
4936 def Exec(self, feedback_fn):
4937 """Connect to the console of an instance
4940 instance = self.instance
4941 node = instance.primary_node
4943 node_insts = self.rpc.call_instance_list([node],
4944 [instance.hypervisor])[node]
4945 node_insts.Raise()
4947 if instance.name not in node_insts.data:
4948 raise errors.OpExecError("Instance %s is not running." % instance.name)
4950 logging.debug("Connecting to console of %s on %s", instance.name, node)
4952 hyper = hypervisor.GetHypervisor(instance.hypervisor)
4953 cluster = self.cfg.GetClusterInfo()
4954 # beparams and hvparams are passed separately, to avoid editing the
4955 # instance and then saving the defaults in the instance itself.
4956 hvparams = cluster.FillHV(instance)
4957 beparams = cluster.FillBE(instance)
4958 console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
4961 return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
4964 class LUReplaceDisks(LogicalUnit):
4965 """Replace the disks of an instance.
4968 HPATH = "mirrors-replace"
4969 HTYPE = constants.HTYPE_INSTANCE
4970 _OP_REQP = ["instance_name", "mode", "disks"]
4973 def CheckArguments(self):
4974 if not hasattr(self.op, "remote_node"):
4975 self.op.remote_node = None
4976 if not hasattr(self.op, "iallocator"):
4977 self.op.iallocator = None
4979 # check for valid parameter combination
4980 cnt = [self.op.remote_node, self.op.iallocator].count(None)
4981 if self.op.mode == constants.REPLACE_DISK_CHG:
4983 raise errors.OpPrereqError("When changing the secondary either an"
4984 " iallocator script must be used or the"
4987 raise errors.OpPrereqError("Give either the iallocator or the new"
4988 " secondary, not both")
4989 else: # not replacing the secondary
4991 raise errors.OpPrereqError("The iallocator and new node options can"
4992 " be used only when changing the"
4995 def ExpandNames(self):
4996 self._ExpandAndLockInstance()
4998 if self.op.iallocator is not None:
4999 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5000 elif self.op.remote_node is not None:
5001 remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
5002 if remote_node is None:
5003 raise errors.OpPrereqError("Node '%s' not known" %
5004 self.op.remote_node)
5005 self.op.remote_node = remote_node
5006 # Warning: do not remove the locking of the new secondary here
5007 # unless DRBD8.AddChildren is changed to work in parallel;
5008 # currently it doesn't since parallel invocations of
5009 # FindUnusedMinor will conflict
5010 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
5011 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5013 self.needed_locks[locking.LEVEL_NODE] = []
5014 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5016 def DeclareLocks(self, level):
5017 # If we're not already locking all nodes in the set we have to declare the
5018 # instance's primary/secondary nodes.
5019 if (level == locking.LEVEL_NODE and
5020 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
5021 self._LockInstancesNodes()
5023 def _RunAllocator(self):
5024 """Compute a new secondary node using an IAllocator.
5027 ial = IAllocator(self,
5028 mode=constants.IALLOCATOR_MODE_RELOC,
5029 name=self.op.instance_name,
5030 relocate_from=[self.sec_node])
5032 ial.Run(self.op.iallocator)
5035 raise errors.OpPrereqError("Can't compute nodes using"
5036 " iallocator '%s': %s" % (self.op.iallocator,
5038 if len(ial.nodes) != ial.required_nodes:
5039 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5040 " of nodes (%s), required %s" %
5041 (self.op.iallocator, len(ial.nodes), ial.required_nodes))
5042 self.op.remote_node = ial.nodes[0]
5043 self.LogInfo("Selected new secondary for the instance: %s",
5044 self.op.remote_node)
5046 def BuildHooksEnv(self):
5047 """Build hooks env.
5049 This runs on the master, the primary and all the secondaries.
5053 "MODE": self.op.mode,
5054 "NEW_SECONDARY": self.op.remote_node,
5055 "OLD_SECONDARY": self.instance.secondary_nodes[0],
5057 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5058 nl = [
5059 self.cfg.GetMasterNode(),
5060 self.instance.primary_node,
5061 ]
5062 if self.op.remote_node is not None:
5063 nl.append(self.op.remote_node)
5064 return env, nl, nl
5066 def CheckPrereq(self):
5067 """Check prerequisites.
5069 This checks that the instance is in the cluster.
5072 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5073 assert instance is not None, \
5074 "Cannot retrieve locked instance %s" % self.op.instance_name
5075 self.instance = instance
5077 if instance.disk_template != constants.DT_DRBD8:
5078 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
5081 if len(instance.secondary_nodes) != 1:
5082 raise errors.OpPrereqError("The instance has a strange layout,"
5083 " expected one secondary but found %d" %
5084 len(instance.secondary_nodes))
5086 self.sec_node = instance.secondary_nodes[0]
5088 if self.op.iallocator is not None:
5089 self._RunAllocator()
5091 remote_node = self.op.remote_node
5092 if remote_node is not None:
5093 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
5094 assert self.remote_node_info is not None, \
5095 "Cannot retrieve locked node %s" % remote_node
5097 self.remote_node_info = None
5098 if remote_node == instance.primary_node:
5099 raise errors.OpPrereqError("The specified node is the primary node of"
5101 elif remote_node == self.sec_node:
5102 raise errors.OpPrereqError("The specified node is already the"
5103 " secondary node of the instance.")
5105 if self.op.mode == constants.REPLACE_DISK_PRI:
5106 n1 = self.tgt_node = instance.primary_node
5107 n2 = self.oth_node = self.sec_node
5108 elif self.op.mode == constants.REPLACE_DISK_SEC:
5109 n1 = self.tgt_node = self.sec_node
5110 n2 = self.oth_node = instance.primary_node
5111 elif self.op.mode == constants.REPLACE_DISK_CHG:
5112 n1 = self.new_node = remote_node
5113 n2 = self.oth_node = instance.primary_node
5114 self.tgt_node = self.sec_node
5115 _CheckNodeNotDrained(self, remote_node)
5117 raise errors.ProgrammerError("Unhandled disk replace mode")
5119 _CheckNodeOnline(self, n1)
5120 _CheckNodeOnline(self, n2)
5122 if not self.op.disks:
5123 self.op.disks = range(len(instance.disks))
5125 for disk_idx in self.op.disks:
5126 instance.FindDisk(disk_idx)
5128 def _ExecD8DiskOnly(self, feedback_fn):
5129 """Replace a disk on the primary or secondary for dbrd8.
5131 The algorithm for replace is quite complicated:
5133 1. for each disk to be replaced:
5135 1. create new LVs on the target node with unique names
5136 1. detach old LVs from the drbd device
5137 1. rename old LVs to name_replaced.<time_t>
5138 1. rename new LVs to old LVs
5139 1. attach the new LVs (with the old names now) to the drbd device
5141 1. wait for sync across all devices
5143 1. for each modified disk:
5145 1. remove old LVs (which have the name name_replaced.<time_t>)
5147 Failures are not very well handled. (The rename dance is sketched below.)
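# A sketch of the detach/rename/attach dance for one disk (names and the
# volume group are illustrative):
#   old LV  xenvg/<uuid>.disk0_data   --rename-->  <same>_replaced-<time_t>
#   new LV  xenvg/<uuid2>.disk0_data  --rename-->  the old LV's original name
# after which call_blockdev_addchildren re-attaches the renamed new LVs to
# the still-assembled DRBD device and a resync repopulates them; only then
# are the *_replaced-<time_t> volumes removed.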
5150 steps_total = 6
5151 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5152 instance = self.instance
5153 iv_names = {}
5154 vgname = self.cfg.GetVGName()
5156 cfg = self.cfg
5157 tgt_node = self.tgt_node
5158 oth_node = self.oth_node
5160 # Step: check device activation
5161 self.proc.LogStep(1, steps_total, "check device existence")
5162 info("checking volume groups")
5163 my_vg = cfg.GetVGName()
5164 results = self.rpc.call_vg_list([oth_node, tgt_node])
5166 raise errors.OpExecError("Can't list volume groups on the nodes")
5167 for node in oth_node, tgt_node:
5168 res = results[node]
5169 if res.failed or not res.data or my_vg not in res.data:
5170 raise errors.OpExecError("Volume group '%s' not found on %s" %
5172 for idx, dev in enumerate(instance.disks):
5173 if idx not in self.op.disks:
5174 continue
5175 for node in tgt_node, oth_node:
5176 info("checking disk/%d on %s" % (idx, node))
5177 cfg.SetDiskID(dev, node)
5178 result = self.rpc.call_blockdev_find(node, dev)
5179 msg = result.RemoteFailMsg()
5180 if not msg and not result.payload:
5181 msg = "disk not found"
5183 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5186 # Step: check other node consistency
5187 self.proc.LogStep(2, steps_total, "check peer consistency")
5188 for idx, dev in enumerate(instance.disks):
5189 if idx not in self.op.disks:
5191 info("checking disk/%d consistency on %s" % (idx, oth_node))
5192 if not _CheckDiskConsistency(self, dev, oth_node,
5193 oth_node==instance.primary_node):
5194 raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5195 " to replace disks on this node (%s)" %
5196 (oth_node, tgt_node))
5198 # Step: create new storage
5199 self.proc.LogStep(3, steps_total, "allocate new storage")
5200 for idx, dev in enumerate(instance.disks):
5201 if idx not in self.op.disks:
5202 continue
5203 size = dev.size
5204 cfg.SetDiskID(dev, tgt_node)
5205 lv_names = [".disk%d_%s" % (idx, suf)
5206 for suf in ["data", "meta"]]
5207 names = _GenerateUniqueNames(self, lv_names)
5208 lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5209 logical_id=(vgname, names[0]))
5210 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5211 logical_id=(vgname, names[1]))
5212 new_lvs = [lv_data, lv_meta]
5213 old_lvs = dev.children
5214 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5215 info("creating new local storage on %s for %s" %
5216 (tgt_node, dev.iv_name))
5217 # we pass force_create=True to force the LVM creation
5218 for new_lv in new_lvs:
5219 _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5220 _GetInstanceInfoText(instance), False)
5222 # Step: for each lv, detach+rename*2+attach
5223 self.proc.LogStep(4, steps_total, "change drbd configuration")
5224 for dev, old_lvs, new_lvs in iv_names.itervalues():
5225 info("detaching %s drbd from local storage" % dev.iv_name)
5226 result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5227 msg = result.RemoteFailMsg()
5229 raise errors.OpExecError("Can't detach drbd from local storage on node"
5230 " %s for device %s: %s" %
5231 (tgt_node, dev.iv_name, msg))
5233 #cfg.Update(instance)
5235 # ok, we created the new LVs, so now we know we have the needed
5236 # storage; as such, we proceed on the target node to rename
5237 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5238 # using the assumption that logical_id == physical_id (which in
5239 # turn is the unique_id on that node)
5241 # FIXME(iustin): use a better name for the replaced LVs
5242 temp_suffix = int(time.time())
5243 ren_fn = lambda d, suff: (d.physical_id[0],
5244 d.physical_id[1] + "_replaced-%s" % suff)
5245 # build the rename list based on what LVs exist on the node
5246 rlist = []
5247 for to_ren in old_lvs:
5248 result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5249 if not result.RemoteFailMsg() and result.payload:
5251 rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5253 info("renaming the old LVs on the target node")
5254 result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5255 msg = result.RemoteFailMsg()
5257 raise errors.OpExecError("Can't rename old LVs on node %s: %s" %
5259 # now we rename the new LVs to the old LVs
5260 info("renaming the new LVs on the target node")
5261 rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5262 result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5263 msg = result.RemoteFailMsg()
5265 raise errors.OpExecError("Can't rename new LVs on node %s: %s" %
5268 for old, new in zip(old_lvs, new_lvs):
5269 new.logical_id = old.logical_id
5270 cfg.SetDiskID(new, tgt_node)
5272 for disk in old_lvs:
5273 disk.logical_id = ren_fn(disk, temp_suffix)
5274 cfg.SetDiskID(disk, tgt_node)
5276 # now that the new lvs have the old name, we can add them to the device
5277 info("adding new mirror component on %s" % tgt_node)
5278 result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5279 msg = result.RemoteFailMsg()
5280 if msg:
5281 for new_lv in new_lvs:
5282 msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
5284 warning("Can't rollback device %s: %s", dev, msg,
5285 hint="cleanup manually the unused logical volumes")
5286 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
5288 dev.children = new_lvs
5289 cfg.Update(instance)
5291 # Step: wait for sync
5293 # this can fail as the old devices are degraded and _WaitForSync
5294 # does a combined result over all disks, so we don't check its
5295 # return value
5296 self.proc.LogStep(5, steps_total, "sync devices")
5297 _WaitForSync(self, instance, unlock=True)
5299 # so check all the devices manually
5300 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5301 cfg.SetDiskID(dev, instance.primary_node)
5302 result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5303 msg = result.RemoteFailMsg()
5304 if not msg and not result.payload:
5305 msg = "disk not found"
5307 raise errors.OpExecError("Can't find DRBD device %s: %s" %
5309 if result.payload[5]: # the sixth field is the "is degraded" flag
5310 raise errors.OpExecError("DRBD device %s is degraded!" % name)
5312 # Step: remove old storage
5313 self.proc.LogStep(6, steps_total, "removing old storage")
5314 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5315 info("remove logical volumes for %s" % name)
5316 for lv in old_lvs:
5317 cfg.SetDiskID(lv, tgt_node)
5318 msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
5320 warning("Can't remove old LV: %s" % msg,
5321 hint="manually remove unused LVs")
5324 def _ExecD8Secondary(self, feedback_fn):
5325 """Replace the secondary node for drbd8.
5327 The algorithm for replace is quite complicated:
5328 - for all disks of the instance:
5329 - create new LVs on the new node with same names
5330 - shutdown the drbd device on the old secondary
5331 - disconnect the drbd network on the primary
5332 - create the drbd device on the new secondary
5333 - network attach the drbd on the primary, using an artifice:
5334 the drbd code for Attach() will connect to the network if it
5335 finds a device which is connected to the good local disks but
5336 not network enabled
5337 - wait for sync across all devices
5338 - remove all disks from the old secondary
5340 Failures are not very well handled. (The disconnect/attach trick is sketched below.)
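# Sketch of the network-level trick referenced above (illustrative): the
# primary's DRBD devices are first cut off from the network with
#   rpc.call_drbd_disconnect_net([pri_node], nodes_ip, instance.disks)
# the disks' logical_ids are then rewritten to name the new secondary and
# its freshly allocated minors, and finally
#   rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
#                            instance.disks, instance.name, False)
# lets the drbd Attach() code match the local disks and reconnect.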
5343 steps_total = 6
5344 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5345 instance = self.instance
5347 iv_names = {}
5348 cfg = self.cfg
5349 old_node = self.tgt_node
5350 new_node = self.new_node
5351 pri_node = instance.primary_node
5352 nodes_ip = {
5353 old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5354 new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5355 pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5356 }
5358 # Step: check device activation
5359 self.proc.LogStep(1, steps_total, "check device existence")
5360 info("checking volume groups")
5361 my_vg = cfg.GetVGName()
5362 results = self.rpc.call_vg_list([pri_node, new_node])
5363 for node in pri_node, new_node:
5364 res = results[node]
5365 if res.failed or not res.data or my_vg not in res.data:
5366 raise errors.OpExecError("Volume group '%s' not found on %s" %
5368 for idx, dev in enumerate(instance.disks):
5369 if idx not in self.op.disks:
5371 info("checking disk/%d on %s" % (idx, pri_node))
5372 cfg.SetDiskID(dev, pri_node)
5373 result = self.rpc.call_blockdev_find(pri_node, dev)
5374 msg = result.RemoteFailMsg()
5375 if not msg and not result.payload:
5376 msg = "disk not found"
5378 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5379 (idx, pri_node, msg))
5381 # Step: check other node consistency
5382 self.proc.LogStep(2, steps_total, "check peer consistency")
5383 for idx, dev in enumerate(instance.disks):
5384 if idx not in self.op.disks:
5386 info("checking disk/%d consistency on %s" % (idx, pri_node))
5387 if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5388 raise errors.OpExecError("Primary node (%s) has degraded storage,"
5389 " unsafe to replace the secondary" %
5392 # Step: create new storage
5393 self.proc.LogStep(3, steps_total, "allocate new storage")
5394 for idx, dev in enumerate(instance.disks):
5395 info("adding new local storage on %s for disk/%d" %
5397 # we pass force_create=True to force LVM creation
5398 for new_lv in dev.children:
5399 _CreateBlockDev(self, new_node, instance, new_lv, True,
5400 _GetInstanceInfoText(instance), False)
5402 # Step 4: dbrd minors and drbd setups changes
5403 # after this, we must manually remove the drbd minors on both the
5404 # error and the success paths
5405 minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5407 logging.debug("Allocated minors %s" % (minors,))
5408 self.proc.LogStep(4, steps_total, "changing drbd configuration")
5409 for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5411 info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5412 # create new devices on new_node; note that we create two IDs:
5413 # one without port, so the drbd will be activated without
5414 # networking information on the new node at this stage, and one
5415 # with network, for the latter activation in step 4
5416 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5417 if pri_node == o_node1:
5418 p_minor = o_minor1
5419 else:
5420 p_minor = o_minor2
5422 new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5423 new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5425 iv_names[idx] = (dev, dev.children, new_net_id)
5426 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5427 new_net_id)
5428 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5429 logical_id=new_alone_id,
5430 children=dev.children)
5431 try:
5432 _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5433 _GetInstanceInfoText(instance), False)
5434 except errors.GenericError:
5435 self.cfg.ReleaseDRBDMinors(instance.name)
5436 raise
5438 for idx, dev in enumerate(instance.disks):
5439 # we have new devices, shutdown the drbd on the old secondary
5440 info("shutting down drbd for disk/%d on old node" % idx)
5441 cfg.SetDiskID(dev, old_node)
5442 msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
5444 warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5446 hint="Please cleanup this device manually as soon as possible")
5448 info("detaching primary drbds from the network (=> standalone)")
5449 result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5450 instance.disks)[pri_node]
5452 msg = result.RemoteFailMsg()
5453 if msg:
5454 # detaches didn't succeed (unlikely)
5455 self.cfg.ReleaseDRBDMinors(instance.name)
5456 raise errors.OpExecError("Can't detach the disks from the network on"
5457 " old node: %s" % (msg,))
5459 # if we managed to detach at least one, we update all the disks of
5460 # the instance to point to the new secondary
5461 info("updating instance configuration")
5462 for dev, _, new_logical_id in iv_names.itervalues():
5463 dev.logical_id = new_logical_id
5464 cfg.SetDiskID(dev, pri_node)
5465 cfg.Update(instance)
5467 # and now perform the drbd attach
5468 info("attaching primary drbds to new secondary (standalone => connected)")
5469 result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5470 instance.disks, instance.name,
5471 False)
5472 for to_node, to_result in result.items():
5473 msg = to_result.RemoteFailMsg()
5475 warning("can't attach drbd disks on node %s: %s", to_node, msg,
5476 hint="please do a gnt-instance info to see the"
5479 # this can fail as the old devices are degraded and _WaitForSync
5480 # does a combined result over all disks, so we don't check its
5481 # return value
5482 self.proc.LogStep(5, steps_total, "sync devices")
5483 _WaitForSync(self, instance, unlock=True)
5485 # so check all the devices manually
5486 for idx, (dev, old_lvs, _) in iv_names.iteritems():
5487 cfg.SetDiskID(dev, pri_node)
5488 result = self.rpc.call_blockdev_find(pri_node, dev)
5489 msg = result.RemoteFailMsg()
5490 if not msg and not result.payload:
5491 msg = "disk not found"
5493 raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5495 if result.payload[5]: # the sixth field is the "is degraded" flag
5496 raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5498 self.proc.LogStep(6, steps_total, "removing old storage")
5499 for idx, (dev, old_lvs, _) in iv_names.iteritems():
5500 info("remove logical volumes for disk/%d" % idx)
5501 for lv in old_lvs:
5502 cfg.SetDiskID(lv, old_node)
5503 msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
5505 warning("Can't remove LV on old secondary: %s", msg,
5506 hint="Cleanup stale volumes by hand")
5508 def Exec(self, feedback_fn):
5509 """Execute disk replacement.
5511 This dispatches the disk replacement to the appropriate handler.
5514 instance = self.instance
5516 # Activate the instance disks if we're replacing them on a down instance
5517 if not instance.admin_up:
5518 _StartInstanceDisks(self, instance, True)
5520 if self.op.mode == constants.REPLACE_DISK_CHG:
5521 fn = self._ExecD8Secondary
5522 else:
5523 fn = self._ExecD8DiskOnly
5525 ret = fn(feedback_fn)
5527 # Deactivate the instance disks if we're replacing them on a down instance
5528 if not instance.admin_up:
5529 _SafeShutdownInstanceDisks(self, instance)
5531 return ret
5534 class LUGrowDisk(LogicalUnit):
5535 """Grow a disk of an instance.
5538 HPATH = "disk-grow"
5539 HTYPE = constants.HTYPE_INSTANCE
5540 _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5543 def ExpandNames(self):
5544 self._ExpandAndLockInstance()
5545 self.needed_locks[locking.LEVEL_NODE] = []
5546 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5548 def DeclareLocks(self, level):
5549 if level == locking.LEVEL_NODE:
5550 self._LockInstancesNodes()
5552 def BuildHooksEnv(self):
5553 """Build hooks env.
5555 This runs on the master, the primary and all the secondaries.
5559 "DISK": self.op.disk,
5560 "AMOUNT": self.op.amount,
5562 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5563 nl = [
5564 self.cfg.GetMasterNode(),
5565 self.instance.primary_node,
5566 ]
5567 return env, nl, nl
5569 def CheckPrereq(self):
5570 """Check prerequisites.
5572 This checks that the instance is in the cluster.
5575 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5576 assert instance is not None, \
5577 "Cannot retrieve locked instance %s" % self.op.instance_name
5578 nodenames = list(instance.all_nodes)
5579 for node in nodenames:
5580 _CheckNodeOnline(self, node)
5583 self.instance = instance
5585 if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5586 raise errors.OpPrereqError("Instance's disk layout does not support"
5589 self.disk = instance.FindDisk(self.op.disk)
5591 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5592 instance.hypervisor)
5593 for node in nodenames:
5594 info = nodeinfo[node]
5595 if info.failed or not info.data:
5596 raise errors.OpPrereqError("Cannot get current information"
5597 " from node '%s'" % node)
5598 vg_free = info.data.get('vg_free', None)
5599 if not isinstance(vg_free, int):
5600 raise errors.OpPrereqError("Can't compute free disk space on"
5602 if self.op.amount > vg_free:
5603 raise errors.OpPrereqError("Not enough disk space on target node %s:"
5604 " %d MiB available, %d MiB required" %
5605 (node, vg_free, self.op.amount))
5607 def Exec(self, feedback_fn):
5608 """Execute disk grow.
5611 instance = self.instance
5612 disk = self.disk
5613 for node in instance.all_nodes:
5614 self.cfg.SetDiskID(disk, node)
5615 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5616 msg = result.RemoteFailMsg()
5618 raise errors.OpExecError("Grow request failed to node %s: %s" %
5620 disk.RecordGrow(self.op.amount)
5621 self.cfg.Update(instance)
5622 if self.op.wait_for_sync:
5623 disk_abort = not _WaitForSync(self, instance)
5625 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5626 " status.\nPlease check the instance.")
5629 class LUQueryInstanceData(NoHooksLU):
5630 """Query runtime instance data.
5633 _OP_REQP = ["instances", "static"]
5636 def ExpandNames(self):
5637 self.needed_locks = {}
5638 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
5640 if not isinstance(self.op.instances, list):
5641 raise errors.OpPrereqError("Invalid argument type 'instances'")
5643 if self.op.instances:
5644 self.wanted_names = []
5645 for name in self.op.instances:
5646 full_name = self.cfg.ExpandInstanceName(name)
5647 if full_name is None:
5648 raise errors.OpPrereqError("Instance '%s' not known" % name)
5649 self.wanted_names.append(full_name)
5650 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5652 self.wanted_names = None
5653 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5655 self.needed_locks[locking.LEVEL_NODE] = []
5656 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5658 def DeclareLocks(self, level):
5659 if level == locking.LEVEL_NODE:
5660 self._LockInstancesNodes()
5662 def CheckPrereq(self):
5663 """Check prerequisites.
5665 This only checks the optional instance list against the existing names.
5668 if self.wanted_names is None:
5669 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5671 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5672 in self.wanted_names]
5675 def _ComputeDiskStatus(self, instance, snode, dev):
5676 """Compute block device status.
5679 static = self.op.static
5680 if not static:
5681 self.cfg.SetDiskID(dev, instance.primary_node)
5682 dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5683 if dev_pstatus.offline:
5684 dev_pstatus = None
5685 else:
5686 msg = dev_pstatus.RemoteFailMsg()
5687 if msg:
5688 raise errors.OpExecError("Can't compute disk status for %s: %s" %
5689 (instance.name, msg))
5690 dev_pstatus = dev_pstatus.payload
5691 else:
5692 dev_pstatus = None
5694 if dev.dev_type in constants.LDS_DRBD:
5695 # we change the snode then (otherwise we use the one passed in)
5696 if dev.logical_id[0] == instance.primary_node:
5697 snode = dev.logical_id[1]
5699 snode = dev.logical_id[0]
5701 if snode and not static:
5702 self.cfg.SetDiskID(dev, snode)
5703 dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5704 if dev_sstatus.offline:
5705 dev_sstatus = None
5706 else:
5707 msg = dev_sstatus.RemoteFailMsg()
5708 if msg:
5709 raise errors.OpExecError("Can't compute disk status for %s: %s" %
5710 (instance.name, msg))
5711 dev_sstatus = dev_sstatus.payload
5716 dev_children = [self._ComputeDiskStatus(instance, snode, child)
5717 for child in dev.children]
5722 "iv_name": dev.iv_name,
5723 "dev_type": dev.dev_type,
5724 "logical_id": dev.logical_id,
5725 "physical_id": dev.physical_id,
5726 "pstatus": dev_pstatus,
5727 "sstatus": dev_sstatus,
5728 "children": dev_children,

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
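
# Illustrative sketch, not part of the original module: Exec above returns a
# dict keyed by instance name; the field values here are invented.
#
#   {"inst1.example.com": {
#       "name": "inst1.example.com",
#       "config_state": "up",             # from instance.admin_up
#       "run_state": "down",              # None when static data was asked for
#       "pnode": "node1.example.com",
#       "snodes": ["node2.example.com"],
#       "nics": [("aa:00:00:11:22:33", "192.0.2.10", "xen-br0")],
#       "disks": [...],                   # per-disk _ComputeDiskStatus output
#   }}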

class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)

      if nic_op == constants.DDM_ADD:
        nic_bridge = nic_dict.get('bridge', None)
        if nic_bridge is None:
          nic_dict['bridge'] = self.cfg.GetDefBridge()
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic")

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'bridge' in this_nic_override:
          bridge = this_nic_override['bridge']
        else:
          bridge = nic.bridge
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        args['nics'].append((ip, bridge, mac))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        bridge = nic_override[constants.DDM_ADD]['bridge']
        mac = nic_override[constants.DDM_ADD]['mac']
        args['nics'].append((ip, bridge, mac))
      elif constants.DDM_REMOVE in nic_override:
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    force = self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict = copy.deepcopy(instance.hvparams)
      for key, val in self.op.hvparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_hvdict[key]
          except KeyError:
            pass
        else:
          i_hvdict[key] = val
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                i_hvdict)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = copy.deepcopy(instance.beparams)
      for key, val in self.op.beparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_bedict[key]
          except KeyError:
            pass
        else:
          i_bedict[key] = val
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if not instance_info.failed and instance_info.data:
          current_mem = int(instance_info.data['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    nodeinfo[pnode].data['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.iteritems():
          if node not in instance.secondary_nodes:
            continue
          if nres.failed or not isinstance(nres.data, dict):
            self.warn.append("Can't get info from secondary node %s" % node)
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics)))
      if 'bridge' in nic_dict:
        nic_bridge = nic_dict['bridge']
        if nic_bridge is None:
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
          msg = ("Bridge '%s' doesn't exist on one of"
                 " the instance nodes" % nic_bridge)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None')
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC()
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(nic_mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        if ins_l.failed or not isinstance(ins_l.data, list):
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
        if instance.name in ins_l.data:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks.")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)))

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))

    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        bridge = nic_dict['bridge']
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=bridge)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    self.cfg.Update(instance)

    return result
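
# Illustrative sketch, not part of the original module: Exec above returns a
# list of (parameter, new value) pairs describing the applied changes, e.g.
# (values invented):
#
#   [("disk/1", "add:size=1024,mode=w"),
#    ("nic.bridge/0", "xen-br1"),
#    ("be/memory", 512)]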

class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].failed:
        result[node] = False
      else:
        result[node] = rpcresult[node].data

    return result
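
# Illustrative sketch, not part of the original module: a possible return
# value of LUQueryExports.Exec, with invented names; False marks a node that
# could not be queried.
#
#   {"node1.example.com": ["inst1.example.com", "inst2.example.com"],
#    "node2.example.com": False}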

class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, src_node, msg))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s: %s", dev.logical_id[1], src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)

class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    exportlist = self.rpc.call_export_list(self.acquired_locks[
      locking.LEVEL_NODE])
    found = False
    for node in exportlist:
      if exportlist[node].failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name in exportlist[node].data:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        if result.failed or not result.data:
          logging.error("Could not remove export for instance %s"
                        " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")

class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))

class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())

class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err))

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results

class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")

class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")

class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))

class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
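
  # Illustrative sketch, not part of the original module: an allocation
  # request as the LUs in this module build it (all values invented):
  #
  #   ial = IAllocator(self, mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="inst1.example.com",
  #                    mem_size=512, disks=[{"size": 1024, "mode": "w"}],
  #                    disk_template=constants.DT_DRBD8,
  #                    os="debian-etch", tags=[], vcpus=1,
  #                    nics=[{"mac": "auto", "ip": None, "bridge": None}],
  #                    hypervisor=constants.HT_XEN_PVM)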

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                     cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not ninfo.offline:
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].data:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr.update({
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          })

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)
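
  # Illustrative sketch, not part of the original module: the resulting
  # self.in_text is a JSON document handed to the external allocator script,
  # shaped roughly like this (heavily abridged, values invented):
  #
  #   {"cluster_name": "cluster.example.com",
  #    "nodes": {"node1.example.com": {"total_memory": 4096, ...}},
  #    "instances": {...},
  #    "request": {"type": "allocate", "name": "inst1.example.com",
  #                "memory": 512, "required_nodes": 2, ...}}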

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " not a list")
    self.out_data = rdict
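
  # Illustrative sketch, not part of the original module: a well-formed
  # allocator answer, as enforced by _ValidateResult above (values invented):
  #
  #   {"success": true,
  #    "info": "allocation successful",
  #    "nodes": ["node2.example.com", "node3.example.com"]}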

class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result