4 # Copyright (C) 2006, 2007, 2008 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0613,W0201
import os
import re
import time
import logging
from ganeti import ssh
37 from ganeti import utils
38 from ganeti import errors
39 from ganeti import hypervisor
40 from ganeti import locking
41 from ganeti import constants
42 from ganeti import objects
43 from ganeti import opcodes
44 from ganeti import serializer
45 from ganeti import ssconf
48 class LogicalUnit(object):
49 """Logical Unit base class.
51 Subclasses must follow these rules:
52 - implement ExpandNames
53 - implement CheckPrereq
55 - implement BuildHooksEnv
56 - redefine HPATH and HTYPE
57 - optionally redefine their run requirements:
58 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
60 Note that all commands require root permissions.
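A minimal, purely illustrative subclass (the LU name and its opcode fields
are hypothetical; NoHooksLU and _GetWantedNodes are defined later in this
module) could look like::

  class LUExampleQueryNodes(NoHooksLU):
    _OP_REQP = ["nodes"]
    REQ_BGL = False

    def ExpandNames(self):
      self.needed_locks = {
        locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
        }

    def CheckPrereq(self):
      pass

    def Exec(self, feedback_fn):
      return self.needed_locks[locking.LEVEL_NODE]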
68 def __init__(self, processor, op, context, rpc):
69 """Constructor for LogicalUnit.
This needs to be overridden in derived classes in order to check op validity.
77 self.cfg = context.cfg
78 self.context = context
80 # Dicts used to declare locking needs to mcpu
81 self.needed_locks = None
82 self.acquired_locks = {}
83 self.share_locks = dict(((i, 0) for i in locking.LEVELS))
85 self.remove_locks = {}
86 # Used to force good behavior when calling helper functions
87 self.recalculate_locks = {}
90 self.LogWarning = processor.LogWarning
91 self.LogInfo = processor.LogInfo
93 for attr_name in self._OP_REQP:
94 attr_val = getattr(op, attr_name, None)
96 raise errors.OpPrereqError("Required parameter '%s' missing" %
101 """Returns the SshRunner object
105 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
108 ssh = property(fget=__GetSSH)
110 def CheckArguments(self):
111 """Check syntactic validity for the opcode arguments.
This method is for doing a simple syntactic check and ensuring the
validity of opcode parameters, without any cluster-related
checks. While the same can be accomplished in ExpandNames and/or
CheckPrereq, doing these separately is better because:
- ExpandNames is left as purely a lock-related function
- CheckPrereq is run after we have acquired locks (and possibly other
  expensive checks)
The function is allowed to change the self.op attribute so that
later methods no longer need to worry about missing parameters.
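A sketch of a possible implementation, assuming a hypothetical boolean
'force' parameter that is not defined by this base class::

  def CheckArguments(self):
    if not hasattr(self.op, "force"):
      self.op.force = False
    if not isinstance(self.op.force, bool):
      raise errors.OpPrereqError("Invalid 'force' parameter")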
128 def ExpandNames(self):
129 """Expand names for this LU.
131 This method is called before starting to execute the opcode, and it should
132 update all the parameters of the opcode to their canonical form (e.g. a
133 short node name must be fully expanded after this method has successfully
completed). This way locking, hooks, logging, etc. can work correctly.
136 LUs which implement this method must also populate the self.needed_locks
137 member, as a dict with lock levels as keys, and a list of needed lock names
140 - use an empty dict if you don't need any lock
141 - if you don't need any lock at a particular level omit that level
142 - don't put anything for the BGL level
143 - if you want all locks at a level use locking.ALL_SET as a value
145 If you need to share locks (rather than acquire them exclusively) at one
146 level you can modify self.share_locks, setting a true value (usually 1) for
147 that level. By default locks are not shared.
151 # Acquire all nodes and one instance
152 self.needed_locks = {
153 locking.LEVEL_NODE: locking.ALL_SET,
154 locking.LEVEL_INSTANCE: ['instance1.example.tld'],
156 # Acquire just two nodes
157 self.needed_locks = {
158 locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
161 self.needed_locks = {} # No, you can't leave it to the default value None
164 # The implementation of this method is mandatory only if the new LU is
# concurrent, so that old LUs don't need to be changed all at the same time.
168 self.needed_locks = {} # Exclusive LUs don't need locks.
170 raise NotImplementedError
172 def DeclareLocks(self, level):
173 """Declare LU locking needs for a level
175 While most LUs can just declare their locking needs at ExpandNames time,
176 sometimes there's the need to calculate some locks after having acquired
177 the ones before. This function is called just before acquiring locks at a
178 particular level, but after acquiring the ones at lower levels, and permits
179 such calculations. It can be used to modify self.needed_locks, and by
180 default it does nothing.
182 This function is only called if you have something already set in
183 self.needed_locks for the level.
185 @param level: Locking level which is going to be locked
186 @type level: member of ganeti.locking.LEVELS
190 def CheckPrereq(self):
191 """Check prerequisites for this LU.
193 This method should check that the prerequisites for the execution
194 of this LU are fulfilled. It can do internode communication, but
it should be idempotent - no cluster or system changes are allowed.
198 The method should raise errors.OpPrereqError in case something is
199 not fulfilled. Its return value is ignored.
201 This method should also update all the parameters of the opcode to
202 their canonical form if it hasn't been done by ExpandNames before.
205 raise NotImplementedError
207 def Exec(self, feedback_fn):
210 This method should implement the actual work. It should raise
errors.OpExecError for failures that are somewhat dealt with in code, or expected.
215 raise NotImplementedError
217 def BuildHooksEnv(self):
218 """Build hooks environment for this LU.
This method should return a three-element tuple consisting of: a dict
221 containing the environment that will be used for running the
222 specific hook for this LU, a list of node names on which the hook
223 should run before the execution, and a list of node names on which
224 the hook should run after the execution.
226 The keys of the dict must not have 'GANETI_' prefixed as this will
227 be handled in the hooks runner. Also note additional keys will be
228 added by the hooks runner. If the LU doesn't define any
229 environment, an empty dict (and not None) should be returned.
If there are no nodes, an empty list (and not None) should be returned.
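As a purely illustrative sketch, an LU acting on a single instance (assuming
its opcode carries an instance_name field) could build its return value
like::

  env = {
    "INSTANCE_NAME": self.op.instance_name,
    }
  nl = [self.cfg.GetMasterNode()]
  return env, nl, nl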
Note that if the HPATH for a LU class is None, this function will not be called.
237 raise NotImplementedError
239 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
240 """Notify the LU about the results of its hooks.
242 This method is called every time a hooks phase is executed, and notifies
243 the Logical Unit about the hooks' result. The LU can then use it to alter
244 its result based on the hooks. By default the method does nothing and the
245 previous result is passed back unchanged but any LU can define it if it
246 wants to use the local cluster hook-scripts somehow.
248 @param phase: one of L{constants.HOOKS_PHASE_POST} or
249 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
250 @param hook_results: the results of the multi-node hooks rpc call
@param feedback_fn: function used to send feedback back to the caller
252 @param lu_result: the previous Exec result this LU had, or None
254 @return: the new Exec result, based on the previous result
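The default behaviour described above is therefore equivalent to simply
doing::

  return lu_result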
260 def _ExpandAndLockInstance(self):
261 """Helper function to expand and lock an instance.
263 Many LUs that work on an instance take its name in self.op.instance_name
264 and need to expand it and then declare the expanded name for locking. This
265 function does it, and then updates self.op.instance_name to the expanded
name. It also initializes needed_locks as a dict, if this hasn't been done before.
270 if self.needed_locks is None:
271 self.needed_locks = {}
273 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
274 "_ExpandAndLockInstance called with instance-level locks set"
275 expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
276 if expanded_name is None:
277 raise errors.OpPrereqError("Instance '%s' not known" %
278 self.op.instance_name)
279 self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
280 self.op.instance_name = expanded_name
282 def _LockInstancesNodes(self, primary_only=False):
283 """Helper function to declare instances' nodes for locking.
285 This function should be called after locking one or more instances to lock
286 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
287 with all primary or secondary nodes for instances already locked and
288 present in self.needed_locks[locking.LEVEL_INSTANCE].
290 It should be called from DeclareLocks, and for safety only works if
291 self.recalculate_locks[locking.LEVEL_NODE] is set.
In the future it may grow parameters to just lock some instances' nodes, or
to just lock primary or secondary nodes, if needed.
It should be called in DeclareLocks in a way similar to::
298 if level == locking.LEVEL_NODE:
299 self._LockInstancesNodes()
301 @type primary_only: boolean
302 @param primary_only: only lock primary nodes of locked instances
305 assert locking.LEVEL_NODE in self.recalculate_locks, \
306 "_LockInstancesNodes helper function called with no nodes to recalculate"
# TODO: check whether we've really been called with the instance locks held
310 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
311 # future we might want to have different behaviors depending on the value
312 # of self.recalculate_locks[locking.LEVEL_NODE]
314 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
315 instance = self.context.cfg.GetInstanceInfo(instance_name)
316 wanted_nodes.append(instance.primary_node)
318 wanted_nodes.extend(instance.secondary_nodes)
320 if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
321 self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
322 elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
323 self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
325 del self.recalculate_locks[locking.LEVEL_NODE]
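# Illustrative sketch only (these method bodies are hypothetical, not part
# of this class): an instance-level LU would typically combine
# _ExpandAndLockInstance and _LockInstancesNodes like this:
#
#   def ExpandNames(self):
#     self._ExpandAndLockInstance()
#     self.needed_locks[locking.LEVEL_NODE] = []
#     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#   def DeclareLocks(self, level):
#     if level == locking.LEVEL_NODE:
#       self._LockInstancesNodes()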
328 class NoHooksLU(LogicalUnit):
329 """Simple LU which runs no hooks.
331 This LU is intended as a parent for other LogicalUnits which will
332 run no hooks, in order to reduce duplicate code.
339 def _GetWantedNodes(lu, nodes):
340 """Returns list of checked and expanded node names.
342 @type lu: L{LogicalUnit}
343 @param lu: the logical unit on whose behalf we execute
345 @param nodes: list of node names or None for all nodes
347 @return: the list of nodes, sorted
@raise errors.OpPrereqError: if the nodes parameter is wrong type
351 if not isinstance(nodes, list):
352 raise errors.OpPrereqError("Invalid argument type 'nodes'")
355 raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
356 " non-empty list of nodes whose name is to be expanded.")
360 node = lu.cfg.ExpandNodeName(name)
362 raise errors.OpPrereqError("No such node name '%s'" % name)
365 return utils.NiceSort(wanted)
368 def _GetWantedInstances(lu, instances):
369 """Returns list of checked and expanded instance names.
371 @type lu: L{LogicalUnit}
372 @param lu: the logical unit on whose behalf we execute
373 @type instances: list
374 @param instances: list of instance names or None for all instances
376 @return: the list of instances, sorted
377 @raise errors.OpPrereqError: if the instances parameter is wrong type
378 @raise errors.OpPrereqError: if any of the passed instances is not found
381 if not isinstance(instances, list):
382 raise errors.OpPrereqError("Invalid argument type 'instances'")
387 for name in instances:
388 instance = lu.cfg.ExpandInstanceName(name)
390 raise errors.OpPrereqError("No such instance name '%s'" % name)
391 wanted.append(instance)
394 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
398 def _CheckOutputFields(static, dynamic, selected):
399 """Checks whether all selected fields are valid.
401 @type static: L{utils.FieldSet}
402 @param static: static fields set
403 @type dynamic: L{utils.FieldSet}
404 @param dynamic: dynamic fields set
411 delta = f.NonMatching(selected)
413 raise errors.OpPrereqError("Unknown output fields selected: %s"
417 def _CheckBooleanOpField(op, name):
418 """Validates boolean opcode parameters.
420 This will ensure that an opcode parameter is either a boolean value,
421 or None (but that it always exists).
424 val = getattr(op, name, None)
425 if not (val is None or isinstance(val, bool)):
426 raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
428 setattr(op, name, val)
431 def _CheckNodeOnline(lu, node):
432 """Ensure that a given node is online.
434 @param lu: the LU on behalf of which we make the check
435 @param node: the node to check
436 @raise errors.OpPrereqError: if the node is offline
439 if lu.cfg.GetNodeInfo(node).offline:
440 raise errors.OpPrereqError("Can't use offline node %s" % node)
443 def _CheckNodeNotDrained(lu, node):
444 """Ensure that a given node is not drained.
446 @param lu: the LU on behalf of which we make the check
447 @param node: the node to check
448 @raise errors.OpPrereqError: if the node is drained
451 if lu.cfg.GetNodeInfo(node).drained:
452 raise errors.OpPrereqError("Can't use drained node %s" % node)
455 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
456 memory, vcpus, nics, disk_template, disks):
457 """Builds instance related env variables for hooks
459 This builds the hook environment from individual variables.
462 @param name: the name of the instance
463 @type primary_node: string
464 @param primary_node: the name of the instance's primary node
465 @type secondary_nodes: list
466 @param secondary_nodes: list of secondary nodes as strings
467 @type os_type: string
468 @param os_type: the name of the instance's OS
469 @type status: boolean
470 @param status: the should_run status of the instance
472 @param memory: the memory size of the instance
474 @param vcpus: the count of VCPUs the instance has
476 @param nics: list of tuples (ip, bridge, mac) representing
477 the NICs the instance has
478 @type disk_template: string
@param disk_template: the disk template of the instance
481 @param disks: the list of (size, mode) pairs
483 @return: the hook environment for this instance
492 "INSTANCE_NAME": name,
493 "INSTANCE_PRIMARY": primary_node,
494 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
495 "INSTANCE_OS_TYPE": os_type,
496 "INSTANCE_STATUS": str_status,
497 "INSTANCE_MEMORY": memory,
498 "INSTANCE_VCPUS": vcpus,
499 "INSTANCE_DISK_TEMPLATE": disk_template,
503 nic_count = len(nics)
504 for idx, (ip, bridge, mac) in enumerate(nics):
507 env["INSTANCE_NIC%d_IP" % idx] = ip
508 env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
509 env["INSTANCE_NIC%d_MAC" % idx] = mac
513 env["INSTANCE_NIC_COUNT"] = nic_count
516 disk_count = len(disks)
517 for idx, (size, mode) in enumerate(disks):
518 env["INSTANCE_DISK%d_SIZE" % idx] = size
519 env["INSTANCE_DISK%d_MODE" % idx] = mode
523 env["INSTANCE_DISK_COUNT"] = disk_count
528 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
529 """Builds instance related env variables for hooks from an object.
531 @type lu: L{LogicalUnit}
532 @param lu: the logical unit on whose behalf we execute
533 @type instance: L{objects.Instance}
534 @param instance: the instance for which we should build the
537 @param override: dictionary with key/values that will override
540 @return: the hook environment dictionary
543 bep = lu.cfg.GetClusterInfo().FillBE(instance)
545 'name': instance.name,
546 'primary_node': instance.primary_node,
547 'secondary_nodes': instance.secondary_nodes,
548 'os_type': instance.os,
549 'status': instance.admin_up,
550 'memory': bep[constants.BE_MEMORY],
551 'vcpus': bep[constants.BE_VCPUS],
552 'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
553 'disk_template': instance.disk_template,
554 'disks': [(disk.size, disk.mode) for disk in instance.disks],
557 args.update(override)
558 return _BuildInstanceHookEnv(**args)
561 def _AdjustCandidatePool(lu):
562 """Adjust the candidate pool after node operations.
565 mod_list = lu.cfg.MaintainCandidatePool()
567 lu.LogInfo("Promoted nodes to master candidate role: %s",
568 ", ".join(node.name for node in mod_list))
569 for name in mod_list:
570 lu.context.ReaddNode(name)
571 mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
573 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
577 def _CheckInstanceBridgesExist(lu, instance):
578 """Check that the brigdes needed by an instance exist.
581 # check bridges existance
582 brlist = [nic.bridge for nic in instance.nics]
583 result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
586 raise errors.OpPrereqError("One or more target bridges %s does not"
587 " exist on destination node '%s'" %
588 (brlist, instance.primary_node))
591 class LUDestroyCluster(NoHooksLU):
592 """Logical unit for destroying the cluster.
597 def CheckPrereq(self):
598 """Check prerequisites.
600 This checks whether the cluster is empty.
602 Any errors are signalled by raising errors.OpPrereqError.
605 master = self.cfg.GetMasterNode()
607 nodelist = self.cfg.GetNodeList()
608 if len(nodelist) != 1 or nodelist[0] != master:
609 raise errors.OpPrereqError("There are still %d node(s) in"
610 " this cluster." % (len(nodelist) - 1))
611 instancelist = self.cfg.GetInstanceList()
613 raise errors.OpPrereqError("There are still %d instance(s) in"
614 " this cluster." % len(instancelist))
616 def Exec(self, feedback_fn):
617 """Destroys the cluster.
620 master = self.cfg.GetMasterNode()
621 result = self.rpc.call_node_stop_master(master, False)
624 raise errors.OpExecError("Could not disable the master role")
625 priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
626 utils.CreateBackup(priv_key)
627 utils.CreateBackup(pub_key)
631 class LUVerifyCluster(LogicalUnit):
632 """Verifies the cluster status.
635 HPATH = "cluster-verify"
636 HTYPE = constants.HTYPE_CLUSTER
637 _OP_REQP = ["skip_checks"]
640 def ExpandNames(self):
641 self.needed_locks = {
642 locking.LEVEL_NODE: locking.ALL_SET,
643 locking.LEVEL_INSTANCE: locking.ALL_SET,
645 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
647 def _VerifyNode(self, nodeinfo, file_list, local_cksum,
648 node_result, feedback_fn, master_files,
650 """Run multiple tests against a node.
654 - compares ganeti version
- checks vg existence and size > 20G
656 - checks config file checksum
657 - checks ssh to other nodes
659 @type nodeinfo: L{objects.Node}
660 @param nodeinfo: the node to check
661 @param file_list: required list of files
662 @param local_cksum: dictionary of local files and their checksums
663 @param node_result: the results from the node
664 @param feedback_fn: function used to accumulate results
665 @param master_files: list of files that only masters should have
@param drbd_map: the used drbd minors for this node, in
the form of minor: (instance, must_exist), which correspond to instances
668 and their running status
669 @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
674 # main result, node_result should be a non-empty dict
675 if not node_result or not isinstance(node_result, dict):
676 feedback_fn(" - ERROR: unable to verify node %s." % (node,))
679 # compares ganeti version
680 local_version = constants.PROTOCOL_VERSION
681 remote_version = node_result.get('version', None)
682 if not (remote_version and isinstance(remote_version, (list, tuple)) and
683 len(remote_version) == 2):
684 feedback_fn(" - ERROR: connection to %s failed" % (node))
687 if local_version != remote_version[0]:
688 feedback_fn(" - ERROR: incompatible protocol versions: master %s,"
689 " node %s %s" % (local_version, node, remote_version[0]))
692 # node seems compatible, we can actually try to look into its results
696 # full package version
697 if constants.RELEASE_VERSION != remote_version[1]:
698 feedback_fn(" - WARNING: software version mismatch: master %s,"
700 (constants.RELEASE_VERSION, node, remote_version[1]))
702 # checks vg existence and size > 20G
703 if vg_name is not None:
704 vglist = node_result.get(constants.NV_VGLIST, None)
706 feedback_fn(" - ERROR: unable to check volume groups on node %s." %
710 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
711 constants.MIN_VG_SIZE)
713 feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node))
716 # checks config file checksum
718 remote_cksum = node_result.get(constants.NV_FILELIST, None)
719 if not isinstance(remote_cksum, dict):
721 feedback_fn(" - ERROR: node hasn't returned file checksum data")
723 for file_name in file_list:
724 node_is_mc = nodeinfo.master_candidate
725 must_have_file = file_name not in master_files
726 if file_name not in remote_cksum:
727 if node_is_mc or must_have_file:
729 feedback_fn(" - ERROR: file '%s' missing" % file_name)
730 elif remote_cksum[file_name] != local_cksum[file_name]:
731 if node_is_mc or must_have_file:
733 feedback_fn(" - ERROR: file '%s' has wrong checksum" % file_name)
735 # not candidate and this is not a must-have file
737 feedback_fn(" - ERROR: non master-candidate has old/wrong file"
740 # all good, except non-master/non-must have combination
741 if not node_is_mc and not must_have_file:
742 feedback_fn(" - ERROR: file '%s' should not exist on non master"
743 " candidates" % file_name)
747 if constants.NV_NODELIST not in node_result:
749 feedback_fn(" - ERROR: node hasn't returned node ssh connectivity data")
751 if node_result[constants.NV_NODELIST]:
753 for node in node_result[constants.NV_NODELIST]:
754 feedback_fn(" - ERROR: ssh communication with node '%s': %s" %
755 (node, node_result[constants.NV_NODELIST][node]))
757 if constants.NV_NODENETTEST not in node_result:
759 feedback_fn(" - ERROR: node hasn't returned node tcp connectivity data")
761 if node_result[constants.NV_NODENETTEST]:
763 nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
765 feedback_fn(" - ERROR: tcp communication with node '%s': %s" %
766 (node, node_result[constants.NV_NODENETTEST][node]))
768 hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
769 if isinstance(hyp_result, dict):
770 for hv_name, hv_result in hyp_result.iteritems():
771 if hv_result is not None:
772 feedback_fn(" - ERROR: hypervisor %s verify failure: '%s'" %
773 (hv_name, hv_result))
775 # check used drbd list
776 if vg_name is not None:
777 used_minors = node_result.get(constants.NV_DRBDLIST, [])
778 if not isinstance(used_minors, (tuple, list)):
779 feedback_fn(" - ERROR: cannot parse drbd status file: %s" %
782 for minor, (iname, must_exist) in drbd_map.items():
783 if minor not in used_minors and must_exist:
784 feedback_fn(" - ERROR: drbd minor %d of instance %s is"
785 " not active" % (minor, iname))
787 for minor in used_minors:
788 if minor not in drbd_map:
789 feedback_fn(" - ERROR: unallocated drbd minor %d is in use" %
795 def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
796 node_instance, feedback_fn, n_offline):
797 """Verify an instance.
799 This function checks to see if the required block devices are
800 available on the instance's node.
805 node_current = instanceconfig.primary_node
808 instanceconfig.MapLVsByNode(node_vol_should)
810 for node in node_vol_should:
811 if node in n_offline:
812 # ignore missing volumes on offline nodes
814 for volume in node_vol_should[node]:
815 if node not in node_vol_is or volume not in node_vol_is[node]:
816 feedback_fn(" - ERROR: volume %s missing on node %s" %
820 if instanceconfig.admin_up:
821 if ((node_current not in node_instance or
822 not instance in node_instance[node_current]) and
823 node_current not in n_offline):
824 feedback_fn(" - ERROR: instance %s not running on node %s" %
825 (instance, node_current))
828 for node in node_instance:
829 if (not node == node_current):
830 if instance in node_instance[node]:
831 feedback_fn(" - ERROR: instance %s should not run on node %s" %
837 def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
838 """Verify if there are any unknown volumes in the cluster.
The .os, .swap and backup volumes are ignored. All other volumes are reported as unknown.
846 for node in node_vol_is:
847 for volume in node_vol_is[node]:
848 if node not in node_vol_should or volume not in node_vol_should[node]:
849 feedback_fn(" - ERROR: volume %s on node %s should not exist" %
854 def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
855 """Verify the list of running instances.
857 This checks what instances are running but unknown to the cluster.
861 for node in node_instance:
862 for runninginstance in node_instance[node]:
863 if runninginstance not in instancelist:
864 feedback_fn(" - ERROR: instance %s on node %s should not exist" %
865 (runninginstance, node))
869 def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
870 """Verify N+1 Memory Resilience.
Check that if one single node dies we can still start all the instances it was primary for.
878 for node, nodeinfo in node_info.iteritems():
879 # This code checks that every node which is now listed as secondary has
880 # enough memory to host all instances it is supposed to should a single
881 # other node in the cluster fail.
882 # FIXME: not ready for failover to an arbitrary node
883 # FIXME: does not support file-backed instances
884 # WARNING: we currently take into account down instances as well as up
885 # ones, considering that even if they're down someone might want to start
886 # them even in the event of a node failure.
887 for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
889 for instance in instances:
890 bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
891 if bep[constants.BE_AUTO_BALANCE]:
892 needed_mem += bep[constants.BE_MEMORY]
893 if nodeinfo['mfree'] < needed_mem:
894 feedback_fn(" - ERROR: not enough memory on node %s to accomodate"
895 " failovers should node %s fail" % (node, prinode))
899 def CheckPrereq(self):
900 """Check prerequisites.
902 Transform the list of checks we're going to skip into a set and check that
903 all its members are valid.
906 self.skip_set = frozenset(self.op.skip_checks)
907 if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
908 raise errors.OpPrereqError("Invalid checks to be skipped specified")
910 def BuildHooksEnv(self):
Cluster-Verify hooks run only in the post phase; if they fail, their output
is logged in the verify output and the verification fails.
917 all_nodes = self.cfg.GetNodeList()
919 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
921 for node in self.cfg.GetAllNodesInfo().values():
922 env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
924 return env, [], all_nodes
926 def Exec(self, feedback_fn):
927 """Verify integrity of cluster, performing various test on nodes.
931 feedback_fn("* Verifying global settings")
932 for msg in self.cfg.VerifyConfig():
933 feedback_fn(" - ERROR: %s" % msg)
935 vg_name = self.cfg.GetVGName()
936 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
937 nodelist = utils.NiceSort(self.cfg.GetNodeList())
938 nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
939 instancelist = utils.NiceSort(self.cfg.GetInstanceList())
940 instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
941 for iname in instancelist)
942 i_non_redundant = [] # Non redundant instances
943 i_non_a_balanced = [] # Non auto-balanced instances
944 n_offline = [] # List of offline nodes
945 n_drained = [] # List of nodes being drained
951 # FIXME: verify OS list
953 master_files = [constants.CLUSTER_CONF_FILE]
955 file_names = ssconf.SimpleStore().GetFileList()
956 file_names.append(constants.SSL_CERT_FILE)
957 file_names.append(constants.RAPI_CERT_FILE)
958 file_names.extend(master_files)
960 local_checksums = utils.FingerprintFiles(file_names)
962 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
963 node_verify_param = {
964 constants.NV_FILELIST: file_names,
965 constants.NV_NODELIST: [node.name for node in nodeinfo
966 if not node.offline],
967 constants.NV_HYPERVISOR: hypervisors,
968 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
969 node.secondary_ip) for node in nodeinfo
970 if not node.offline],
971 constants.NV_INSTANCELIST: hypervisors,
972 constants.NV_VERSION: None,
973 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
975 if vg_name is not None:
976 node_verify_param[constants.NV_VGLIST] = None
977 node_verify_param[constants.NV_LVLIST] = vg_name
978 node_verify_param[constants.NV_DRBDLIST] = None
979 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
980 self.cfg.GetClusterName())
982 cluster = self.cfg.GetClusterInfo()
983 master_node = self.cfg.GetMasterNode()
984 all_drbd_map = self.cfg.ComputeDRBDMap()
986 for node_i in nodeinfo:
988 nresult = all_nvinfo[node].data
991 feedback_fn("* Skipping offline node %s" % (node,))
992 n_offline.append(node)
995 if node == master_node:
997 elif node_i.master_candidate:
998 ntype = "master candidate"
1001 n_drained.append(node)
1004 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1006 if all_nvinfo[node].failed or not isinstance(nresult, dict):
1007 feedback_fn(" - ERROR: connection to %s failed" % (node,))
1012 for minor, instance in all_drbd_map[node].items():
1013 if instance not in instanceinfo:
1014 feedback_fn(" - ERROR: ghost instance '%s' in temporary DRBD map" %
1016 # ghost instance should not be running, but otherwise we
1017 # don't give double warnings (both ghost instance and
1018 # unallocated minor in use)
1019 node_drbd[minor] = (instance, False)
1021 instance = instanceinfo[instance]
1022 node_drbd[minor] = (instance.name, instance.admin_up)
1023 result = self._VerifyNode(node_i, file_names, local_checksums,
1024 nresult, feedback_fn, master_files,
1028 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1030 node_volume[node] = {}
1031 elif isinstance(lvdata, basestring):
1032 feedback_fn(" - ERROR: LVM problem on node %s: %s" %
1033 (node, utils.SafeEncode(lvdata)))
1035 node_volume[node] = {}
1036 elif not isinstance(lvdata, dict):
1037 feedback_fn(" - ERROR: connection to %s failed (lvlist)" % (node,))
1041 node_volume[node] = lvdata
1044 idata = nresult.get(constants.NV_INSTANCELIST, None)
1045 if not isinstance(idata, list):
1046 feedback_fn(" - ERROR: connection to %s failed (instancelist)" %
1051 node_instance[node] = idata
1054 nodeinfo = nresult.get(constants.NV_HVINFO, None)
1055 if not isinstance(nodeinfo, dict):
1056 feedback_fn(" - ERROR: connection to %s failed (hvinfo)" % (node,))
1062 "mfree": int(nodeinfo['memory_free']),
1065 # dictionary holding all instances this node is secondary for,
1066 # grouped by their primary node. Each key is a cluster node, and each
1067 # value is a list of instances which have the key as primary and the
1068 # current node as secondary. this is handy to calculate N+1 memory
1069 # availability if you can only failover from a primary to its
1071 "sinst-by-pnode": {},
1073 # FIXME: devise a free space model for file based instances as well
1074 if vg_name is not None:
1075 if (constants.NV_VGLIST not in nresult or
1076 vg_name not in nresult[constants.NV_VGLIST]):
1077 feedback_fn(" - ERROR: node %s didn't return data for the"
1078 " volume group '%s' - it is either missing or broken" %
1082 node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1083 except (ValueError, KeyError):
1084 feedback_fn(" - ERROR: invalid nodeinfo value returned"
1085 " from node %s" % (node,))
1089 node_vol_should = {}
1091 for instance in instancelist:
1092 feedback_fn("* Verifying instance %s" % instance)
1093 inst_config = instanceinfo[instance]
1094 result = self._VerifyInstance(instance, inst_config, node_volume,
1095 node_instance, feedback_fn, n_offline)
1097 inst_nodes_offline = []
1099 inst_config.MapLVsByNode(node_vol_should)
1101 instance_cfg[instance] = inst_config
1103 pnode = inst_config.primary_node
1104 if pnode in node_info:
1105 node_info[pnode]['pinst'].append(instance)
1106 elif pnode not in n_offline:
1107 feedback_fn(" - ERROR: instance %s, connection to primary node"
1108 " %s failed" % (instance, pnode))
1111 if pnode in n_offline:
1112 inst_nodes_offline.append(pnode)
1114 # If the instance is non-redundant we cannot survive losing its primary
1115 # node, so we are not N+1 compliant. On the other hand we have no disk
# templates with more than one secondary so that situation is not well supported either.
1118 # FIXME: does not support file-backed instances
1119 if len(inst_config.secondary_nodes) == 0:
1120 i_non_redundant.append(instance)
1121 elif len(inst_config.secondary_nodes) > 1:
1122 feedback_fn(" - WARNING: multiple secondaries for instance %s"
1125 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1126 i_non_a_balanced.append(instance)
1128 for snode in inst_config.secondary_nodes:
1129 if snode in node_info:
1130 node_info[snode]['sinst'].append(instance)
1131 if pnode not in node_info[snode]['sinst-by-pnode']:
1132 node_info[snode]['sinst-by-pnode'][pnode] = []
1133 node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1134 elif snode not in n_offline:
1135 feedback_fn(" - ERROR: instance %s, connection to secondary node"
1136 " %s failed" % (instance, snode))
1138 if snode in n_offline:
1139 inst_nodes_offline.append(snode)
1141 if inst_nodes_offline:
1142 # warn that the instance lives on offline nodes, and set bad=True
1143 feedback_fn(" - ERROR: instance lives on offline node(s) %s" %
1144 ", ".join(inst_nodes_offline))
1147 feedback_fn("* Verifying orphan volumes")
1148 result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1152 feedback_fn("* Verifying remaining instances")
1153 result = self._VerifyOrphanInstances(instancelist, node_instance,
1157 if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1158 feedback_fn("* Verifying N+1 Memory redundancy")
1159 result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1162 feedback_fn("* Other Notes")
1164 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
1165 % len(i_non_redundant))
1167 if i_non_a_balanced:
1168 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
1169 % len(i_non_a_balanced))
1172 feedback_fn(" - NOTICE: %d offline node(s) found." % len(n_offline))
1175 feedback_fn(" - NOTICE: %d drained node(s) found." % len(n_drained))
1179 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1180 """Analize the post-hooks' result
1182 This method analyses the hook result, handles it, and sends some
1183 nicely-formatted feedback back to the user.
1185 @param phase: one of L{constants.HOOKS_PHASE_POST} or
1186 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1187 @param hooks_results: the results of the multi-node hooks rpc call
@param feedback_fn: function used to send feedback back to the caller
1189 @param lu_result: previous Exec result
1190 @return: the new Exec result, based on the previous result
# We only really run POST phase hooks, and are only interested in their results.
1196 if phase == constants.HOOKS_PHASE_POST:
1197 # Used to change hooks' output to proper indentation
1198 indent_re = re.compile('^', re.M)
1199 feedback_fn("* Hooks Results")
1200 if not hooks_results:
1201 feedback_fn(" - ERROR: general communication failure")
1204 for node_name in hooks_results:
1205 show_node_header = True
1206 res = hooks_results[node_name]
1207 if res.failed or res.data is False or not isinstance(res.data, list):
1209 # no need to warn or set fail return value
1211 feedback_fn(" Communication failure in hooks execution")
1214 for script, hkr, output in res.data:
1215 if hkr == constants.HKR_FAIL:
1216 # The node header is only shown once, if there are
1217 # failing hooks on that node
1218 if show_node_header:
1219 feedback_fn(" Node %s:" % node_name)
1220 show_node_header = False
1221 feedback_fn(" ERROR: Script %s failed, output:" % script)
1222 output = indent_re.sub(' ', output)
1223 feedback_fn("%s" % output)
1229 class LUVerifyDisks(NoHooksLU):
1230 """Verifies the cluster disks status.
1236 def ExpandNames(self):
1237 self.needed_locks = {
1238 locking.LEVEL_NODE: locking.ALL_SET,
1239 locking.LEVEL_INSTANCE: locking.ALL_SET,
1241 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1243 def CheckPrereq(self):
1244 """Check prerequisites.
1246 This has no prerequisites.
1251 def Exec(self, feedback_fn):
1252 """Verify integrity of cluster disks.
1255 result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
1257 vg_name = self.cfg.GetVGName()
1258 nodes = utils.NiceSort(self.cfg.GetNodeList())
1259 instances = [self.cfg.GetInstanceInfo(name)
1260 for name in self.cfg.GetInstanceList()]
1263 for inst in instances:
1265 if (not inst.admin_up or
1266 inst.disk_template not in constants.DTS_NET_MIRROR):
1268 inst.MapLVsByNode(inst_lvs)
1269 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1270 for node, vol_list in inst_lvs.iteritems():
1271 for vol in vol_list:
1272 nv_dict[(node, vol)] = inst
1277 node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1282 lvs = node_lvs[node]
1285 self.LogWarning("Connection to node %s failed: %s" %
1289 if isinstance(lvs, basestring):
1290 logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1291 res_nlvm[node] = lvs
1293 elif not isinstance(lvs, dict):
1294 logging.warning("Connection to node %s failed or invalid data"
1296 res_nodes.append(node)
1299 for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1300 inst = nv_dict.pop((node, lv_name), None)
1301 if (not lv_online and inst is not None
1302 and inst.name not in res_instances):
1303 res_instances.append(inst.name)
# any leftover items in nv_dict are missing LVs, let's arrange the data better.
1307 for key, inst in nv_dict.iteritems():
1308 if inst.name not in res_missing:
1309 res_missing[inst.name] = []
1310 res_missing[inst.name].append(key)
1315 class LURenameCluster(LogicalUnit):
1316 """Rename the cluster.
1319 HPATH = "cluster-rename"
1320 HTYPE = constants.HTYPE_CLUSTER
1323 def BuildHooksEnv(self):
1328 "OP_TARGET": self.cfg.GetClusterName(),
1329 "NEW_NAME": self.op.name,
1331 mn = self.cfg.GetMasterNode()
1332 return env, [mn], [mn]
1334 def CheckPrereq(self):
1335 """Verify that the passed name is a valid one.
1338 hostname = utils.HostInfo(self.op.name)
1340 new_name = hostname.name
1341 self.ip = new_ip = hostname.ip
1342 old_name = self.cfg.GetClusterName()
1343 old_ip = self.cfg.GetMasterIP()
1344 if new_name == old_name and new_ip == old_ip:
1345 raise errors.OpPrereqError("Neither the name nor the IP address of the"
1346 " cluster has changed")
1347 if new_ip != old_ip:
1348 if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1349 raise errors.OpPrereqError("The given cluster IP address (%s) is"
1350 " reachable on the network. Aborting." %
1353 self.op.name = new_name
1355 def Exec(self, feedback_fn):
1356 """Rename the cluster.
1359 clustername = self.op.name
1362 # shutdown the master IP
1363 master = self.cfg.GetMasterNode()
1364 result = self.rpc.call_node_stop_master(master, False)
1365 if result.failed or not result.data:
1366 raise errors.OpExecError("Could not disable the master role")
1369 cluster = self.cfg.GetClusterInfo()
1370 cluster.cluster_name = clustername
1371 cluster.master_ip = ip
1372 self.cfg.Update(cluster)
1374 # update the known hosts file
1375 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1376 node_list = self.cfg.GetNodeList()
1378 node_list.remove(master)
1381 result = self.rpc.call_upload_file(node_list,
1382 constants.SSH_KNOWN_HOSTS_FILE)
1383 for to_node, to_result in result.iteritems():
1384 if to_result.failed or not to_result.data:
1385 logging.error("Copy of file %s to node %s failed",
1386 constants.SSH_KNOWN_HOSTS_FILE, to_node)
1389 result = self.rpc.call_node_start_master(master, False)
1390 if result.failed or not result.data:
1391 self.LogWarning("Could not re-enable the master role on"
1392 " the master, please restart manually.")
1395 def _RecursiveCheckIfLVMBased(disk):
1396 """Check if the given disk or its children are lvm-based.
1398 @type disk: L{objects.Disk}
1399 @param disk: the disk to check
1401 @return: boolean indicating whether a LD_LV dev_type was found or not
1405 for chdisk in disk.children:
1406 if _RecursiveCheckIfLVMBased(chdisk):
1408 return disk.dev_type == constants.LD_LV
1411 class LUSetClusterParams(LogicalUnit):
1412 """Change the parameters of the cluster.
1415 HPATH = "cluster-modify"
1416 HTYPE = constants.HTYPE_CLUSTER
1420 def CheckArguments(self):
1424 if not hasattr(self.op, "candidate_pool_size"):
1425 self.op.candidate_pool_size = None
1426 if self.op.candidate_pool_size is not None:
1428 self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1429 except (ValueError, TypeError), err:
1430 raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1432 if self.op.candidate_pool_size < 1:
1433 raise errors.OpPrereqError("At least one master candidate needed")
1435 def ExpandNames(self):
1436 # FIXME: in the future maybe other cluster params won't require checking on
1437 # all nodes to be modified.
1438 self.needed_locks = {
1439 locking.LEVEL_NODE: locking.ALL_SET,
1441 self.share_locks[locking.LEVEL_NODE] = 1
1443 def BuildHooksEnv(self):
1448 "OP_TARGET": self.cfg.GetClusterName(),
1449 "NEW_VG_NAME": self.op.vg_name,
1451 mn = self.cfg.GetMasterNode()
1452 return env, [mn], [mn]
1454 def CheckPrereq(self):
1455 """Check prerequisites.
1457 This checks whether the given params don't conflict and
1458 if the given volume group is valid.
1461 if self.op.vg_name is not None and not self.op.vg_name:
1462 instances = self.cfg.GetAllInstancesInfo().values()
1463 for inst in instances:
1464 for disk in inst.disks:
1465 if _RecursiveCheckIfLVMBased(disk):
1466 raise errors.OpPrereqError("Cannot disable lvm storage while"
1467 " lvm-based instances exist")
1469 node_list = self.acquired_locks[locking.LEVEL_NODE]
# if vg_name is not None, check the given volume group on all nodes
1473 vglist = self.rpc.call_vg_list(node_list)
1474 for node in node_list:
1475 if vglist[node].failed:
1476 # ignoring down node
1477 self.LogWarning("Node %s unreachable/error, ignoring" % node)
1479 vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
1481 constants.MIN_VG_SIZE)
1483 raise errors.OpPrereqError("Error on node '%s': %s" %
1486 self.cluster = cluster = self.cfg.GetClusterInfo()
1487 # validate beparams changes
1488 if self.op.beparams:
1489 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1490 self.new_beparams = cluster.FillDict(
1491 cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1493 # hypervisor list/parameters
1494 self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1495 if self.op.hvparams:
1496 if not isinstance(self.op.hvparams, dict):
1497 raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1498 for hv_name, hv_dict in self.op.hvparams.items():
1499 if hv_name not in self.new_hvparams:
1500 self.new_hvparams[hv_name] = hv_dict
1502 self.new_hvparams[hv_name].update(hv_dict)
1504 if self.op.enabled_hypervisors is not None:
1505 self.hv_list = self.op.enabled_hypervisors
1507 self.hv_list = cluster.enabled_hypervisors
1509 if self.op.hvparams or self.op.enabled_hypervisors is not None:
1510 # either the enabled list has changed, or the parameters have, validate
1511 for hv_name, hv_params in self.new_hvparams.items():
1512 if ((self.op.hvparams and hv_name in self.op.hvparams) or
1513 (self.op.enabled_hypervisors and
1514 hv_name in self.op.enabled_hypervisors)):
1515 # either this is a new hypervisor, or its parameters have changed
1516 hv_class = hypervisor.GetHypervisor(hv_name)
1517 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1518 hv_class.CheckParameterSyntax(hv_params)
1519 _CheckHVParams(self, node_list, hv_name, hv_params)
1521 def Exec(self, feedback_fn):
1522 """Change the parameters of the cluster.
1525 if self.op.vg_name is not None:
1526 new_volume = self.op.vg_name
1529 if new_volume != self.cfg.GetVGName():
1530 self.cfg.SetVGName(new_volume)
1532 feedback_fn("Cluster LVM configuration already in desired"
1533 " state, not changing")
1534 if self.op.hvparams:
1535 self.cluster.hvparams = self.new_hvparams
1536 if self.op.enabled_hypervisors is not None:
1537 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1538 if self.op.beparams:
1539 self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1540 if self.op.candidate_pool_size is not None:
1541 self.cluster.candidate_pool_size = self.op.candidate_pool_size
1543 self.cfg.Update(self.cluster)
1545 # we want to update nodes after the cluster so that if any errors
1546 # happen, we have recorded and saved the cluster info
1547 if self.op.candidate_pool_size is not None:
1548 _AdjustCandidatePool(self)
1551 def _RedistributeAncillaryFiles(lu, additional_nodes=None):
1552 """Distribute additional files which are part of the cluster configuration.
1554 ConfigWriter takes care of distributing the config and ssconf files, but
1555 there are more files which should be distributed to all nodes. This function
1556 makes sure those are copied.
1558 @param lu: calling logical unit
1559 @param additional_nodes: list of nodes not in the config to distribute to
1562 # 1. Gather target nodes
1563 myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
1564 dist_nodes = lu.cfg.GetNodeList()
1565 if additional_nodes is not None:
1566 dist_nodes.extend(additional_nodes)
1567 if myself.name in dist_nodes:
1568 dist_nodes.remove(myself.name)
1569 # 2. Gather files to distribute
1570 dist_files = set([constants.ETC_HOSTS,
1571 constants.SSH_KNOWN_HOSTS_FILE,
1572 constants.RAPI_CERT_FILE,
1573 constants.RAPI_USERS_FILE,
1575 # 3. Perform the files upload
1576 for fname in dist_files:
1577 if os.path.exists(fname):
1578 result = lu.rpc.call_upload_file(dist_nodes, fname)
1579 for to_node, to_result in result.items():
1580 if to_result.failed or not to_result.data:
1581 logging.error("Copy of file %s to node %s failed", fname, to_node)
1584 class LURedistributeConfig(NoHooksLU):
1585 """Force the redistribution of cluster configuration.
1587 This is a very simple LU.
1593 def ExpandNames(self):
1594 self.needed_locks = {
1595 locking.LEVEL_NODE: locking.ALL_SET,
1597 self.share_locks[locking.LEVEL_NODE] = 1
1599 def CheckPrereq(self):
1600 """Check prerequisites.
1604 def Exec(self, feedback_fn):
1605 """Redistribute the configuration.
1608 self.cfg.Update(self.cfg.GetClusterInfo())
1609 _RedistributeAncillaryFiles(self)
1612 def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1613 """Sleep and poll for an instance's disk to sync.
1616 if not instance.disks:
1620 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1622 node = instance.primary_node
1624 for dev in instance.disks:
1625 lu.cfg.SetDiskID(dev, node)
1631 cumul_degraded = False
1632 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1633 if rstats.failed or not rstats.data:
1634 lu.LogWarning("Can't get any data from node %s", node)
1637 raise errors.RemoteError("Can't contact node %s for mirror data,"
1638 " aborting." % node)
1641 rstats = rstats.data
1643 for i, mstat in enumerate(rstats):
1645 lu.LogWarning("Can't compute data for node %s/%s",
1646 node, instance.disks[i].iv_name)
1648 # we ignore the ldisk parameter
1649 perc_done, est_time, is_degraded, _ = mstat
1650 cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1651 if perc_done is not None:
1653 if est_time is not None:
1654 rem_time = "%d estimated seconds remaining" % est_time
1657 rem_time = "no time estimate"
1658 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1659 (instance.disks[i].iv_name, perc_done, rem_time))
1663 time.sleep(min(60, max_time))
1666 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1667 return not cumul_degraded
1670 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1671 """Check that mirrors are not degraded.
1673 The ldisk parameter, if True, will change the test from the
1674 is_degraded attribute (which represents overall non-ok status for
1675 the device(s)) to the ldisk (representing the local storage status).
1678 lu.cfg.SetDiskID(dev, node)
1685 if on_primary or dev.AssembleOnSecondary():
1686 rstats = lu.rpc.call_blockdev_find(node, dev)
1687 msg = rstats.RemoteFailMsg()
1689 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1691 elif not rstats.payload:
1692 lu.LogWarning("Can't find disk on node %s", node)
1695 result = result and (not rstats.payload[idx])
1697 for child in dev.children:
1698 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1703 class LUDiagnoseOS(NoHooksLU):
1704 """Logical unit for OS diagnose/query.
1707 _OP_REQP = ["output_fields", "names"]
1709 _FIELDS_STATIC = utils.FieldSet()
1710 _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1712 def ExpandNames(self):
1714 raise errors.OpPrereqError("Selective OS query not supported")
1716 _CheckOutputFields(static=self._FIELDS_STATIC,
1717 dynamic=self._FIELDS_DYNAMIC,
1718 selected=self.op.output_fields)
1720 # Lock all nodes, in shared mode
1721 # Temporary removal of locks, should be reverted later
1722 # TODO: reintroduce locks when they are lighter-weight
1723 self.needed_locks = {}
1724 #self.share_locks[locking.LEVEL_NODE] = 1
1725 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1727 def CheckPrereq(self):
1728 """Check prerequisites.
1733 def _DiagnoseByOS(node_list, rlist):
1734 """Remaps a per-node return list into an a per-os per-node dictionary
1736 @param node_list: a list with the names of all nodes
1737 @param rlist: a map with node names as keys and OS objects as values
1740 @return: a dictionary with osnames as keys and as value another map, with
1741 nodes as keys and list of OS objects as values, eg::
1743 {"debian-etch": {"node1": [<object>,...],
1744 "node2": [<object>,]}
1749 # we build here the list of nodes that didn't fail the RPC (at RPC
1750 # level), so that nodes with a non-responding node daemon don't
1751 # make all OSes invalid
1752 good_nodes = [node_name for node_name in rlist
1753 if not rlist[node_name].failed]
1754 for node_name, nr in rlist.iteritems():
1755 if nr.failed or not nr.data:
1757 for os_obj in nr.data:
1758 if os_obj.name not in all_os:
1759 # build a list of nodes for this os containing empty lists
1760 # for each node in node_list
1761 all_os[os_obj.name] = {}
1762 for nname in good_nodes:
1763 all_os[os_obj.name][nname] = []
1764 all_os[os_obj.name][node_name].append(os_obj)
1767 def Exec(self, feedback_fn):
1768 """Compute the list of OSes.
1771 valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
1772 node_data = self.rpc.call_os_diagnose(valid_nodes)
1773 if node_data == False:
1774 raise errors.OpExecError("Can't gather the list of OSes")
1775 pol = self._DiagnoseByOS(valid_nodes, node_data)
1777 for os_name, os_data in pol.iteritems():
1779 for field in self.op.output_fields:
1782 elif field == "valid":
1783 val = utils.all([osl and osl[0] for osl in os_data.values()])
1784 elif field == "node_status":
1786 for node_name, nos_list in os_data.iteritems():
1787 val[node_name] = [(v.status, v.path) for v in nos_list]
1789 raise errors.ParameterError(field)
1796 class LURemoveNode(LogicalUnit):
1797 """Logical unit for removing a node.
1800 HPATH = "node-remove"
1801 HTYPE = constants.HTYPE_NODE
1802 _OP_REQP = ["node_name"]
1804 def BuildHooksEnv(self):
1807 This doesn't run on the target node in the pre phase as a failed
1808 node would then be impossible to remove.
1812 "OP_TARGET": self.op.node_name,
1813 "NODE_NAME": self.op.node_name,
1815 all_nodes = self.cfg.GetNodeList()
1816 all_nodes.remove(self.op.node_name)
1817 return env, all_nodes, all_nodes
1819 def CheckPrereq(self):
1820 """Check prerequisites.
1823 - the node exists in the configuration
1824 - it does not have primary or secondary instances
1825 - it's not the master
1827 Any errors are signalled by raising errors.OpPrereqError.
1830 node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1834 instance_list = self.cfg.GetInstanceList()
1836 masternode = self.cfg.GetMasterNode()
1837 if node.name == masternode:
1838 raise errors.OpPrereqError("Node is the master node,"
1839 " you need to failover first.")
1841 for instance_name in instance_list:
1842 instance = self.cfg.GetInstanceInfo(instance_name)
1843 if node.name in instance.all_nodes:
1844 raise errors.OpPrereqError("Instance %s is still running on the node,"
1845 " please remove first." % instance_name)
1846 self.op.node_name = node.name
1849 def Exec(self, feedback_fn):
1850 """Removes the node from the cluster.
1854 logging.info("Stopping the node daemon and removing configs from node %s",
1857 self.context.RemoveNode(node.name)
1859 self.rpc.call_node_leave_cluster(node.name)
1861 # Promote nodes to master candidate as needed
1862 _AdjustCandidatePool(self)
1865 class LUQueryNodes(NoHooksLU):
1866 """Logical unit for querying nodes.
1869 _OP_REQP = ["output_fields", "names", "use_locking"]
1871 _FIELDS_DYNAMIC = utils.FieldSet(
1873 "mtotal", "mnode", "mfree",
1875 "ctotal", "cnodes", "csockets",
1878 _FIELDS_STATIC = utils.FieldSet(
1879 "name", "pinst_cnt", "sinst_cnt",
1880 "pinst_list", "sinst_list",
1881 "pip", "sip", "tags",
1889 def ExpandNames(self):
1890 _CheckOutputFields(static=self._FIELDS_STATIC,
1891 dynamic=self._FIELDS_DYNAMIC,
1892 selected=self.op.output_fields)
1894 self.needed_locks = {}
1895 self.share_locks[locking.LEVEL_NODE] = 1
1898 self.wanted = _GetWantedNodes(self, self.op.names)
1900 self.wanted = locking.ALL_SET
1902 self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1903 self.do_locking = self.do_node_query and self.op.use_locking
1905 # if we don't request only static fields, we need to lock the nodes
1906 self.needed_locks[locking.LEVEL_NODE] = self.wanted
1909 def CheckPrereq(self):
1910 """Check prerequisites.
# The validation of the node list is done in _GetWantedNodes if the list
# is non-empty; if it is empty, there's no validation to do
1917 def Exec(self, feedback_fn):
1918 """Computes the list of nodes and their attributes.
1921 all_info = self.cfg.GetAllNodesInfo()
1923 nodenames = self.acquired_locks[locking.LEVEL_NODE]
1924 elif self.wanted != locking.ALL_SET:
1925 nodenames = self.wanted
1926 missing = set(nodenames).difference(all_info.keys())
1928 raise errors.OpExecError(
1929 "Some nodes were removed before retrieving their data: %s" % missing)
1931 nodenames = all_info.keys()
1933 nodenames = utils.NiceSort(nodenames)
1934 nodelist = [all_info[name] for name in nodenames]
1936 # begin data gathering
1938 if self.do_node_query:
1940 node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
1941 self.cfg.GetHypervisorType())
1942 for name in nodenames:
1943 nodeinfo = node_data[name]
1944 if not nodeinfo.failed and nodeinfo.data:
1945 nodeinfo = nodeinfo.data
1946 fn = utils.TryConvert
1948 "mtotal": fn(int, nodeinfo.get('memory_total', None)),
1949 "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
1950 "mfree": fn(int, nodeinfo.get('memory_free', None)),
1951 "dtotal": fn(int, nodeinfo.get('vg_size', None)),
1952 "dfree": fn(int, nodeinfo.get('vg_free', None)),
1953 "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
1954 "bootid": nodeinfo.get('bootid', None),
1955 "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
1956 "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
1959 live_data[name] = {}
1961 live_data = dict.fromkeys(nodenames, {})
1963 node_to_primary = dict([(name, set()) for name in nodenames])
1964 node_to_secondary = dict([(name, set()) for name in nodenames])
1966 inst_fields = frozenset(("pinst_cnt", "pinst_list",
1967 "sinst_cnt", "sinst_list"))
1968 if inst_fields & frozenset(self.op.output_fields):
1969 instancelist = self.cfg.GetInstanceList()
1971 for instance_name in instancelist:
1972 inst = self.cfg.GetInstanceInfo(instance_name)
1973 if inst.primary_node in node_to_primary:
1974 node_to_primary[inst.primary_node].add(inst.name)
1975 for secnode in inst.secondary_nodes:
1976 if secnode in node_to_secondary:
1977 node_to_secondary[secnode].add(inst.name)
1979 master_node = self.cfg.GetMasterNode()
1981 # end data gathering
1984 for node in nodelist:
1986 for field in self.op.output_fields:
1989 elif field == "pinst_list":
1990 val = list(node_to_primary[node.name])
1991 elif field == "sinst_list":
1992 val = list(node_to_secondary[node.name])
1993 elif field == "pinst_cnt":
1994 val = len(node_to_primary[node.name])
1995 elif field == "sinst_cnt":
1996 val = len(node_to_secondary[node.name])
1997 elif field == "pip":
1998 val = node.primary_ip
1999 elif field == "sip":
2000 val = node.secondary_ip
2001 elif field == "tags":
2002 val = list(node.GetTags())
2003 elif field == "serial_no":
2004 val = node.serial_no
2005 elif field == "master_candidate":
2006 val = node.master_candidate
2007 elif field == "master":
2008 val = node.name == master_node
2009 elif field == "offline":
2011 elif field == "drained":
2013 elif self._FIELDS_DYNAMIC.Matches(field):
2014 val = live_data[node.name].get(field, None)
2016 raise errors.ParameterError(field)
2017 node_output.append(val)
2018 output.append(node_output)
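# Illustrative sketch, added for exposition and not part of the original
# module: how the FieldSet split above decides whether live node data is
# needed. Only fields outside the static set require contacting the nodes;
# the static field list below is a reduced subset, for illustration only.
def _ExampleNeedsLiveNodeData(output_fields):
  """Returns the requested fields which would need a live node query."""
  static_fields = utils.FieldSet("name", "pinst_cnt", "sinst_cnt",
                                 "pip", "sip", "tags")
  # a non-empty result means at least one dynamic field (e.g. "mfree")
  return static_fields.NonMatching(output_fields)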
2023 class LUQueryNodeVolumes(NoHooksLU):
2024 """Logical unit for getting volumes on node(s).
2027 _OP_REQP = ["nodes", "output_fields"]
2029 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2030 _FIELDS_STATIC = utils.FieldSet("node")
2032 def ExpandNames(self):
2033 _CheckOutputFields(static=self._FIELDS_STATIC,
2034 dynamic=self._FIELDS_DYNAMIC,
2035 selected=self.op.output_fields)
2037 self.needed_locks = {}
2038 self.share_locks[locking.LEVEL_NODE] = 1
2039 if not self.op.nodes:
2040 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2042 self.needed_locks[locking.LEVEL_NODE] = \
2043 _GetWantedNodes(self, self.op.nodes)
2045 def CheckPrereq(self):
2046 """Check prerequisites.
2048 This checks that the fields required are valid output fields.
2051 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2053 def Exec(self, feedback_fn):
2054 """Computes the list of nodes and their attributes.
2057 nodenames = self.nodes
2058 volumes = self.rpc.call_node_volumes(nodenames)
2060 ilist = [self.cfg.GetInstanceInfo(iname) for iname
2061 in self.cfg.GetInstanceList()]
2063 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2066 for node in nodenames:
2067 if node not in volumes or volumes[node].failed or not volumes[node].data:
2070 node_vols = volumes[node].data[:]
2071 node_vols.sort(key=lambda vol: vol['dev'])
2073 for vol in node_vols:
2075 for field in self.op.output_fields:
2078 elif field == "phys":
2082 elif field == "name":
2084 elif field == "size":
2085 val = int(float(vol['size']))
2086 elif field == "instance":
2088 if node not in lv_by_node[inst]:
2090 if vol['name'] in lv_by_node[inst][node]:
2096 raise errors.ParameterError(field)
2097 node_output.append(str(val))
2099 output.append(node_output)
2104 class LUAddNode(LogicalUnit):
2105 """Logical unit for adding node to the cluster.
2109 HTYPE = constants.HTYPE_NODE
2110 _OP_REQP = ["node_name"]
2112 def BuildHooksEnv(self):
2115 This will run on all nodes before, and on all nodes + the new node after.
2119 "OP_TARGET": self.op.node_name,
2120 "NODE_NAME": self.op.node_name,
2121 "NODE_PIP": self.op.primary_ip,
2122 "NODE_SIP": self.op.secondary_ip,
2124 nodes_0 = self.cfg.GetNodeList()
2125 nodes_1 = nodes_0 + [self.op.node_name, ]
2126 return env, nodes_0, nodes_1
2128 def CheckPrereq(self):
2129 """Check prerequisites.
2132 - the new node is not already in the config
2134 - its parameters (single/dual homed) match the cluster
2136 Any errors are signalled by raising errors.OpPrereqError.
2139 node_name = self.op.node_name
2142 dns_data = utils.HostInfo(node_name)
2144 node = dns_data.name
2145 primary_ip = self.op.primary_ip = dns_data.ip
2146 secondary_ip = getattr(self.op, "secondary_ip", None)
2147 if secondary_ip is None:
2148 secondary_ip = primary_ip
2149 if not utils.IsValidIP(secondary_ip):
2150 raise errors.OpPrereqError("Invalid secondary IP given")
2151 self.op.secondary_ip = secondary_ip
2153 node_list = cfg.GetNodeList()
2154 if not self.op.readd and node in node_list:
2155 raise errors.OpPrereqError("Node %s is already in the configuration" %
2157 elif self.op.readd and node not in node_list:
2158 raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2160 for existing_node_name in node_list:
2161 existing_node = cfg.GetNodeInfo(existing_node_name)
2163 if self.op.readd and node == existing_node_name:
2164 if (existing_node.primary_ip != primary_ip or
2165 existing_node.secondary_ip != secondary_ip):
2166 raise errors.OpPrereqError("Readded node doesn't have the same IP"
2167 " address configuration as before")
2170 if (existing_node.primary_ip == primary_ip or
2171 existing_node.secondary_ip == primary_ip or
2172 existing_node.primary_ip == secondary_ip or
2173 existing_node.secondary_ip == secondary_ip):
2174 raise errors.OpPrereqError("New node ip address(es) conflict with"
2175 " existing node %s" % existing_node.name)
2177 # check that the type of the node (single versus dual homed) is the
2178 # same as for the master
2179 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2180 master_singlehomed = myself.secondary_ip == myself.primary_ip
2181 newbie_singlehomed = secondary_ip == primary_ip
2182 if master_singlehomed != newbie_singlehomed:
2183 if master_singlehomed:
2184 raise errors.OpPrereqError("The master has no private ip but the"
2185 " new node has one")
2187 raise errors.OpPrereqError("The master has a private ip but the"
2188 " new node doesn't have one")
2190 # checks reachability
2191 if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2192 raise errors.OpPrereqError("Node not reachable by ping")
2194 if not newbie_singlehomed:
2195 # check reachability from my secondary ip to newbie's secondary ip
2196 if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2197 source=myself.secondary_ip):
2198 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2199 " based ping to noded port")
2201 cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2202 mc_now, _ = self.cfg.GetMasterCandidateStats()
2203 master_candidate = mc_now < cp_size
2205 self.new_node = objects.Node(name=node,
2206 primary_ip=primary_ip,
2207 secondary_ip=secondary_ip,
2208 master_candidate=master_candidate,
2209 offline=False, drained=False)
2211 def Exec(self, feedback_fn):
2212 """Adds the new node to the cluster.
2215 new_node = self.new_node
2216 node = new_node.name
2218 # check connectivity
2219 result = self.rpc.call_version([node])[node]
2222 if constants.PROTOCOL_VERSION == result.data:
2223 logging.info("Communication to node %s fine, sw version %s match",
2226 raise errors.OpExecError("Version mismatch master version %s,"
2227 " node version %s" %
2228 (constants.PROTOCOL_VERSION, result.data))
2230 raise errors.OpExecError("Cannot get version from the new node")
2233 logging.info("Copy ssh key to node %s", node)
2234 priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2236 keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2237 constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2243 keyarray.append(f.read())
2247 result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2249 keyarray[3], keyarray[4], keyarray[5])
2251 msg = result.RemoteFailMsg()
2253 raise errors.OpExecError("Cannot transfer ssh keys to the"
2254 " new node: %s" % msg)
2256 # Add node to our /etc/hosts, and add key to known_hosts
2257 utils.AddHostToEtcHosts(new_node.name)
2259 if new_node.secondary_ip != new_node.primary_ip:
2260 result = self.rpc.call_node_has_ip_address(new_node.name,
2261 new_node.secondary_ip)
2262 if result.failed or not result.data:
2263 raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2264 " you gave (%s). Please fix and re-run this"
2265 " command." % new_node.secondary_ip)
2267 node_verify_list = [self.cfg.GetMasterNode()]
2268 node_verify_param = {
2270 # TODO: do a node-net-test as well?
2273 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2274 self.cfg.GetClusterName())
2275 for verifier in node_verify_list:
2276 if result[verifier].failed or not result[verifier].data:
2277 raise errors.OpExecError("Cannot communicate with %s's node daemon"
2278 " for remote verification" % verifier)
2279 if result[verifier].data['nodelist']:
2280 for failed in result[verifier].data['nodelist']:
2281 feedback_fn("ssh/hostname verification failed %s -> %s" %
2282 (verifier, result[verifier].data['nodelist'][failed]))
2283 raise errors.OpExecError("ssh/hostname verification failed.")
2286 _RedistributeAncillaryFiles(self)
2287 self.context.ReaddNode(new_node)
2289 _RedistributeAncillaryFiles(self, additional_nodes=node)
2290 self.context.AddNode(new_node)
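# Small illustrative helper, an exposition-only assumption (not used by the
# LUs above): the single/dual-homed consistency rule that
# LUAddNode.CheckPrereq enforces between the master and the node being added.
def _ExampleHomednessMatches(master_node, new_primary_ip, new_secondary_ip):
  """Returns True if the new node's homedness matches the master node's."""
  master_singlehomed = master_node.secondary_ip == master_node.primary_ip
  newbie_singlehomed = new_secondary_ip == new_primary_ip
  return master_singlehomed == newbie_singlehomed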
2293 class LUSetNodeParams(LogicalUnit):
2294 """Modifies the parameters of a node.
2297 HPATH = "node-modify"
2298 HTYPE = constants.HTYPE_NODE
2299 _OP_REQP = ["node_name"]
2302 def CheckArguments(self):
2303 node_name = self.cfg.ExpandNodeName(self.op.node_name)
2304 if node_name is None:
2305 raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2306 self.op.node_name = node_name
2307 _CheckBooleanOpField(self.op, 'master_candidate')
2308 _CheckBooleanOpField(self.op, 'offline')
2309 _CheckBooleanOpField(self.op, 'drained')
2310 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2311 if all_mods.count(None) == 3:
2312 raise errors.OpPrereqError("Please pass at least one modification")
2313 if all_mods.count(True) > 1:
2314 raise errors.OpPrereqError("Can't set the node into more than one"
2315 " state at the same time")
2317 def ExpandNames(self):
2318 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2320 def BuildHooksEnv(self):
2323 This runs on the master node.
2327 "OP_TARGET": self.op.node_name,
2328 "MASTER_CANDIDATE": str(self.op.master_candidate),
2329 "OFFLINE": str(self.op.offline),
2330 "DRAINED": str(self.op.drained),
2332 nl = [self.cfg.GetMasterNode(),
2336 def CheckPrereq(self):
2337 """Check prerequisites.
2339 This checks the node's current state and the validity of the requested changes.
2342 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2344 if ((self.op.master_candidate == False or self.op.offline == True or
2345 self.op.drained == True) and node.master_candidate):
2346 # we will demote the node from master_candidate
2347 if self.op.node_name == self.cfg.GetMasterNode():
2348 raise errors.OpPrereqError("The master node has to be a"
2349 " master candidate, online and not drained")
2350 cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2351 num_candidates, _ = self.cfg.GetMasterCandidateStats()
2352 if num_candidates <= cp_size:
2353 msg = ("Not enough master candidates (desired"
2354 " %d, new value will be %d)" % (cp_size, num_candidates-1))
2356 self.LogWarning(msg)
2358 raise errors.OpPrereqError(msg)
2360 if (self.op.master_candidate == True and
2361 ((node.offline and not self.op.offline == False) or
2362 (node.drained and not self.op.drained == False))):
2363 raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
2364 " to master_candidate" % node.name)
2368 def Exec(self, feedback_fn):
2377 if self.op.offline is not None:
2378 node.offline = self.op.offline
2379 result.append(("offline", str(self.op.offline)))
2380 if self.op.offline == True:
2381 if node.master_candidate:
2382 node.master_candidate = False
2384 result.append(("master_candidate", "auto-demotion due to offline"))
2386 node.drained = False
2387 result.append(("drained", "clear drained status due to offline"))
2389 if self.op.master_candidate is not None:
2390 node.master_candidate = self.op.master_candidate
2392 result.append(("master_candidate", str(self.op.master_candidate)))
2393 if self.op.master_candidate == False:
2394 rrc = self.rpc.call_node_demote_from_mc(node.name)
2395 msg = rrc.RemoteFailMsg()
2397 self.LogWarning("Node failed to demote itself: %s" % msg)
2399 if self.op.drained is not None:
2400 node.drained = self.op.drained
2401 result.append(("drained", str(self.op.drained)))
2402 if self.op.drained == True:
2403 if node.master_candidate:
2404 node.master_candidate = False
2406 result.append(("master_candidate", "auto-demotion due to drain"))
2408 node.offline = False
2409 result.append(("offline", "clear offline status due to drain"))
2411 # this will trigger configuration file update, if needed
2412 self.cfg.Update(node)
2413 # this will trigger job queue propagation or cleanup
2415 self.context.ReaddNode(node)
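# Illustrative sketch (exposition-only assumption): the flag rule enforced by
# LUSetNodeParams.CheckArguments above - at least one of the three flags must
# be passed, and at most one of them may be True at the same time.
def _ExampleNodeFlagsAcceptable(offline, drained, master_candidate):
  """Returns True if this combination of node flags would be accepted."""
  all_mods = [offline, master_candidate, drained]
  return all_mods.count(None) != 3 and all_mods.count(True) <= 1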
2420 class LUQueryClusterInfo(NoHooksLU):
2421 """Query cluster configuration.
2427 def ExpandNames(self):
2428 self.needed_locks = {}
2430 def CheckPrereq(self):
2431 """No prerequsites needed for this LU.
2436 def Exec(self, feedback_fn):
2437 """Return cluster config.
2440 cluster = self.cfg.GetClusterInfo()
2442 "software_version": constants.RELEASE_VERSION,
2443 "protocol_version": constants.PROTOCOL_VERSION,
2444 "config_version": constants.CONFIG_VERSION,
2445 "os_api_version": constants.OS_API_VERSION,
2446 "export_version": constants.EXPORT_VERSION,
2447 "architecture": (platform.architecture()[0], platform.machine()),
2448 "name": cluster.cluster_name,
2449 "master": cluster.master_node,
2450 "default_hypervisor": cluster.default_hypervisor,
2451 "enabled_hypervisors": cluster.enabled_hypervisors,
2452 "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
2453 for hypervisor in cluster.enabled_hypervisors]),
2454 "beparams": cluster.beparams,
2455 "candidate_pool_size": cluster.candidate_pool_size,
2456 "default_bridge": cluster.default_bridge,
2457 "master_netdev": cluster.master_netdev,
2458 "volume_group_name": cluster.volume_group_name,
2459 "file_storage_dir": cluster.file_storage_dir,
2465 class LUQueryConfigValues(NoHooksLU):
2466 """Return configuration values.
2471 _FIELDS_DYNAMIC = utils.FieldSet()
2472 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2474 def ExpandNames(self):
2475 self.needed_locks = {}
2477 _CheckOutputFields(static=self._FIELDS_STATIC,
2478 dynamic=self._FIELDS_DYNAMIC,
2479 selected=self.op.output_fields)
2481 def CheckPrereq(self):
2482 """No prerequisites.
2487 def Exec(self, feedback_fn):
2488 """Dump a representation of the cluster config to the standard output.
2492 for field in self.op.output_fields:
2493 if field == "cluster_name":
2494 entry = self.cfg.GetClusterName()
2495 elif field == "master_node":
2496 entry = self.cfg.GetMasterNode()
2497 elif field == "drain_flag":
2498 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2500 raise errors.ParameterError(field)
2501 values.append(entry)
2505 class LUActivateInstanceDisks(NoHooksLU):
2506 """Bring up an instance's disks.
2509 _OP_REQP = ["instance_name"]
2512 def ExpandNames(self):
2513 self._ExpandAndLockInstance()
2514 self.needed_locks[locking.LEVEL_NODE] = []
2515 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2517 def DeclareLocks(self, level):
2518 if level == locking.LEVEL_NODE:
2519 self._LockInstancesNodes()
2521 def CheckPrereq(self):
2522 """Check prerequisites.
2524 This checks that the instance is in the cluster.
2527 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2528 assert self.instance is not None, \
2529 "Cannot retrieve locked instance %s" % self.op.instance_name
2530 _CheckNodeOnline(self, self.instance.primary_node)
2532 def Exec(self, feedback_fn):
2533 """Activate the disks.
2536 disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2538 raise errors.OpExecError("Cannot activate block devices")
2543 def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2544 """Prepare the block devices for an instance.
2546 This sets up the block devices on all nodes.
2548 @type lu: L{LogicalUnit}
2549 @param lu: the logical unit on whose behalf we execute
2550 @type instance: L{objects.Instance}
2551 @param instance: the instance for whose disks we assemble
2552 @type ignore_secondaries: boolean
2553 @param ignore_secondaries: if true, errors on secondary nodes
2554 won't result in an error return from the function
2555 @return: a tuple of (disks_ok, device_info); device_info is a list of
2556 (host, instance_visible_name, node_visible_name) tuples
2557 with the mapping from node devices to instance devices
2562 iname = instance.name
2563 # With the two-pass mechanism we try to reduce the window of
2564 # opportunity for the race condition of switching DRBD to primary
2565 # before handshaking has occurred, but we do not eliminate it
2567 # The proper fix would be to wait (with some limits) until the
2568 # connection has been made and drbd transitions from WFConnection
2569 # into any other network-connected state (Connected, SyncTarget,
2572 # 1st pass, assemble on all nodes in secondary mode
2573 for inst_disk in instance.disks:
2574 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2575 lu.cfg.SetDiskID(node_disk, node)
2576 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2577 msg = result.RemoteFailMsg()
2579 lu.proc.LogWarning("Could not prepare block device %s on node %s"
2580 " (is_primary=False, pass=1): %s",
2581 inst_disk.iv_name, node, msg)
2582 if not ignore_secondaries:
2585 # FIXME: race condition on drbd migration to primary
2587 # 2nd pass, do only the primary node
2588 for inst_disk in instance.disks:
2589 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2590 if node != instance.primary_node:
2592 lu.cfg.SetDiskID(node_disk, node)
2593 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2594 msg = result.RemoteFailMsg()
2596 lu.proc.LogWarning("Could not prepare block device %s on node %s"
2597 " (is_primary=True, pass=2): %s",
2598 inst_disk.iv_name, node, msg)
2600 device_info.append((instance.primary_node, inst_disk.iv_name,
2603 # leave the disks configured for the primary node
2604 # this is a workaround that would be fixed better by
2605 # improving the logical/physical id handling
2606 for disk in instance.disks:
2607 lu.cfg.SetDiskID(disk, instance.primary_node)
2609 return disks_ok, device_info
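# Minimal usage sketch (an assumption added for illustration, not part of the
# original flow): how a caller typically consumes _AssembleInstanceDisks,
# mirroring LUActivateInstanceDisks.Exec above.
def _ExampleActivateDisks(lu, instance):
  """Assembles the instance's disks, failing hard if that is not possible."""
  disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
  if not disks_ok:
    raise errors.OpExecError("Cannot activate block devices")
  return device_info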
2612 def _StartInstanceDisks(lu, instance, force):
2613 """Start the disks of an instance.
2616 disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2617 ignore_secondaries=force)
2619 _ShutdownInstanceDisks(lu, instance)
2620 if force is not None and not force:
2621 lu.proc.LogWarning("", hint="If the message above refers to a"
2623 " you can retry the operation using '--force'.")
2624 raise errors.OpExecError("Disk consistency error")
2627 class LUDeactivateInstanceDisks(NoHooksLU):
2628 """Shutdown an instance's disks.
2631 _OP_REQP = ["instance_name"]
2634 def ExpandNames(self):
2635 self._ExpandAndLockInstance()
2636 self.needed_locks[locking.LEVEL_NODE] = []
2637 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2639 def DeclareLocks(self, level):
2640 if level == locking.LEVEL_NODE:
2641 self._LockInstancesNodes()
2643 def CheckPrereq(self):
2644 """Check prerequisites.
2646 This checks that the instance is in the cluster.
2649 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2650 assert self.instance is not None, \
2651 "Cannot retrieve locked instance %s" % self.op.instance_name
2653 def Exec(self, feedback_fn):
2654 """Deactivate the disks
2657 instance = self.instance
2658 _SafeShutdownInstanceDisks(self, instance)
2661 def _SafeShutdownInstanceDisks(lu, instance):
2662 """Shutdown block devices of an instance.
2664 This function checks if an instance is running, before calling
2665 _ShutdownInstanceDisks.
2668 ins_l = lu.rpc.call_instance_list([instance.primary_node],
2669 [instance.hypervisor])
2670 ins_l = ins_l[instance.primary_node]
2671 if ins_l.failed or not isinstance(ins_l.data, list):
2672 raise errors.OpExecError("Can't contact node '%s'" %
2673 instance.primary_node)
2675 if instance.name in ins_l.data:
2676 raise errors.OpExecError("Instance is running, can't shutdown"
2679 _ShutdownInstanceDisks(lu, instance)
2682 def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2683 """Shutdown block devices of an instance.
2685 This does the shutdown on all nodes of the instance.
2687 If ignore_primary is false, errors on the primary node are not ignored.
2692 for disk in instance.disks:
2693 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2694 lu.cfg.SetDiskID(top_disk, node)
2695 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2696 msg = result.RemoteFailMsg()
2698 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
2699 disk.iv_name, node, msg)
2700 if not ignore_primary or node != instance.primary_node:
2705 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2706 """Checks if a node has enough free memory.
2708 This function checks if a given node has the needed amount of free
2709 memory. In case the node has less memory or we cannot get the
2710 information from the node, this function raises an OpPrereqError
2713 @type lu: C{LogicalUnit}
2714 @param lu: a logical unit from which we get configuration data
2716 @param node: the node to check
2717 @type reason: C{str}
2718 @param reason: string to use in the error message
2719 @type requested: C{int}
2720 @param requested: the amount of memory in MiB to check for
2721 @type hypervisor_name: C{str}
2722 @param hypervisor_name: the hypervisor to ask for memory stats
2723 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2724 we cannot check the node
2727 nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2728 nodeinfo[node].Raise()
2729 free_mem = nodeinfo[node].data.get('memory_free')
2730 if not isinstance(free_mem, int):
2731 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2732 " was '%s'" % (node, free_mem))
2733 if requested > free_mem:
2734 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2735 " needed %s MiB, available %s MiB" %
2736 (node, reason, requested, free_mem))
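# Usage sketch (an assumption, for illustration only): the CheckPrereq-time
# memory check performed by LUs such as LUStartupInstance below, using the
# instance's filled-in backend parameters.
def _ExampleMemoryPrereq(lu, instance):
  """Verifies the primary node can host the instance's configured memory."""
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  _CheckNodeFreeMemory(lu, instance.primary_node,
                       "starting instance %s" % instance.name,
                       bep[constants.BE_MEMORY], instance.hypervisor)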
2739 class LUStartupInstance(LogicalUnit):
2740 """Starts an instance.
2743 HPATH = "instance-start"
2744 HTYPE = constants.HTYPE_INSTANCE
2745 _OP_REQP = ["instance_name", "force"]
2748 def ExpandNames(self):
2749 self._ExpandAndLockInstance()
2751 def BuildHooksEnv(self):
2754 This runs on master, primary and secondary nodes of the instance.
2758 "FORCE": self.op.force,
2760 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2761 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2764 def CheckPrereq(self):
2765 """Check prerequisites.
2767 This checks that the instance is in the cluster.
2770 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2771 assert self.instance is not None, \
2772 "Cannot retrieve locked instance %s" % self.op.instance_name
2775 self.beparams = getattr(self.op, "beparams", {})
2777 if not isinstance(self.beparams, dict):
2778 raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
2779 " dict" % (type(self.beparams), ))
2780 # fill the beparams dict
2781 utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
2782 self.op.beparams = self.beparams
2785 self.hvparams = getattr(self.op, "hvparams", {})
2787 if not isinstance(self.hvparams, dict):
2788 raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
2789 " dict" % (type(self.hvparams), ))
2791 # check hypervisor parameter syntax (locally)
2792 cluster = self.cfg.GetClusterInfo()
2793 utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
2794 filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
2796 filled_hvp.update(self.hvparams)
2797 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
2798 hv_type.CheckParameterSyntax(filled_hvp)
2799 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
2800 self.op.hvparams = self.hvparams
2802 _CheckNodeOnline(self, instance.primary_node)
2804 bep = self.cfg.GetClusterInfo().FillBE(instance)
2805 # check bridges existence
2806 _CheckInstanceBridgesExist(self, instance)
2808 remote_info = self.rpc.call_instance_info(instance.primary_node,
2810 instance.hypervisor)
2812 if not remote_info.data:
2813 _CheckNodeFreeMemory(self, instance.primary_node,
2814 "starting instance %s" % instance.name,
2815 bep[constants.BE_MEMORY], instance.hypervisor)
2817 def Exec(self, feedback_fn):
2818 """Start the instance.
2821 instance = self.instance
2822 force = self.op.force
2824 self.cfg.MarkInstanceUp(instance.name)
2826 node_current = instance.primary_node
2828 _StartInstanceDisks(self, instance, force)
2830 result = self.rpc.call_instance_start(node_current, instance,
2831 self.hvparams, self.beparams)
2832 msg = result.RemoteFailMsg()
2834 _ShutdownInstanceDisks(self, instance)
2835 raise errors.OpExecError("Could not start instance: %s" % msg)
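# Illustrative sketch (an assumption for exposition): the sanitisation
# pattern used by LUStartupInstance.CheckPrereq above for temporary beparams
# overrides; the hvparams override follows the same shape.
def _ExampleCheckTempBeparams(op):
  """Validates and type-checks a temporary beparams override on an opcode."""
  beparams = getattr(op, "beparams", {})
  if not isinstance(beparams, dict):
    raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                               " dict" % (type(beparams), ))
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  return beparams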
2838 class LURebootInstance(LogicalUnit):
2839 """Reboot an instance.
2842 HPATH = "instance-reboot"
2843 HTYPE = constants.HTYPE_INSTANCE
2844 _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2847 def ExpandNames(self):
2848 if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2849 constants.INSTANCE_REBOOT_HARD,
2850 constants.INSTANCE_REBOOT_FULL]:
2851 raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2852 (constants.INSTANCE_REBOOT_SOFT,
2853 constants.INSTANCE_REBOOT_HARD,
2854 constants.INSTANCE_REBOOT_FULL))
2855 self._ExpandAndLockInstance()
2857 def BuildHooksEnv(self):
2860 This runs on master, primary and secondary nodes of the instance.
2864 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2865 "REBOOT_TYPE": self.op.reboot_type,
2867 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2868 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2871 def CheckPrereq(self):
2872 """Check prerequisites.
2874 This checks that the instance is in the cluster.
2877 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2878 assert self.instance is not None, \
2879 "Cannot retrieve locked instance %s" % self.op.instance_name
2881 _CheckNodeOnline(self, instance.primary_node)
2883 # check bridges existence
2884 _CheckInstanceBridgesExist(self, instance)
2886 def Exec(self, feedback_fn):
2887 """Reboot the instance.
2890 instance = self.instance
2891 ignore_secondaries = self.op.ignore_secondaries
2892 reboot_type = self.op.reboot_type
2894 node_current = instance.primary_node
2896 if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2897 constants.INSTANCE_REBOOT_HARD]:
2898 for disk in instance.disks:
2899 self.cfg.SetDiskID(disk, node_current)
2900 result = self.rpc.call_instance_reboot(node_current, instance,
2902 msg = result.RemoteFailMsg()
2904 raise errors.OpExecError("Could not reboot instance: %s" % msg)
2906 result = self.rpc.call_instance_shutdown(node_current, instance)
2907 msg = result.RemoteFailMsg()
2909 raise errors.OpExecError("Could not shutdown instance for"
2910 " full reboot: %s" % msg)
2911 _ShutdownInstanceDisks(self, instance)
2912 _StartInstanceDisks(self, instance, ignore_secondaries)
2913 result = self.rpc.call_instance_start(node_current, instance, None, None)
2914 msg = result.RemoteFailMsg()
2916 _ShutdownInstanceDisks(self, instance)
2917 raise errors.OpExecError("Could not start instance for"
2918 " full reboot: %s" % msg)
2920 self.cfg.MarkInstanceUp(instance.name)
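# Illustrative sketch (exposition-only assumption): the strategy selection in
# LURebootInstance.Exec above - soft and hard reboots are delegated to the
# hypervisor, while a full reboot is a shutdown followed by a cold start.
def _ExampleRebootNeedsFullCycle(reboot_type):
  """Returns True if the reboot must be done as shutdown plus start."""
  return reboot_type not in (constants.INSTANCE_REBOOT_SOFT,
                             constants.INSTANCE_REBOOT_HARD)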
2923 class LUShutdownInstance(LogicalUnit):
2924 """Shutdown an instance.
2927 HPATH = "instance-stop"
2928 HTYPE = constants.HTYPE_INSTANCE
2929 _OP_REQP = ["instance_name"]
2932 def ExpandNames(self):
2933 self._ExpandAndLockInstance()
2935 def BuildHooksEnv(self):
2938 This runs on master, primary and secondary nodes of the instance.
2941 env = _BuildInstanceHookEnvByObject(self, self.instance)
2942 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2945 def CheckPrereq(self):
2946 """Check prerequisites.
2948 This checks that the instance is in the cluster.
2951 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2952 assert self.instance is not None, \
2953 "Cannot retrieve locked instance %s" % self.op.instance_name
2954 _CheckNodeOnline(self, self.instance.primary_node)
2956 def Exec(self, feedback_fn):
2957 """Shutdown the instance.
2960 instance = self.instance
2961 node_current = instance.primary_node
2962 self.cfg.MarkInstanceDown(instance.name)
2963 result = self.rpc.call_instance_shutdown(node_current, instance)
2964 msg = result.RemoteFailMsg()
2966 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
2968 _ShutdownInstanceDisks(self, instance)
2971 class LUReinstallInstance(LogicalUnit):
2972 """Reinstall an instance.
2975 HPATH = "instance-reinstall"
2976 HTYPE = constants.HTYPE_INSTANCE
2977 _OP_REQP = ["instance_name"]
2980 def ExpandNames(self):
2981 self._ExpandAndLockInstance()
2983 def BuildHooksEnv(self):
2986 This runs on master, primary and secondary nodes of the instance.
2989 env = _BuildInstanceHookEnvByObject(self, self.instance)
2990 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2993 def CheckPrereq(self):
2994 """Check prerequisites.
2996 This checks that the instance is in the cluster and is not running.
2999 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3000 assert instance is not None, \
3001 "Cannot retrieve locked instance %s" % self.op.instance_name
3002 _CheckNodeOnline(self, instance.primary_node)
3004 if instance.disk_template == constants.DT_DISKLESS:
3005 raise errors.OpPrereqError("Instance '%s' has no disks" %
3006 self.op.instance_name)
3007 if instance.admin_up:
3008 raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3009 self.op.instance_name)
3010 remote_info = self.rpc.call_instance_info(instance.primary_node,
3012 instance.hypervisor)
3014 if remote_info.data:
3015 raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3016 (self.op.instance_name,
3017 instance.primary_node))
3019 self.op.os_type = getattr(self.op, "os_type", None)
3020 if self.op.os_type is not None:
3022 pnode = self.cfg.GetNodeInfo(
3023 self.cfg.ExpandNodeName(instance.primary_node))
3025 raise errors.OpPrereqError("Primary node '%s' is unknown" %
3027 result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3029 if not isinstance(result.data, objects.OS):
3030 raise errors.OpPrereqError("OS '%s' not in supported OS list for"
3031 " primary node" % self.op.os_type)
3033 self.instance = instance
3035 def Exec(self, feedback_fn):
3036 """Reinstall the instance.
3039 inst = self.instance
3041 if self.op.os_type is not None:
3042 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3043 inst.os = self.op.os_type
3044 self.cfg.Update(inst)
3046 _StartInstanceDisks(self, inst, None)
3048 feedback_fn("Running the instance OS create scripts...")
3049 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3050 msg = result.RemoteFailMsg()
3052 raise errors.OpExecError("Could not install OS for instance %s"
3054 (inst.name, inst.primary_node, msg))
3056 _ShutdownInstanceDisks(self, inst)
3059 class LURenameInstance(LogicalUnit):
3060 """Rename an instance.
3063 HPATH = "instance-rename"
3064 HTYPE = constants.HTYPE_INSTANCE
3065 _OP_REQP = ["instance_name", "new_name"]
3067 def BuildHooksEnv(self):
3070 This runs on master, primary and secondary nodes of the instance.
3073 env = _BuildInstanceHookEnvByObject(self, self.instance)
3074 env["INSTANCE_NEW_NAME"] = self.op.new_name
3075 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3078 def CheckPrereq(self):
3079 """Check prerequisites.
3081 This checks that the instance is in the cluster and is not running.
3084 instance = self.cfg.GetInstanceInfo(
3085 self.cfg.ExpandInstanceName(self.op.instance_name))
3086 if instance is None:
3087 raise errors.OpPrereqError("Instance '%s' not known" %
3088 self.op.instance_name)
3089 _CheckNodeOnline(self, instance.primary_node)
3091 if instance.admin_up:
3092 raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3093 self.op.instance_name)
3094 remote_info = self.rpc.call_instance_info(instance.primary_node,
3096 instance.hypervisor)
3098 if remote_info.data:
3099 raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3100 (self.op.instance_name,
3101 instance.primary_node))
3102 self.instance = instance
3104 # new name verification
3105 name_info = utils.HostInfo(self.op.new_name)
3107 self.op.new_name = new_name = name_info.name
3108 instance_list = self.cfg.GetInstanceList()
3109 if new_name in instance_list:
3110 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3113 if not getattr(self.op, "ignore_ip", False):
3114 if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
3115 raise errors.OpPrereqError("IP %s of instance %s already in use" %
3116 (name_info.ip, new_name))
3119 def Exec(self, feedback_fn):
3120 """Reinstall the instance.
3123 inst = self.instance
3124 old_name = inst.name
3126 if inst.disk_template == constants.DT_FILE:
3127 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3129 self.cfg.RenameInstance(inst.name, self.op.new_name)
3130 # Change the instance lock. This is definitely safe while we hold the BGL
3131 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
3132 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
3134 # re-read the instance from the configuration after rename
3135 inst = self.cfg.GetInstanceInfo(self.op.new_name)
3137 if inst.disk_template == constants.DT_FILE:
3138 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3139 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
3140 old_file_storage_dir,
3141 new_file_storage_dir)
3144 raise errors.OpExecError("Could not connect to node '%s' to rename"
3145 " directory '%s' to '%s' (but the instance"
3146 " has been renamed in Ganeti)" % (
3147 inst.primary_node, old_file_storage_dir,
3148 new_file_storage_dir))
3150 if not result.data[0]:
3151 raise errors.OpExecError("Could not rename directory '%s' to '%s'"
3152 " (but the instance has been renamed in"
3153 " Ganeti)" % (old_file_storage_dir,
3154 new_file_storage_dir))
3156 _StartInstanceDisks(self, inst, None)
3158 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
3160 msg = result.RemoteFailMsg()
3162 msg = ("Could not run OS rename script for instance %s on node %s"
3163 " (but the instance has been renamed in Ganeti): %s" %
3164 (inst.name, inst.primary_node, msg))
3165 self.proc.LogWarning(msg)
3167 _ShutdownInstanceDisks(self, inst)
3170 class LURemoveInstance(LogicalUnit):
3171 """Remove an instance.
3174 HPATH = "instance-remove"
3175 HTYPE = constants.HTYPE_INSTANCE
3176 _OP_REQP = ["instance_name", "ignore_failures"]
3179 def ExpandNames(self):
3180 self._ExpandAndLockInstance()
3181 self.needed_locks[locking.LEVEL_NODE] = []
3182 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3184 def DeclareLocks(self, level):
3185 if level == locking.LEVEL_NODE:
3186 self._LockInstancesNodes()
3188 def BuildHooksEnv(self):
3191 This runs on master, primary and secondary nodes of the instance.
3194 env = _BuildInstanceHookEnvByObject(self, self.instance)
3195 nl = [self.cfg.GetMasterNode()]
3198 def CheckPrereq(self):
3199 """Check prerequisites.
3201 This checks that the instance is in the cluster.
3204 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3205 assert self.instance is not None, \
3206 "Cannot retrieve locked instance %s" % self.op.instance_name
3208 def Exec(self, feedback_fn):
3209 """Remove the instance.
3212 instance = self.instance
3213 logging.info("Shutting down instance %s on node %s",
3214 instance.name, instance.primary_node)
3216 result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
3217 msg = result.RemoteFailMsg()
3219 if self.op.ignore_failures:
3220 feedback_fn("Warning: can't shutdown instance: %s" % msg)
3222 raise errors.OpExecError("Could not shutdown instance %s on"
3224 (instance.name, instance.primary_node, msg))
3226 logging.info("Removing block devices for instance %s", instance.name)
3228 if not _RemoveDisks(self, instance):
3229 if self.op.ignore_failures:
3230 feedback_fn("Warning: can't remove instance's disks")
3232 raise errors.OpExecError("Can't remove instance's disks")
3234 logging.info("Removing instance %s out of cluster config", instance.name)
3236 self.cfg.RemoveInstance(instance.name)
3237 self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3240 class LUQueryInstances(NoHooksLU):
3241 """Logical unit for querying instances.
3244 _OP_REQP = ["output_fields", "names", "use_locking"]
3246 _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3248 "disk_template", "ip", "mac", "bridge",
3249 "sda_size", "sdb_size", "vcpus", "tags",
3250 "network_port", "beparams",
3251 r"(disk)\.(size)/([0-9]+)",
3252 r"(disk)\.(sizes)", "disk_usage",
3253 r"(nic)\.(mac|ip|bridge)/([0-9]+)",
3254 r"(nic)\.(macs|ips|bridges)",
3255 r"(disk|nic)\.(count)",
3256 "serial_no", "hypervisor", "hvparams",] +
3258 for name in constants.HVS_PARAMETERS] +
3260 for name in constants.BES_PARAMETERS])
3261 _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
3264 def ExpandNames(self):
3265 _CheckOutputFields(static=self._FIELDS_STATIC,
3266 dynamic=self._FIELDS_DYNAMIC,
3267 selected=self.op.output_fields)
3269 self.needed_locks = {}
3270 self.share_locks[locking.LEVEL_INSTANCE] = 1
3271 self.share_locks[locking.LEVEL_NODE] = 1
3274 self.wanted = _GetWantedInstances(self, self.op.names)
3276 self.wanted = locking.ALL_SET
3278 self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3279 self.do_locking = self.do_node_query and self.op.use_locking
3281 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3282 self.needed_locks[locking.LEVEL_NODE] = []
3283 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3285 def DeclareLocks(self, level):
3286 if level == locking.LEVEL_NODE and self.do_locking:
3287 self._LockInstancesNodes()
3289 def CheckPrereq(self):
3290 """Check prerequisites.
3295 def Exec(self, feedback_fn):
3296 """Computes the list of nodes and their attributes.
3299 all_info = self.cfg.GetAllInstancesInfo()
3300 if self.wanted == locking.ALL_SET:
3301 # caller didn't specify instance names, so ordering is not important
3303 instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3305 instance_names = all_info.keys()
3306 instance_names = utils.NiceSort(instance_names)
3308 # caller did specify names, so we must keep the ordering
3310 tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
3312 tgt_set = all_info.keys()
3313 missing = set(self.wanted).difference(tgt_set)
3315 raise errors.OpExecError("Some instances were removed before"
3316 " retrieving their data: %s" % missing)
3317 instance_names = self.wanted
3319 instance_list = [all_info[iname] for iname in instance_names]
3321 # begin data gathering
3323 nodes = frozenset([inst.primary_node for inst in instance_list])
3324 hv_list = list(set([inst.hypervisor for inst in instance_list]))
3328 if self.do_node_query:
3330 node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3332 result = node_data[name]
3334 # offline nodes will be in both lists
3335 off_nodes.append(name)
3337 bad_nodes.append(name)
3340 live_data.update(result.data)
3341 # else no instance is alive
3343 live_data = dict([(name, {}) for name in instance_names])
3345 # end data gathering
3350 for instance in instance_list:
3352 i_hv = self.cfg.GetClusterInfo().FillHV(instance)
3353 i_be = self.cfg.GetClusterInfo().FillBE(instance)
3354 for field in self.op.output_fields:
3355 st_match = self._FIELDS_STATIC.Matches(field)
3360 elif field == "pnode":
3361 val = instance.primary_node
3362 elif field == "snodes":
3363 val = list(instance.secondary_nodes)
3364 elif field == "admin_state":
3365 val = instance.admin_up
3366 elif field == "oper_state":
3367 if instance.primary_node in bad_nodes:
3370 val = bool(live_data.get(instance.name))
3371 elif field == "status":
3372 if instance.primary_node in off_nodes:
3373 val = "ERROR_nodeoffline"
3374 elif instance.primary_node in bad_nodes:
3375 val = "ERROR_nodedown"
3377 running = bool(live_data.get(instance.name))
3379 if instance.admin_up:
3384 if instance.admin_up:
3388 elif field == "oper_ram":
3389 if instance.primary_node in bad_nodes:
3391 elif instance.name in live_data:
3392 val = live_data[instance.name].get("memory", "?")
3395 elif field == "disk_template":
3396 val = instance.disk_template
3398 val = instance.nics[0].ip
3399 elif field == "bridge":
3400 val = instance.nics[0].bridge
3401 elif field == "mac":
3402 val = instance.nics[0].mac
3403 elif field == "sda_size" or field == "sdb_size":
3404 idx = ord(field[2]) - ord('a')
3406 val = instance.FindDisk(idx).size
3407 except errors.OpPrereqError:
3409 elif field == "disk_usage": # total disk usage per node
3410 disk_sizes = [{'size': disk.size} for disk in instance.disks]
3411 val = _ComputeDiskSize(instance.disk_template, disk_sizes)
3412 elif field == "tags":
3413 val = list(instance.GetTags())
3414 elif field == "serial_no":
3415 val = instance.serial_no
3416 elif field == "network_port":
3417 val = instance.network_port
3418 elif field == "hypervisor":
3419 val = instance.hypervisor
3420 elif field == "hvparams":
3422 elif (field.startswith(HVPREFIX) and
3423 field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3424 val = i_hv.get(field[len(HVPREFIX):], None)
3425 elif field == "beparams":
3427 elif (field.startswith(BEPREFIX) and
3428 field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3429 val = i_be.get(field[len(BEPREFIX):], None)
3430 elif st_match and st_match.groups():
3431 # matches a variable list
3432 st_groups = st_match.groups()
3433 if st_groups and st_groups[0] == "disk":
3434 if st_groups[1] == "count":
3435 val = len(instance.disks)
3436 elif st_groups[1] == "sizes":
3437 val = [disk.size for disk in instance.disks]
3438 elif st_groups[1] == "size":
3440 val = instance.FindDisk(st_groups[2]).size
3441 except errors.OpPrereqError:
3444 assert False, "Unhandled disk parameter"
3445 elif st_groups[0] == "nic":
3446 if st_groups[1] == "count":
3447 val = len(instance.nics)
3448 elif st_groups[1] == "macs":
3449 val = [nic.mac for nic in instance.nics]
3450 elif st_groups[1] == "ips":
3451 val = [nic.ip for nic in instance.nics]
3452 elif st_groups[1] == "bridges":
3453 val = [nic.bridge for nic in instance.nics]
3456 nic_idx = int(st_groups[2])
3457 if nic_idx >= len(instance.nics):
3460 if st_groups[1] == "mac":
3461 val = instance.nics[nic_idx].mac
3462 elif st_groups[1] == "ip":
3463 val = instance.nics[nic_idx].ip
3464 elif st_groups[1] == "bridge":
3465 val = instance.nics[nic_idx].bridge
3467 assert False, "Unhandled NIC parameter"
3469 assert False, "Unhandled variable parameter"
3471 raise errors.ParameterError(field)
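# Illustrative sketch (an assumption, for exposition): how the parametrised
# field names accepted by LUQueryInstances (e.g. "disk.size/0") map onto the
# regular expressions in its _FIELDS_STATIC set; only the per-disk size field
# is handled here.
def _ExampleDiskSizeField(instance, field):
  """Resolves a "disk.size/N" style field against an instance object."""
  st_match = LUQueryInstances._FIELDS_STATIC.Matches(field)
  if not (st_match and st_match.groups()):
    raise errors.ParameterError(field)
  st_groups = st_match.groups()
  if st_groups[0] == "disk" and st_groups[1] == "size":
    return instance.FindDisk(st_groups[2]).size
  raise errors.ParameterError(field)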
3478 class LUFailoverInstance(LogicalUnit):
3479 """Failover an instance.
3482 HPATH = "instance-failover"
3483 HTYPE = constants.HTYPE_INSTANCE
3484 _OP_REQP = ["instance_name", "ignore_consistency"]
3487 def ExpandNames(self):
3488 self._ExpandAndLockInstance()
3489 self.needed_locks[locking.LEVEL_NODE] = []
3490 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3492 def DeclareLocks(self, level):
3493 if level == locking.LEVEL_NODE:
3494 self._LockInstancesNodes()
3496 def BuildHooksEnv(self):
3499 This runs on master, primary and secondary nodes of the instance.
3503 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3505 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3506 nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3509 def CheckPrereq(self):
3510 """Check prerequisites.
3512 This checks that the instance is in the cluster.
3515 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3516 assert self.instance is not None, \
3517 "Cannot retrieve locked instance %s" % self.op.instance_name
3519 bep = self.cfg.GetClusterInfo().FillBE(instance)
3520 if instance.disk_template not in constants.DTS_NET_MIRROR:
3521 raise errors.OpPrereqError("Instance's disk layout is not"
3522 " network mirrored, cannot failover.")
3524 secondary_nodes = instance.secondary_nodes
3525 if not secondary_nodes:
3526 raise errors.ProgrammerError("no secondary node but using "
3527 "a mirrored disk template")
3529 target_node = secondary_nodes[0]
3530 _CheckNodeOnline(self, target_node)
3531 _CheckNodeNotDrained(self, target_node)
3532 # check memory requirements on the secondary node
3533 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3534 instance.name, bep[constants.BE_MEMORY],
3535 instance.hypervisor)
3537 # check bridge existence
3538 brlist = [nic.bridge for nic in instance.nics]
3539 result = self.rpc.call_bridges_exist(target_node, brlist)
3542 raise errors.OpPrereqError("One or more target bridges %s does not"
3543 " exist on destination node '%s'" %
3544 (brlist, target_node))
3546 def Exec(self, feedback_fn):
3547 """Failover an instance.
3549 The failover is done by shutting it down on its present node and
3550 starting it on the secondary.
3553 instance = self.instance
3555 source_node = instance.primary_node
3556 target_node = instance.secondary_nodes[0]
3558 feedback_fn("* checking disk consistency between source and target")
3559 for dev in instance.disks:
3560 # for drbd, these are drbd over lvm
3561 if not _CheckDiskConsistency(self, dev, target_node, False):
3562 if instance.admin_up and not self.op.ignore_consistency:
3563 raise errors.OpExecError("Disk %s is degraded on target node,"
3564 " aborting failover." % dev.iv_name)
3566 feedback_fn("* shutting down instance on source node")
3567 logging.info("Shutting down instance %s on node %s",
3568 instance.name, source_node)
3570 result = self.rpc.call_instance_shutdown(source_node, instance)
3571 msg = result.RemoteFailMsg()
3573 if self.op.ignore_consistency:
3574 self.proc.LogWarning("Could not shutdown instance %s on node %s."
3575 " Proceeding anyway. Please make sure node"
3576 " %s is down. Error details: %s",
3577 instance.name, source_node, source_node, msg)
3579 raise errors.OpExecError("Could not shutdown instance %s on"
3581 (instance.name, source_node, msg))
3583 feedback_fn("* deactivating the instance's disks on source node")
3584 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3585 raise errors.OpExecError("Can't shut down the instance's disks.")
3587 instance.primary_node = target_node
3588 # distribute new instance config to the other nodes
3589 self.cfg.Update(instance)
3591 # Only start the instance if it's marked as up
3592 if instance.admin_up:
3593 feedback_fn("* activating the instance's disks on target node")
3594 logging.info("Starting instance %s on node %s",
3595 instance.name, target_node)
3597 disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3598 ignore_secondaries=True)
3600 _ShutdownInstanceDisks(self, instance)
3601 raise errors.OpExecError("Can't activate the instance's disks")
3603 feedback_fn("* starting the instance on the target node")
3604 result = self.rpc.call_instance_start(target_node, instance, None, None)
3605 msg = result.RemoteFailMsg()
3607 _ShutdownInstanceDisks(self, instance)
3608 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
3609 (instance.name, target_node, msg))
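# Usage sketch (an assumption, not part of the original module): the bridge
# pre-check done by both LUFailoverInstance and LUMigrateInstance, pulled out
# on its own for illustration.
def _ExampleCheckTargetBridges(lu, instance, target_node):
  """Verifies that all bridges used by the instance exist on target_node."""
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(target_node, brlist)
  if result.failed or not result.data:
    raise errors.OpPrereqError("One or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, target_node))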
3612 class LUMigrateInstance(LogicalUnit):
3613 """Migrate an instance.
3615 This is migration without shutting the instance down, as opposed to
3616 failover, which requires a shutdown.
3619 HPATH = "instance-migrate"
3620 HTYPE = constants.HTYPE_INSTANCE
3621 _OP_REQP = ["instance_name", "live", "cleanup"]
3625 def ExpandNames(self):
3626 self._ExpandAndLockInstance()
3627 self.needed_locks[locking.LEVEL_NODE] = []
3628 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3630 def DeclareLocks(self, level):
3631 if level == locking.LEVEL_NODE:
3632 self._LockInstancesNodes()
3634 def BuildHooksEnv(self):
3637 This runs on master, primary and secondary nodes of the instance.
3640 env = _BuildInstanceHookEnvByObject(self, self.instance)
3641 env["MIGRATE_LIVE"] = self.op.live
3642 env["MIGRATE_CLEANUP"] = self.op.cleanup
3643 nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3646 def CheckPrereq(self):
3647 """Check prerequisites.
3649 This checks that the instance is in the cluster.
3652 instance = self.cfg.GetInstanceInfo(
3653 self.cfg.ExpandInstanceName(self.op.instance_name))
3654 if instance is None:
3655 raise errors.OpPrereqError("Instance '%s' not known" %
3656 self.op.instance_name)
3658 if instance.disk_template != constants.DT_DRBD8:
3659 raise errors.OpPrereqError("Instance's disk layout is not"
3660 " drbd8, cannot migrate.")
3662 secondary_nodes = instance.secondary_nodes
3663 if not secondary_nodes:
3664 raise errors.ConfigurationError("No secondary node but using"
3665 " drbd8 disk template")
3667 i_be = self.cfg.GetClusterInfo().FillBE(instance)
3669 target_node = secondary_nodes[0]
3670 # check memory requirements on the secondary node
3671 _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3672 instance.name, i_be[constants.BE_MEMORY],
3673 instance.hypervisor)
3675 # check bridge existence
3676 brlist = [nic.bridge for nic in instance.nics]
3677 result = self.rpc.call_bridges_exist(target_node, brlist)
3678 if result.failed or not result.data:
3679 raise errors.OpPrereqError("One or more target bridges %s do not"
3680 " exist on destination node '%s'" %
3681 (brlist, target_node))
3683 if not self.op.cleanup:
3684 _CheckNodeNotDrained(self, target_node)
3685 result = self.rpc.call_instance_migratable(instance.primary_node,
3687 msg = result.RemoteFailMsg()
3689 raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
3692 self.instance = instance
3694 def _WaitUntilSync(self):
3695 """Poll with custom rpc for disk sync.
3697 This uses our own step-based rpc call.
3700 self.feedback_fn("* wait until resync is done")
3704 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3706 self.instance.disks)
3708 for node, nres in result.items():
3709 msg = nres.RemoteFailMsg()
3711 raise errors.OpExecError("Cannot resync disks on node %s: %s" %
3713 node_done, node_percent = nres.payload
3714 all_done = all_done and node_done
3715 if node_percent is not None:
3716 min_percent = min(min_percent, node_percent)
3718 if min_percent < 100:
3719 self.feedback_fn(" - progress: %.1f%%" % min_percent)
3722 def _EnsureSecondary(self, node):
3723 """Demote a node to secondary.
3726 self.feedback_fn("* switching node %s to secondary mode" % node)
3728 for dev in self.instance.disks:
3729 self.cfg.SetDiskID(dev, node)
3731 result = self.rpc.call_blockdev_close(node, self.instance.name,
3732 self.instance.disks)
3733 msg = result.RemoteFailMsg()
3735 raise errors.OpExecError("Cannot change disk to secondary on node %s,"
3736 " error %s" % (node, msg))
3738 def _GoStandalone(self):
3739 """Disconnect from the network.
3742 self.feedback_fn("* changing into standalone mode")
3743 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3744 self.instance.disks)
3745 for node, nres in result.items():
3746 msg = nres.RemoteFailMsg()
3748 raise errors.OpExecError("Cannot disconnect disks node %s,"
3749 " error %s" % (node, msg))
3751 def _GoReconnect(self, multimaster):
3752 """Reconnect to the network.
3758 msg = "single-master"
3759 self.feedback_fn("* changing disks into %s mode" % msg)
3760 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3761 self.instance.disks,
3762 self.instance.name, multimaster)
3763 for node, nres in result.items():
3764 msg = nres.RemoteFailMsg()
3766 raise errors.OpExecError("Cannot change disks config on node %s,"
3767 " error: %s" % (node, msg))
3769 def _ExecCleanup(self):
3770 """Try to cleanup after a failed migration.
3772 The cleanup is done by:
3773 - check that the instance is running only on one node
3774 (and update the config if needed)
3775 - change disks on its secondary node to secondary
3776 - wait until disks are fully synchronized
3777 - disconnect from the network
3778 - change disks into single-master mode
3779 - wait again until disks are fully synchronized
3782 instance = self.instance
3783 target_node = self.target_node
3784 source_node = self.source_node
3786 # check running on only one node
3787 self.feedback_fn("* checking where the instance actually runs"
3788 " (if this hangs, the hypervisor might be in"
3790 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
3791 for node, result in ins_l.items():
3793 if not isinstance(result.data, list):
3794 raise errors.OpExecError("Can't contact node '%s'" % node)
3796 runningon_source = instance.name in ins_l[source_node].data
3797 runningon_target = instance.name in ins_l[target_node].data
3799 if runningon_source and runningon_target:
3800 raise errors.OpExecError("Instance seems to be running on two nodes,"
3801 " or the hypervisor is confused. You will have"
3802 " to ensure manually that it runs only on one"
3803 " and restart this operation.")
3805 if not (runningon_source or runningon_target):
3806 raise errors.OpExecError("Instance does not seem to be running at all."
3807 " In this case, it's safer to repair by"
3808 " running 'gnt-instance stop' to ensure disk"
3809 " shutdown, and then restarting it.")
3811 if runningon_target:
3812 # the migration has actually succeeded, we need to update the config
3813 self.feedback_fn("* instance running on secondary node (%s),"
3814 " updating config" % target_node)
3815 instance.primary_node = target_node
3816 self.cfg.Update(instance)
3817 demoted_node = source_node
3819 self.feedback_fn("* instance confirmed to be running on its"
3820 " primary node (%s)" % source_node)
3821 demoted_node = target_node
3823 self._EnsureSecondary(demoted_node)
3825 self._WaitUntilSync()
3826 except errors.OpExecError:
3827 # we ignore here errors, since if the device is standalone, it
3828 # won't be able to sync
3830 self._GoStandalone()
3831 self._GoReconnect(False)
3832 self._WaitUntilSync()
3834 self.feedback_fn("* done")
3836 def _RevertDiskStatus(self):
3837 """Try to revert the disk status after a failed migration.
3840 target_node = self.target_node
3842 self._EnsureSecondary(target_node)
3843 self._GoStandalone()
3844 self._GoReconnect(False)
3845 self._WaitUntilSync()
3846 except errors.OpExecError, err:
3847 self.LogWarning("Migration failed and I can't reconnect the"
3848 " drives: error '%s'\n"
3849 "Please look and recover the instance status" %
3852 def _AbortMigration(self):
3853 """Call the hypervisor code to abort a started migration.
3856 instance = self.instance
3857 target_node = self.target_node
3858 migration_info = self.migration_info
3860 abort_result = self.rpc.call_finalize_migration(target_node,
3864 abort_msg = abort_result.RemoteFailMsg()
3866 logging.error("Aborting migration failed on target node %s: %s",
3867 target_node, abort_msg)
3868 # Don't raise an exception here, as we still have to try to revert the
3869 # disk status, even if this step failed.
3871 def _ExecMigration(self):
3872 """Migrate an instance.
3874 The migrate is done by:
3875 - change the disks into dual-master mode
3876 - wait until disks are fully synchronized again
3877 - migrate the instance
3878 - change disks on the new secondary node (the old primary) to secondary
3879 - wait until disks are fully synchronized
3880 - change disks into single-master mode
3883 instance = self.instance
3884 target_node = self.target_node
3885 source_node = self.source_node
3887 self.feedback_fn("* checking disk consistency between source and target")
3888 for dev in instance.disks:
3889 if not _CheckDiskConsistency(self, dev, target_node, False):
3890 raise errors.OpExecError("Disk %s is degraded or not fully"
3891 " synchronized on target node,"
3892 " aborting migrate." % dev.iv_name)
3894 # First get the migration information from the remote node
3895 result = self.rpc.call_migration_info(source_node, instance)
3896 msg = result.RemoteFailMsg()
3898 log_err = ("Failed fetching source migration information from %s: %s" %
3900 logging.error(log_err)
3901 raise errors.OpExecError(log_err)
3903 self.migration_info = migration_info = result.payload
3905 # Then switch the disks to master/master mode
3906 self._EnsureSecondary(target_node)
3907 self._GoStandalone()
3908 self._GoReconnect(True)
3909 self._WaitUntilSync()
3911 self.feedback_fn("* preparing %s to accept the instance" % target_node)
3912 result = self.rpc.call_accept_instance(target_node,
3915 self.nodes_ip[target_node])
3917 msg = result.RemoteFailMsg()
3919 logging.error("Instance pre-migration failed, trying to revert"
3920 " disk status: %s", msg)
3921 self._AbortMigration()
3922 self._RevertDiskStatus()
3923 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
3924 (instance.name, msg))
3926 self.feedback_fn("* migrating instance to %s" % target_node)
3928 result = self.rpc.call_instance_migrate(source_node, instance,
3929 self.nodes_ip[target_node],
3931 msg = result.RemoteFailMsg()
3933 logging.error("Instance migration failed, trying to revert"
3934 " disk status: %s", msg)
3935 self._AbortMigration()
3936 self._RevertDiskStatus()
3937 raise errors.OpExecError("Could not migrate instance %s: %s" %
3938 (instance.name, msg))
3941 instance.primary_node = target_node
3942 # distribute new instance config to the other nodes
3943 self.cfg.Update(instance)
3945 result = self.rpc.call_finalize_migration(target_node,
3949 msg = result.RemoteFailMsg()
3951 logging.error("Instance migration succeeded, but finalization failed:"
3953 raise errors.OpExecError("Could not finalize instance migration: %s" %
3956 self._EnsureSecondary(source_node)
3957 self._WaitUntilSync()
3958 self._GoStandalone()
3959 self._GoReconnect(False)
3960 self._WaitUntilSync()
3962 self.feedback_fn("* done")
3964 def Exec(self, feedback_fn):
3965 """Perform the migration.
3968 self.feedback_fn = feedback_fn
3970 self.source_node = self.instance.primary_node
3971 self.target_node = self.instance.secondary_nodes[0]
3972 self.all_nodes = [self.source_node, self.target_node]
3974 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
3975 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
3978 return self._ExecCleanup()
3980 return self._ExecMigration()
3983 def _CreateBlockDev(lu, node, instance, device, force_create,
3985 """Create a tree of block devices on a given node.
3987 If this device type has to be created on secondaries, create it and all its children.
3990 If not, just recurse to children keeping the same 'force' value.
3992 @param lu: the lu on whose behalf we execute
3993 @param node: the node on which to create the device
3994 @type instance: L{objects.Instance}
3995 @param instance: the instance which owns the device
3996 @type device: L{objects.Disk}
3997 @param device: the device to create
3998 @type force_create: boolean
3999 @param force_create: whether to force creation of this device; this
4000 will be changed to True whenever we find a device which has the
4001 CreateOnSecondary() attribute
4002 @param info: the extra 'metadata' we should attach to the device
4003 (this will be represented as a LVM tag)
4004 @type force_open: boolean
4005 @param force_open: this parameter will be passed to the
4006 L{backend.BlockdevCreate} function where it specifies
4007 whether we run on primary or not, and it affects both
4008 the child assembly and the device's own Open() execution
4011 if device.CreateOnSecondary():
4015 for child in device.children:
4016 _CreateBlockDev(lu, node, instance, child, force_create,
4019 if not force_create:
4022 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
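# Illustrative sketch (added comment, hypothetical call): how force_create
# propagates when this helper is invoked for a DRBD8 disk with two LV
# children on a secondary node:
#
#   _CreateBlockDev(lu, node, instance, drbd_disk, False, info, False)
#     -> drbd_disk.CreateOnSecondary() is True, so force_create becomes True
#     -> the data and meta LV children are created with force_create=True
#     -> finally _CreateSingleBlockDev() creates the DRBD device itself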
4025 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4026 """Create a single block device on a given node.
4028 This will not recurse over children of the device, so they must be created in advance.
4031 @param lu: the lu on whose behalf we execute
4032 @param node: the node on which to create the device
4033 @type instance: L{objects.Instance}
4034 @param instance: the instance which owns the device
4035 @type device: L{objects.Disk}
4036 @param device: the device to create
4037 @param info: the extra 'metadata' we should attach to the device
4038 (this will be represented as a LVM tag)
4039 @type force_open: boolean
4040 @param force_open: this parameter will be passed to the
4041 L{backend.BlockdevCreate} function where it specifies
4042 whether we run on primary or not, and it affects both
4043 the child assembly and the device's own Open() execution
4046 lu.cfg.SetDiskID(device, node)
4047 result = lu.rpc.call_blockdev_create(node, device, device.size,
4048 instance.name, force_open, info)
4049 msg = result.RemoteFailMsg()
4051 raise errors.OpExecError("Can't create block device %s on"
4052 " node %s for instance %s: %s" %
4053 (device, node, instance.name, msg))
4054 if device.physical_id is None:
4055 device.physical_id = result.payload
4058 def _GenerateUniqueNames(lu, exts):
4059 """Generate a suitable LV name.
4061 This will generate a logical volume name for the given instance.
4066 new_id = lu.cfg.GenerateUniqueID()
4067 results.append("%s%s" % (new_id, val))
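# Example (added comment, hypothetical values): for exts == [".disk0", ".disk1"]
# this returns something like ["<unique-id-1>.disk0", "<unique-id-2>.disk1"],
# where each <unique-id-N> is a fresh value from cfg.GenerateUniqueID().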
4071 def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
4073 """Generate a drbd8 device complete with its children.
4076 port = lu.cfg.AllocatePort()
4077 vgname = lu.cfg.GetVGName()
4078 shared_secret = lu.cfg.GenerateDRBDSecret()
4079 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4080 logical_id=(vgname, names[0]))
4081 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4082 logical_id=(vgname, names[1]))
4083 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
4084 logical_id=(primary, secondary, port,
4087 children=[dev_data, dev_meta],
4092 def _GenerateDiskTemplate(lu, template_name,
4093 instance_name, primary_node,
4094 secondary_nodes, disk_info,
4095 file_storage_dir, file_driver,
4097 """Generate the entire disk layout for a given template type.
4100 #TODO: compute space requirements
4102 vgname = lu.cfg.GetVGName()
4103 disk_count = len(disk_info)
4105 if template_name == constants.DT_DISKLESS:
4107 elif template_name == constants.DT_PLAIN:
4108 if len(secondary_nodes) != 0:
4109 raise errors.ProgrammerError("Wrong template configuration")
4111 names = _GenerateUniqueNames(lu, [".disk%d" % i
4112 for i in range(disk_count)])
4113 for idx, disk in enumerate(disk_info):
4114 disk_index = idx + base_index
4115 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
4116 logical_id=(vgname, names[idx]),
4117 iv_name="disk/%d" % disk_index,
4119 disks.append(disk_dev)
4120 elif template_name == constants.DT_DRBD8:
4121 if len(secondary_nodes) != 1:
4122 raise errors.ProgrammerError("Wrong template configuration")
4123 remote_node = secondary_nodes[0]
4124 minors = lu.cfg.AllocateDRBDMinor(
4125 [primary_node, remote_node] * len(disk_info), instance_name)
4128 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
4129 for i in range(disk_count)]):
4130 names.append(lv_prefix + "_data")
4131 names.append(lv_prefix + "_meta")
4132 for idx, disk in enumerate(disk_info):
4133 disk_index = idx + base_index
4134 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4135 disk["size"], names[idx*2:idx*2+2],
4136 "disk/%d" % disk_index,
4137 minors[idx*2], minors[idx*2+1])
4138 disk_dev.mode = disk["mode"]
4139 disks.append(disk_dev)
4140 elif template_name == constants.DT_FILE:
4141 if len(secondary_nodes) != 0:
4142 raise errors.ProgrammerError("Wrong template configuration")
4144 for idx, disk in enumerate(disk_info):
4145 disk_index = idx + base_index
4146 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4147 iv_name="disk/%d" % disk_index,
4148 logical_id=(file_driver,
4149 "%s/disk%d" % (file_storage_dir,
4152 disks.append(disk_dev)
4154 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
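# Illustrative example (added comment, hypothetical sizes) of the input and
# resulting layout for the DRBD8 template:
#
#   disk_info = [{"size": 1024, "mode": constants.DISK_RDWR}]
#   -> one DRBD8 disk (iv_name "disk/0"), backed on each node by a 1024 MB
#      data LV and a 128 MB metadata LV, named "<unique-id>.disk0_data" and
#      "<unique-id>.disk0_meta"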
4158 def _GetInstanceInfoText(instance):
4159 """Compute that text that should be added to the disk's metadata.
4162 return "originstname+%s" % instance.name
4165 def _CreateDisks(lu, instance):
4166 """Create all disks for an instance.
4168 This abstracts away some work from AddInstance.
4170 @type lu: L{LogicalUnit}
4171 @param lu: the logical unit on whose behalf we execute
4172 @type instance: L{objects.Instance}
4173 @param instance: the instance whose disks we should create
4175 @return: the success of the creation
4178 info = _GetInstanceInfoText(instance)
4179 pnode = instance.primary_node
4181 if instance.disk_template == constants.DT_FILE:
4182 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4183 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4185 if result.failed or not result.data:
4186 raise errors.OpExecError("Could not connect to node '%s'" % pnode)
4188 if not result.data[0]:
4189 raise errors.OpExecError("Failed to create directory '%s'" %
4192 # Note: this needs to be kept in sync with adding of disks in
4193 # LUSetInstanceParams
4194 for device in instance.disks:
4195 logging.info("Creating volume %s for instance %s",
4196 device.iv_name, instance.name)
4198 for node in instance.all_nodes:
4199 f_create = node == pnode
4200 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4203 def _RemoveDisks(lu, instance):
4204 """Remove all disks for an instance.
4206 This abstracts away some work from `AddInstance()` and
4207 `RemoveInstance()`. Note that in case some of the devices couldn't
4208 be removed, the removal will continue with the other ones (compare
4209 with `_CreateDisks()`).
4211 @type lu: L{LogicalUnit}
4212 @param lu: the logical unit on whose behalf we execute
4213 @type instance: L{objects.Instance}
4214 @param instance: the instance whose disks we should remove
4216 @return: the success of the removal
4219 logging.info("Removing block devices for instance %s", instance.name)
4222 for device in instance.disks:
4223 for node, disk in device.ComputeNodeTree(instance.primary_node):
4224 lu.cfg.SetDiskID(disk, node)
4225 msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
4227 lu.LogWarning("Could not remove block device %s on node %s,"
4228 " continuing anyway: %s", device.iv_name, node, msg)
4231 if instance.disk_template == constants.DT_FILE:
4232 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4233 result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4235 if result.failed or not result.data:
4236 logging.error("Could not remove directory '%s'", file_storage_dir)
4242 def _ComputeDiskSize(disk_template, disks):
4243 """Compute disk size requirements in the volume group
4246 # Required free disk space as a function of the disk template and disk sizes
4248 constants.DT_DISKLESS: None,
4249 constants.DT_PLAIN: sum(d["size"] for d in disks),
4250 # 128 MB are added for drbd metadata for each disk
4251 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4252 constants.DT_FILE: None,
4255 if disk_template not in req_size_dict:
4256 raise errors.ProgrammerError("Disk template '%s' size requirement"
4257 " is unknown" % disk_template)
4259 return req_size_dict[disk_template]
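# Worked example (added comment, hypothetical sizes): for
# disks = [{"size": 1024}, {"size": 2048}] the requirement is
#   DT_PLAIN:             1024 + 2048             = 3072 MB
#   DT_DRBD8:             (1024+128) + (2048+128) = 3328 MB
#                         (128 MB of DRBD metadata per disk)
#   DT_DISKLESS, DT_FILE: None (no volume group space needed)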
4262 def _CheckHVParams(lu, nodenames, hvname, hvparams):
4263 """Hypervisor parameter validation.
4265 This function abstracts the hypervisor parameter validation to be
4266 used in both instance create and instance modify.
4268 @type lu: L{LogicalUnit}
4269 @param lu: the logical unit for which we check
4270 @type nodenames: list
4271 @param nodenames: the list of nodes on which we should check
4272 @type hvname: string
4273 @param hvname: the name of the hypervisor we should use
4274 @type hvparams: dict
4275 @param hvparams: the parameters which we need to check
4276 @raise errors.OpPrereqError: if the parameters are not valid
4279 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4282 for node in nodenames:
4286 msg = info.RemoteFailMsg()
4288 raise errors.OpPrereqError("Hypervisor parameter validation"
4289 " failed on node %s: %s" % (node, msg))
4292 class LUCreateInstance(LogicalUnit):
4293 """Create an instance.
4296 HPATH = "instance-add"
4297 HTYPE = constants.HTYPE_INSTANCE
4298 _OP_REQP = ["instance_name", "disks", "disk_template",
4300 "wait_for_sync", "ip_check", "nics",
4301 "hvparams", "beparams"]
4304 def _ExpandNode(self, node):
4305 """Expands and checks one node name.
4308 node_full = self.cfg.ExpandNodeName(node)
4309 if node_full is None:
4310 raise errors.OpPrereqError("Unknown node %s" % node)
4313 def ExpandNames(self):
4314 """ExpandNames for CreateInstance.
4316 Figure out the right locks for instance creation.
4319 self.needed_locks = {}
4321 # set optional parameters to none if they don't exist
4322 for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4323 if not hasattr(self.op, attr):
4324 setattr(self.op, attr, None)
4326 # cheap checks, mostly valid constants given
4328 # verify creation mode
4329 if self.op.mode not in (constants.INSTANCE_CREATE,
4330 constants.INSTANCE_IMPORT):
4331 raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4334 # disk template and mirror node verification
4335 if self.op.disk_template not in constants.DISK_TEMPLATES:
4336 raise errors.OpPrereqError("Invalid disk template name")
4338 if self.op.hypervisor is None:
4339 self.op.hypervisor = self.cfg.GetHypervisorType()
4341 cluster = self.cfg.GetClusterInfo()
4342 enabled_hvs = cluster.enabled_hypervisors
4343 if self.op.hypervisor not in enabled_hvs:
4344 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4345 " cluster (%s)" % (self.op.hypervisor,
4346 ",".join(enabled_hvs)))
4348 # check hypervisor parameter syntax (locally)
4349 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4350 filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
4352 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4353 hv_type.CheckParameterSyntax(filled_hvp)
4355 # fill and remember the beparams dict
4356 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4357 self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4360 #### instance parameters check
4362 # instance name verification
4363 hostname1 = utils.HostInfo(self.op.instance_name)
4364 self.op.instance_name = instance_name = hostname1.name
4366 # this is just a preventive check, but someone might still add this
4367 # instance in the meantime, and creation will fail at lock-add time
4368 if instance_name in self.cfg.GetInstanceList():
4369 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4372 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4376 for nic in self.op.nics:
4377 # ip validity checks
4378 ip = nic.get("ip", None)
4379 if ip is None or ip.lower() == "none":
4381 elif ip.lower() == constants.VALUE_AUTO:
4382 nic_ip = hostname1.ip
4384 if not utils.IsValidIP(ip):
4385 raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4386 " like a valid IP" % ip)
4389 # MAC address verification
4390 mac = nic.get("mac", constants.VALUE_AUTO)
4391 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4392 if not utils.IsValidMac(mac.lower()):
4393 raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4395 # bridge verification
4396 bridge = nic.get("bridge", None)
4398 bridge = self.cfg.GetDefBridge()
4399 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
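# Illustrative example (added comment, hypothetical values) of the opcode
# 'nics' parameter as parsed above:
#
#   nics = [{"ip": constants.VALUE_AUTO, "mac": constants.VALUE_AUTO}]
#   -> an "auto" ip resolves to the instance's own address (hostname1.ip),
#      a missing/None/"none" ip means no IP, a missing bridge falls back to
#      cfg.GetDefBridge(), and "auto"/"generate" MACs are generated later,
#      in CheckPrereq.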
4401 # disk checks/pre-build
4403 for disk in self.op.disks:
4404 mode = disk.get("mode", constants.DISK_RDWR)
4405 if mode not in constants.DISK_ACCESS_SET:
4406 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4408 size = disk.get("size", None)
4410 raise errors.OpPrereqError("Missing disk size")
4414 raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4415 self.disks.append({"size": size, "mode": mode})
4417 # used in CheckPrereq for ip ping check
4418 self.check_ip = hostname1.ip
4420 # file storage checks
4421 if (self.op.file_driver and
4422 not self.op.file_driver in constants.FILE_DRIVER):
4423 raise errors.OpPrereqError("Invalid file driver name '%s'" %
4424 self.op.file_driver)
4426 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4427 raise errors.OpPrereqError("File storage directory path not absolute")
4429 ### Node/iallocator related checks
4430 if [self.op.iallocator, self.op.pnode].count(None) != 1:
4431 raise errors.OpPrereqError("One and only one of iallocator and primary"
4432 " node must be given")
4434 if self.op.iallocator:
4435 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4437 self.op.pnode = self._ExpandNode(self.op.pnode)
4438 nodelist = [self.op.pnode]
4439 if self.op.snode is not None:
4440 self.op.snode = self._ExpandNode(self.op.snode)
4441 nodelist.append(self.op.snode)
4442 self.needed_locks[locking.LEVEL_NODE] = nodelist
4444 # in case of import lock the source node too
4445 if self.op.mode == constants.INSTANCE_IMPORT:
4446 src_node = getattr(self.op, "src_node", None)
4447 src_path = getattr(self.op, "src_path", None)
4449 if src_path is None:
4450 self.op.src_path = src_path = self.op.instance_name
4452 if src_node is None:
4453 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4454 self.op.src_node = None
4455 if os.path.isabs(src_path):
4456 raise errors.OpPrereqError("Importing an instance from an absolute"
4457 " path requires a source node option.")
4459 self.op.src_node = src_node = self._ExpandNode(src_node)
4460 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4461 self.needed_locks[locking.LEVEL_NODE].append(src_node)
4462 if not os.path.isabs(src_path):
4463 self.op.src_path = src_path = \
4464 os.path.join(constants.EXPORT_DIR, src_path)
4466 else: # INSTANCE_CREATE
4467 if getattr(self.op, "os_type", None) is None:
4468 raise errors.OpPrereqError("No guest OS specified")
4470 def _RunAllocator(self):
4471 """Run the allocator based on input opcode.
4474 nics = [n.ToDict() for n in self.nics]
4475 ial = IAllocator(self,
4476 mode=constants.IALLOCATOR_MODE_ALLOC,
4477 name=self.op.instance_name,
4478 disk_template=self.op.disk_template,
4481 vcpus=self.be_full[constants.BE_VCPUS],
4482 mem_size=self.be_full[constants.BE_MEMORY],
4485 hypervisor=self.op.hypervisor,
4488 ial.Run(self.op.iallocator)
4491 raise errors.OpPrereqError("Can't compute nodes using"
4492 " iallocator '%s': %s" % (self.op.iallocator,
4494 if len(ial.nodes) != ial.required_nodes:
4495 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4496 " of nodes (%s), required %s" %
4497 (self.op.iallocator, len(ial.nodes),
4498 ial.required_nodes))
4499 self.op.pnode = ial.nodes[0]
4500 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4501 self.op.instance_name, self.op.iallocator,
4502 ", ".join(ial.nodes))
4503 if ial.required_nodes == 2:
4504 self.op.snode = ial.nodes[1]
4506 def BuildHooksEnv(self):
4509 This runs on master, primary and secondary nodes of the instance.
4513 "ADD_MODE": self.op.mode,
4515 if self.op.mode == constants.INSTANCE_IMPORT:
4516 env["SRC_NODE"] = self.op.src_node
4517 env["SRC_PATH"] = self.op.src_path
4518 env["SRC_IMAGES"] = self.src_images
4520 env.update(_BuildInstanceHookEnv(
4521 name=self.op.instance_name,
4522 primary_node=self.op.pnode,
4523 secondary_nodes=self.secondaries,
4524 status=self.op.start,
4525 os_type=self.op.os_type,
4526 memory=self.be_full[constants.BE_MEMORY],
4527 vcpus=self.be_full[constants.BE_VCPUS],
4528 nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
4529 disk_template=self.op.disk_template,
4530 disks=[(d["size"], d["mode"]) for d in self.disks],
4533 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4538 def CheckPrereq(self):
4539 """Check prerequisites.
4542 if (not self.cfg.GetVGName() and
4543 self.op.disk_template not in constants.DTS_NOT_LVM):
4544 raise errors.OpPrereqError("Cluster does not support lvm-based"
4547 if self.op.mode == constants.INSTANCE_IMPORT:
4548 src_node = self.op.src_node
4549 src_path = self.op.src_path
4551 if src_node is None:
4552 exp_list = self.rpc.call_export_list(
4553 self.acquired_locks[locking.LEVEL_NODE])
4555 for node in exp_list:
4556 if not exp_list[node].failed and src_path in exp_list[node].data:
4558 self.op.src_node = src_node = node
4559 self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4563 raise errors.OpPrereqError("No export found for relative path %s" %
4566 _CheckNodeOnline(self, src_node)
4567 result = self.rpc.call_export_info(src_node, src_path)
4570 raise errors.OpPrereqError("No export found in dir %s" % src_path)
4572 export_info = result.data
4573 if not export_info.has_section(constants.INISECT_EXP):
4574 raise errors.ProgrammerError("Corrupted export config")
4576 ei_version = export_info.get(constants.INISECT_EXP, 'version')
4577 if (int(ei_version) != constants.EXPORT_VERSION):
4578 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4579 (ei_version, constants.EXPORT_VERSION))
4581 # Check that the new instance doesn't have fewer disks than the export
4582 instance_disks = len(self.disks)
4583 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4584 if instance_disks < export_disks:
4585 raise errors.OpPrereqError("Not enough disks to import."
4586 " (instance: %d, export: %d)" %
4587 (instance_disks, export_disks))
4589 self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4591 for idx in range(export_disks):
4592 option = 'disk%d_dump' % idx
4593 if export_info.has_option(constants.INISECT_INS, option):
4594 # FIXME: are the old os-es, disk sizes, etc. useful?
4595 export_name = export_info.get(constants.INISECT_INS, option)
4596 image = os.path.join(src_path, export_name)
4597 disk_images.append(image)
4599 disk_images.append(False)
4601 self.src_images = disk_images
4603 old_name = export_info.get(constants.INISECT_INS, 'name')
4604 # FIXME: int() here could throw a ValueError on broken exports
4605 exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4606 if self.op.instance_name == old_name:
4607 for idx, nic in enumerate(self.nics):
4608 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
4609 nic_mac_ini = 'nic%d_mac' % idx
4610 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4612 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4613 # ip ping checks (we use the same ip that was resolved in ExpandNames)
4614 if self.op.start and not self.op.ip_check:
4615 raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4616 " adding an instance in start mode")
4618 if self.op.ip_check:
4619 if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4620 raise errors.OpPrereqError("IP %s of instance %s already in use" %
4621 (self.check_ip, self.op.instance_name))
4623 #### mac address generation
4624 # By generating the mac address here, both the allocator and the hooks get
4625 # the real final mac address rather than the 'auto' or 'generate' value.
4626 # There is a race condition between the generation and the instance object
4627 # creation, which means that we know the mac is valid now, but we're not
4628 # sure it will be when we actually add the instance. If things go bad
4629 # adding the instance will abort because of a duplicate mac, and the
4630 # creation job will fail.
4631 for nic in self.nics:
4632 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4633 nic.mac = self.cfg.GenerateMAC()
4637 if self.op.iallocator is not None:
4638 self._RunAllocator()
4640 #### node related checks
4642 # check primary node
4643 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4644 assert self.pnode is not None, \
4645 "Cannot retrieve locked node %s" % self.op.pnode
4647 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4650 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4653 self.secondaries = []
4655 # mirror node verification
4656 if self.op.disk_template in constants.DTS_NET_MIRROR:
4657 if self.op.snode is None:
4658 raise errors.OpPrereqError("The networked disk templates need"
4660 if self.op.snode == pnode.name:
4661 raise errors.OpPrereqError("The secondary node cannot be"
4662 " the primary node.")
4663 _CheckNodeOnline(self, self.op.snode)
4664 _CheckNodeNotDrained(self, self.op.snode)
4665 self.secondaries.append(self.op.snode)
4667 nodenames = [pnode.name] + self.secondaries
4669 req_size = _ComputeDiskSize(self.op.disk_template,
4672 # Check lv size requirements
4673 if req_size is not None:
4674 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4676 for node in nodenames:
4677 info = nodeinfo[node]
4681 raise errors.OpPrereqError("Cannot get current information"
4682 " from node '%s'" % node)
4683 vg_free = info.get('vg_free', None)
4684 if not isinstance(vg_free, int):
4685 raise errors.OpPrereqError("Can't compute free disk space on"
4687 if req_size > info['vg_free']:
4688 raise errors.OpPrereqError("Not enough disk space on target node %s."
4689 " %d MB available, %d MB required" %
4690 (node, info['vg_free'], req_size))
4692 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4695 result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4697 if not isinstance(result.data, objects.OS):
4698 raise errors.OpPrereqError("OS '%s' not in supported os list for"
4699 " primary node" % self.op.os_type)
4701 # bridge check on primary node
4702 bridges = [n.bridge for n in self.nics]
4703 result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
4706 raise errors.OpPrereqError("One of the target bridges '%s' does not"
4707 " exist on destination node '%s'" %
4708 (",".join(bridges), pnode.name))
4710 # memory check on primary node
4712 _CheckNodeFreeMemory(self, self.pnode.name,
4713 "creating instance %s" % self.op.instance_name,
4714 self.be_full[constants.BE_MEMORY],
4717 def Exec(self, feedback_fn):
4718 """Create and add the instance to the cluster.
4721 instance = self.op.instance_name
4722 pnode_name = self.pnode.name
4724 ht_kind = self.op.hypervisor
4725 if ht_kind in constants.HTS_REQ_PORT:
4726 network_port = self.cfg.AllocatePort()
4730 ##if self.op.vnc_bind_address is None:
4731 ## self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4733 # this is needed because os.path.join does not accept None arguments
4734 if self.op.file_storage_dir is None:
4735 string_file_storage_dir = ""
4737 string_file_storage_dir = self.op.file_storage_dir
4739 # build the full file storage dir path
4740 file_storage_dir = os.path.normpath(os.path.join(
4741 self.cfg.GetFileStorageDir(),
4742 string_file_storage_dir, instance))
4745 disks = _GenerateDiskTemplate(self,
4746 self.op.disk_template,
4747 instance, pnode_name,
4751 self.op.file_driver,
4754 iobj = objects.Instance(name=instance, os=self.op.os_type,
4755 primary_node=pnode_name,
4756 nics=self.nics, disks=disks,
4757 disk_template=self.op.disk_template,
4759 network_port=network_port,
4760 beparams=self.op.beparams,
4761 hvparams=self.op.hvparams,
4762 hypervisor=self.op.hypervisor,
4765 feedback_fn("* creating instance disks...")
4767 _CreateDisks(self, iobj)
4768 except errors.OpExecError:
4769 self.LogWarning("Device creation failed, reverting...")
4771 _RemoveDisks(self, iobj)
4773 self.cfg.ReleaseDRBDMinors(instance)
4776 feedback_fn("adding instance %s to cluster config" % instance)
4778 self.cfg.AddInstance(iobj)
4779 # Declare that we don't want to remove the instance lock anymore, as we've
4780 # added the instance to the config
4781 del self.remove_locks[locking.LEVEL_INSTANCE]
4782 # Unlock all the nodes
4783 if self.op.mode == constants.INSTANCE_IMPORT:
4784 nodes_keep = [self.op.src_node]
4785 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4786 if node != self.op.src_node]
4787 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4788 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4790 self.context.glm.release(locking.LEVEL_NODE)
4791 del self.acquired_locks[locking.LEVEL_NODE]
4793 if self.op.wait_for_sync:
4794 disk_abort = not _WaitForSync(self, iobj)
4795 elif iobj.disk_template in constants.DTS_NET_MIRROR:
4796 # make sure the disks are not degraded (still sync-ing is ok)
4798 feedback_fn("* checking mirrors status")
4799 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4804 _RemoveDisks(self, iobj)
4805 self.cfg.RemoveInstance(iobj.name)
4806 # Make sure the instance lock gets removed
4807 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4808 raise errors.OpExecError("There are some degraded disks for"
4811 feedback_fn("creating os for instance %s on node %s" %
4812 (instance, pnode_name))
4814 if iobj.disk_template != constants.DT_DISKLESS:
4815 if self.op.mode == constants.INSTANCE_CREATE:
4816 feedback_fn("* running the instance OS create scripts...")
4817 result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
4818 msg = result.RemoteFailMsg()
4820 raise errors.OpExecError("Could not add os for instance %s"
4822 (instance, pnode_name, msg))
4824 elif self.op.mode == constants.INSTANCE_IMPORT:
4825 feedback_fn("* running the instance OS import scripts...")
4826 src_node = self.op.src_node
4827 src_images = self.src_images
4828 cluster_name = self.cfg.GetClusterName()
4829 import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4830 src_node, src_images,
4832 import_result.Raise()
4833 for idx, result in enumerate(import_result.data):
4835 self.LogWarning("Could not import the image %s for instance"
4836 " %s, disk %d, on node %s" %
4837 (src_images[idx], instance, idx, pnode_name))
4839 # also checked in the prereq part
4840 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4844 iobj.admin_up = True
4845 self.cfg.Update(iobj)
4846 logging.info("Starting instance %s on node %s", instance, pnode_name)
4847 feedback_fn("* starting instance...")
4848 result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
4849 msg = result.RemoteFailMsg()
4851 raise errors.OpExecError("Could not start instance: %s" % msg)
4854 class LUConnectConsole(NoHooksLU):
4855 """Connect to an instance's console.
4857 This is somewhat special in that it returns the command line that
4858 you need to run on the master node in order to connect to the console.
4862 _OP_REQP = ["instance_name"]
4865 def ExpandNames(self):
4866 self._ExpandAndLockInstance()
4868 def CheckPrereq(self):
4869 """Check prerequisites.
4871 This checks that the instance is in the cluster.
4874 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4875 assert self.instance is not None, \
4876 "Cannot retrieve locked instance %s" % self.op.instance_name
4877 _CheckNodeOnline(self, self.instance.primary_node)
4879 def Exec(self, feedback_fn):
4880 """Connect to the console of an instance
4883 instance = self.instance
4884 node = instance.primary_node
4886 node_insts = self.rpc.call_instance_list([node],
4887 [instance.hypervisor])[node]
4890 if instance.name not in node_insts.data:
4891 raise errors.OpExecError("Instance %s is not running." % instance.name)
4893 logging.debug("Connecting to console of %s on %s", instance.name, node)
4895 hyper = hypervisor.GetHypervisor(instance.hypervisor)
4896 cluster = self.cfg.GetClusterInfo()
4897 # beparams and hvparams are passed separately, to avoid editing the
4898 # instance and then saving the defaults in the instance itself.
4899 hvparams = cluster.FillHV(instance)
4900 beparams = cluster.FillBE(instance)
4901 console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
4904 return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
4907 class LUReplaceDisks(LogicalUnit):
4908 """Replace the disks of an instance.
4911 HPATH = "mirrors-replace"
4912 HTYPE = constants.HTYPE_INSTANCE
4913 _OP_REQP = ["instance_name", "mode", "disks"]
4916 def CheckArguments(self):
4917 if not hasattr(self.op, "remote_node"):
4918 self.op.remote_node = None
4919 if not hasattr(self.op, "iallocator"):
4920 self.op.iallocator = None
4922 # check for valid parameter combination
4923 cnt = [self.op.remote_node, self.op.iallocator].count(None)
4924 if self.op.mode == constants.REPLACE_DISK_CHG:
4926 raise errors.OpPrereqError("When changing the secondary either an"
4927 " iallocator script must be used or the"
4930 raise errors.OpPrereqError("Give either the iallocator or the new"
4931 " secondary, not both")
4932 else: # not replacing the secondary
4934 raise errors.OpPrereqError("The iallocator and new node options can"
4935 " be used only when changing the"
4938 def ExpandNames(self):
4939 self._ExpandAndLockInstance()
4941 if self.op.iallocator is not None:
4942 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4943 elif self.op.remote_node is not None:
4944 remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
4945 if remote_node is None:
4946 raise errors.OpPrereqError("Node '%s' not known" %
4947 self.op.remote_node)
4948 self.op.remote_node = remote_node
4949 # Warning: do not remove the locking of the new secondary here
4950 # unless DRBD8.AddChildren is changed to work in parallel;
4951 # currently it doesn't since parallel invocations of
4952 # FindUnusedMinor will conflict
4953 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
4954 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4956 self.needed_locks[locking.LEVEL_NODE] = []
4957 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4959 def DeclareLocks(self, level):
4960 # If we're not already locking all nodes in the set we have to declare the
4961 # instance's primary/secondary nodes.
4962 if (level == locking.LEVEL_NODE and
4963 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
4964 self._LockInstancesNodes()
4966 def _RunAllocator(self):
4967 """Compute a new secondary node using an IAllocator.
4970 ial = IAllocator(self,
4971 mode=constants.IALLOCATOR_MODE_RELOC,
4972 name=self.op.instance_name,
4973 relocate_from=[self.sec_node])
4975 ial.Run(self.op.iallocator)
4978 raise errors.OpPrereqError("Can't compute nodes using"
4979 " iallocator '%s': %s" % (self.op.iallocator,
4981 if len(ial.nodes) != ial.required_nodes:
4982 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4983 " of nodes (%s), required %s" %
4984 (self.op.iallocator, len(ial.nodes), ial.required_nodes))
4985 self.op.remote_node = ial.nodes[0]
4986 self.LogInfo("Selected new secondary for the instance: %s",
4987 self.op.remote_node)
4989 def BuildHooksEnv(self):
4992 This runs on the master, the primary and all the secondaries.
4996 "MODE": self.op.mode,
4997 "NEW_SECONDARY": self.op.remote_node,
4998 "OLD_SECONDARY": self.instance.secondary_nodes[0],
5000 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5002 self.cfg.GetMasterNode(),
5003 self.instance.primary_node,
5005 if self.op.remote_node is not None:
5006 nl.append(self.op.remote_node)
5009 def CheckPrereq(self):
5010 """Check prerequisites.
5012 This checks that the instance is in the cluster.
5015 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5016 assert instance is not None, \
5017 "Cannot retrieve locked instance %s" % self.op.instance_name
5018 self.instance = instance
5020 if instance.disk_template != constants.DT_DRBD8:
5021 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
5024 if len(instance.secondary_nodes) != 1:
5025 raise errors.OpPrereqError("The instance has a strange layout,"
5026 " expected one secondary but found %d" %
5027 len(instance.secondary_nodes))
5029 self.sec_node = instance.secondary_nodes[0]
5031 if self.op.iallocator is not None:
5032 self._RunAllocator()
5034 remote_node = self.op.remote_node
5035 if remote_node is not None:
5036 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
5037 assert self.remote_node_info is not None, \
5038 "Cannot retrieve locked node %s" % remote_node
5040 self.remote_node_info = None
5041 if remote_node == instance.primary_node:
5042 raise errors.OpPrereqError("The specified node is the primary node of"
5044 elif remote_node == self.sec_node:
5045 raise errors.OpPrereqError("The specified node is already the"
5046 " secondary node of the instance.")
5048 if self.op.mode == constants.REPLACE_DISK_PRI:
5049 n1 = self.tgt_node = instance.primary_node
5050 n2 = self.oth_node = self.sec_node
5051 elif self.op.mode == constants.REPLACE_DISK_SEC:
5052 n1 = self.tgt_node = self.sec_node
5053 n2 = self.oth_node = instance.primary_node
5054 elif self.op.mode == constants.REPLACE_DISK_CHG:
5055 n1 = self.new_node = remote_node
5056 n2 = self.oth_node = instance.primary_node
5057 self.tgt_node = self.sec_node
5058 _CheckNodeNotDrained(self, remote_node)
5060 raise errors.ProgrammerError("Unhandled disk replace mode")
5062 _CheckNodeOnline(self, n1)
5063 _CheckNodeOnline(self, n2)
5065 if not self.op.disks:
5066 self.op.disks = range(len(instance.disks))
5068 for disk_idx in self.op.disks:
5069 instance.FindDisk(disk_idx)
5071 def _ExecD8DiskOnly(self, feedback_fn):
5072 """Replace a disk on the primary or secondary for dbrd8.
5074 The algorithm for replace is quite complicated:
5076 1. for each disk to be replaced:
5078 1. create new LVs on the target node with unique names
5079 1. detach old LVs from the drbd device
5080 1. rename old LVs to name_replaced.<time_t>
5081 1. rename new LVs to old LVs
5082 1. attach the new LVs (with the old names now) to the drbd device
5084 1. wait for sync across all devices
5086 1. for each modified disk:
5088 1. remove old LVs (which have the name name_replaces.<time_t>)
5090 Failures are not very well handled.
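# Illustrative sketch (added comment, hypothetical LV names) of the rename
# dance performed in steps 3-4 below for a single disk:
#
#   old LV  xxx.disk0_data  -> renamed to  xxx.disk0_data_replaced-<time_t>
#   new LV  yyy.disk0_data  -> renamed to  xxx.disk0_data (the old name)
#
# The renamed new LVs are then attached as children of the DRBD device, and
# the *_replaced-<time_t> volumes are removed in step 6.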
5094 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5095 instance = self.instance
5097 vgname = self.cfg.GetVGName()
5100 tgt_node = self.tgt_node
5101 oth_node = self.oth_node
5103 # Step: check device activation
5104 self.proc.LogStep(1, steps_total, "check device existence")
5105 info("checking volume groups")
5106 my_vg = cfg.GetVGName()
5107 results = self.rpc.call_vg_list([oth_node, tgt_node])
5109 raise errors.OpExecError("Can't list volume groups on the nodes")
5110 for node in oth_node, tgt_node:
5112 if res.failed or not res.data or my_vg not in res.data:
5113 raise errors.OpExecError("Volume group '%s' not found on %s" %
5115 for idx, dev in enumerate(instance.disks):
5116 if idx not in self.op.disks:
5118 for node in tgt_node, oth_node:
5119 info("checking disk/%d on %s" % (idx, node))
5120 cfg.SetDiskID(dev, node)
5121 result = self.rpc.call_blockdev_find(node, dev)
5122 msg = result.RemoteFailMsg()
5123 if not msg and not result.payload:
5124 msg = "disk not found"
5126 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5129 # Step: check other node consistency
5130 self.proc.LogStep(2, steps_total, "check peer consistency")
5131 for idx, dev in enumerate(instance.disks):
5132 if idx not in self.op.disks:
5134 info("checking disk/%d consistency on %s" % (idx, oth_node))
5135 if not _CheckDiskConsistency(self, dev, oth_node,
5136 oth_node==instance.primary_node):
5137 raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5138 " to replace disks on this node (%s)" %
5139 (oth_node, tgt_node))
5141 # Step: create new storage
5142 self.proc.LogStep(3, steps_total, "allocate new storage")
5143 for idx, dev in enumerate(instance.disks):
5144 if idx not in self.op.disks:
5147 cfg.SetDiskID(dev, tgt_node)
5148 lv_names = [".disk%d_%s" % (idx, suf)
5149 for suf in ["data", "meta"]]
5150 names = _GenerateUniqueNames(self, lv_names)
5151 lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5152 logical_id=(vgname, names[0]))
5153 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5154 logical_id=(vgname, names[1]))
5155 new_lvs = [lv_data, lv_meta]
5156 old_lvs = dev.children
5157 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5158 info("creating new local storage on %s for %s" %
5159 (tgt_node, dev.iv_name))
5160 # we pass force_create=True to force the LVM creation
5161 for new_lv in new_lvs:
5162 _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5163 _GetInstanceInfoText(instance), False)
5165 # Step: for each lv, detach+rename*2+attach
5166 self.proc.LogStep(4, steps_total, "change drbd configuration")
5167 for dev, old_lvs, new_lvs in iv_names.itervalues():
5168 info("detaching %s drbd from local storage" % dev.iv_name)
5169 result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5172 raise errors.OpExecError("Can't detach drbd from local storage on node"
5173 " %s for device %s" % (tgt_node, dev.iv_name))
5175 #cfg.Update(instance)
5177 # ok, we created the new LVs, so now we know we have the needed
5178 # storage; as such, we proceed on the target node to rename
5179 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5180 # using the assumption that logical_id == physical_id (which in
5181 # turn is the unique_id on that node)
5183 # FIXME(iustin): use a better name for the replaced LVs
5184 temp_suffix = int(time.time())
5185 ren_fn = lambda d, suff: (d.physical_id[0],
5186 d.physical_id[1] + "_replaced-%s" % suff)
5187 # build the rename list based on what LVs exist on the node
5189 for to_ren in old_lvs:
5190 result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5191 if not result.RemoteFailMsg() and result.payload:
5193 rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5195 info("renaming the old LVs on the target node")
5196 result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5199 raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
5200 # now we rename the new LVs to the old LVs
5201 info("renaming the new LVs on the target node")
5202 rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5203 result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5206 raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
5208 for old, new in zip(old_lvs, new_lvs):
5209 new.logical_id = old.logical_id
5210 cfg.SetDiskID(new, tgt_node)
5212 for disk in old_lvs:
5213 disk.logical_id = ren_fn(disk, temp_suffix)
5214 cfg.SetDiskID(disk, tgt_node)
5216 # now that the new lvs have the old name, we can add them to the device
5217 info("adding new mirror component on %s" % tgt_node)
5218 result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5219 if result.failed or not result.data:
5220 for new_lv in new_lvs:
5221 msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
5223 warning("Can't rollback device %s: %s", dev, msg,
5224 hint="cleanup manually the unused logical volumes")
5225 raise errors.OpExecError("Can't add local storage to drbd")
5227 dev.children = new_lvs
5228 cfg.Update(instance)
5230 # Step: wait for sync
5232 # this can fail as the old devices are degraded and _WaitForSync
5233 # does a combined result over all disks, so we don't check its return value
5235 self.proc.LogStep(5, steps_total, "sync devices")
5236 _WaitForSync(self, instance, unlock=True)
5238 # so check manually all the devices
5239 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5240 cfg.SetDiskID(dev, instance.primary_node)
5241 result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5242 msg = result.RemoteFailMsg()
5243 if not msg and not result.payload:
5244 msg = "disk not found"
5246 raise errors.OpExecError("Can't find DRBD device %s: %s" %
5248 if result.payload[5]:
5249 raise errors.OpExecError("DRBD device %s is degraded!" % name)
5251 # Step: remove old storage
5252 self.proc.LogStep(6, steps_total, "removing old storage")
5253 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5254 info("remove logical volumes for %s" % name)
5256 cfg.SetDiskID(lv, tgt_node)
5257 msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
5259 warning("Can't remove old LV: %s" % msg,
5260 hint="manually remove unused LVs")
5263 def _ExecD8Secondary(self, feedback_fn):
5264 """Replace the secondary node for drbd8.
5266 The algorithm for replace is quite complicated:
5267 - for all disks of the instance:
5268 - create new LVs on the new node with same names
5269 - shutdown the drbd device on the old secondary
5270 - disconnect the drbd network on the primary
5271 - create the drbd device on the new secondary
5272 - network attach the drbd on the primary, using an artifice:
5273 the drbd code for Attach() will connect to the network if it
5274 finds a device which is connected to the good local disks but not network enabled
5276 - wait for sync across all devices
5277 - remove all disks from the old secondary
5279 Failures are not very well handled.
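# Illustrative sketch (added comment, hypothetical values) of the two
# logical_ids built per disk in step 4 below:
#
#   new_alone_id = (pri_node, new_node, None,   p_minor, new_minor, secret)
#       port is None, so the DRBD device on the new secondary is created
#       without networking information (standalone)
#   new_net_id   = (pri_node, new_node, o_port, p_minor, new_minor, secret)
#       stored in the instance config and used when the primary drbds are
#       re-attached to the network, now pointing at the new secondary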
5283 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5284 instance = self.instance
5288 old_node = self.tgt_node
5289 new_node = self.new_node
5290 pri_node = instance.primary_node
5292 old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5293 new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5294 pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5297 # Step: check device activation
5298 self.proc.LogStep(1, steps_total, "check device existence")
5299 info("checking volume groups")
5300 my_vg = cfg.GetVGName()
5301 results = self.rpc.call_vg_list([pri_node, new_node])
5302 for node in pri_node, new_node:
5304 if res.failed or not res.data or my_vg not in res.data:
5305 raise errors.OpExecError("Volume group '%s' not found on %s" %
5307 for idx, dev in enumerate(instance.disks):
5308 if idx not in self.op.disks:
5310 info("checking disk/%d on %s" % (idx, pri_node))
5311 cfg.SetDiskID(dev, pri_node)
5312 result = self.rpc.call_blockdev_find(pri_node, dev)
5313 msg = result.RemoteFailMsg()
5314 if not msg and not result.payload:
5315 msg = "disk not found"
5317 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5318 (idx, pri_node, msg))
5320 # Step: check other node consistency
5321 self.proc.LogStep(2, steps_total, "check peer consistency")
5322 for idx, dev in enumerate(instance.disks):
5323 if idx not in self.op.disks:
5325 info("checking disk/%d consistency on %s" % (idx, pri_node))
5326 if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5327 raise errors.OpExecError("Primary node (%s) has degraded storage,"
5328 " unsafe to replace the secondary" %
5331 # Step: create new storage
5332 self.proc.LogStep(3, steps_total, "allocate new storage")
5333 for idx, dev in enumerate(instance.disks):
5334 info("adding new local storage on %s for disk/%d" %
5336 # we pass force_create=True to force LVM creation
5337 for new_lv in dev.children:
5338 _CreateBlockDev(self, new_node, instance, new_lv, True,
5339 _GetInstanceInfoText(instance), False)
5341 # Step 4: drbd minors and drbd setup changes
5342 # after this, we must manually remove the drbd minors on both the
5343 # error and the success paths
5344 minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5346 logging.debug("Allocated minors %s" % (minors,))
5347 self.proc.LogStep(4, steps_total, "changing drbd configuration")
5348 for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5350 info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5351 # create new devices on new_node; note that we create two IDs:
5352 # one without port, so the drbd will be activated without
5353 # networking information on the new node at this stage, and one
5354 # with network, for the latter activation in step 4
5355 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5356 if pri_node == o_node1:
5361 new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5362 new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5364 iv_names[idx] = (dev, dev.children, new_net_id)
5365 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5367 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5368 logical_id=new_alone_id,
5369 children=dev.children)
5371 _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5372 _GetInstanceInfoText(instance), False)
5373 except errors.GenericError:
5374 self.cfg.ReleaseDRBDMinors(instance.name)
5377 for idx, dev in enumerate(instance.disks):
5378 # we have new devices, shutdown the drbd on the old secondary
5379 info("shutting down drbd for disk/%d on old node" % idx)
5380 cfg.SetDiskID(dev, old_node)
5381 msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
5383 warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5385 hint="Please cleanup this device manually as soon as possible")
5387 info("detaching primary drbds from the network (=> standalone)")
5388 result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5389 instance.disks)[pri_node]
5391 msg = result.RemoteFailMsg()
5393 # detaches didn't succeed (unlikely)
5394 self.cfg.ReleaseDRBDMinors(instance.name)
5395 raise errors.OpExecError("Can't detach the disks from the network on"
5396 " old node: %s" % (msg,))
5398 # if we managed to detach at least one, we update all the disks of
5399 # the instance to point to the new secondary
5400 info("updating instance configuration")
5401 for dev, _, new_logical_id in iv_names.itervalues():
5402 dev.logical_id = new_logical_id
5403 cfg.SetDiskID(dev, pri_node)
5404 cfg.Update(instance)
5406 # and now perform the drbd attach
5407 info("attaching primary drbds to new secondary (standalone => connected)")
5408 result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5409 instance.disks, instance.name,
5411 for to_node, to_result in result.items():
5412 msg = to_result.RemoteFailMsg()
5414 warning("can't attach drbd disks on node %s: %s", to_node, msg,
5415 hint="please do a gnt-instance info to see the"
5418 # this can fail as the old devices are degraded and _WaitForSync
5419 # does a combined result over all disks, so we don't check its return value
5421 self.proc.LogStep(5, steps_total, "sync devices")
5422 _WaitForSync(self, instance, unlock=True)
5424 # so check manually all the devices
5425 for idx, (dev, old_lvs, _) in iv_names.iteritems():
5426 cfg.SetDiskID(dev, pri_node)
5427 result = self.rpc.call_blockdev_find(pri_node, dev)
5428 msg = result.RemoteFailMsg()
5429 if not msg and not result.payload:
5430 msg = "disk not found"
5432 raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5434 if result.payload[5]:
5435 raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5437 self.proc.LogStep(6, steps_total, "removing old storage")
5438 for idx, (dev, old_lvs, _) in iv_names.iteritems():
5439 info("remove logical volumes for disk/%d" % idx)
5441 cfg.SetDiskID(lv, old_node)
5442 msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
5444 warning("Can't remove LV on old secondary: %s", msg,
5445 hint="Cleanup stale volumes by hand")
5447 def Exec(self, feedback_fn):
5448 """Execute disk replacement.
5450 This dispatches the disk replacement to the appropriate handler.
5453 instance = self.instance
5455 # Activate the instance disks if we're replacing them on a down instance
5456 if not instance.admin_up:
5457 _StartInstanceDisks(self, instance, True)
5459 if self.op.mode == constants.REPLACE_DISK_CHG:
5460 fn = self._ExecD8Secondary
5462 fn = self._ExecD8DiskOnly
5464 ret = fn(feedback_fn)
5466 # Deactivate the instance disks if we're replacing them on a down instance
5467 if not instance.admin_up:
5468 _SafeShutdownInstanceDisks(self, instance)
5473 class LUGrowDisk(LogicalUnit):
5474 """Grow a disk of an instance.
5478 HTYPE = constants.HTYPE_INSTANCE
5479 _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5482 def ExpandNames(self):
5483 self._ExpandAndLockInstance()
5484 self.needed_locks[locking.LEVEL_NODE] = []
5485 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5487 def DeclareLocks(self, level):
5488 if level == locking.LEVEL_NODE:
5489 self._LockInstancesNodes()
5491 def BuildHooksEnv(self):
5494 This runs on the master, the primary and all the secondaries.
5498 "DISK": self.op.disk,
5499 "AMOUNT": self.op.amount,
5501 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5503 self.cfg.GetMasterNode(),
5504 self.instance.primary_node,
5508 def CheckPrereq(self):
5509 """Check prerequisites.
5511 This checks that the instance is in the cluster.
5514 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5515 assert instance is not None, \
5516 "Cannot retrieve locked instance %s" % self.op.instance_name
5517 nodenames = list(instance.all_nodes)
5518 for node in nodenames:
5519 _CheckNodeOnline(self, node)
5522 self.instance = instance
5524 if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5525 raise errors.OpPrereqError("Instance's disk layout does not support"
5528 self.disk = instance.FindDisk(self.op.disk)
5530 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5531 instance.hypervisor)
5532 for node in nodenames:
5533 info = nodeinfo[node]
5534 if info.failed or not info.data:
5535 raise errors.OpPrereqError("Cannot get current information"
5536 " from node '%s'" % node)
5537 vg_free = info.data.get('vg_free', None)
5538 if not isinstance(vg_free, int):
5539 raise errors.OpPrereqError("Can't compute free disk space on"
5541 if self.op.amount > vg_free:
5542 raise errors.OpPrereqError("Not enough disk space on target node %s:"
5543 " %d MiB available, %d MiB required" %
5544 (node, vg_free, self.op.amount))
5546 def Exec(self, feedback_fn):
5547 """Execute disk grow.
5550 instance = self.instance
5552 for node in instance.all_nodes:
5553 self.cfg.SetDiskID(disk, node)
5554 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5555 msg = result.RemoteFailMsg()
5557 raise errors.OpExecError("Grow request failed to node %s: %s" %
5559 disk.RecordGrow(self.op.amount)
5560 self.cfg.Update(instance)
5561 if self.op.wait_for_sync:
5562 disk_abort = not _WaitForSync(self, instance)
5564 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5565 " status.\nPlease check the instance.")
5568 class LUQueryInstanceData(NoHooksLU):
5569 """Query runtime instance data.
5572 _OP_REQP = ["instances", "static"]
5575 def ExpandNames(self):
5576 self.needed_locks = {}
5577 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
5579 if not isinstance(self.op.instances, list):
5580 raise errors.OpPrereqError("Invalid argument type 'instances'")
5582 if self.op.instances:
5583 self.wanted_names = []
5584 for name in self.op.instances:
5585 full_name = self.cfg.ExpandInstanceName(name)
5586 if full_name is None:
5587 raise errors.OpPrereqError("Instance '%s' not known" % name)
5588 self.wanted_names.append(full_name)
5589 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5591 self.wanted_names = None
5592 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5594 self.needed_locks[locking.LEVEL_NODE] = []
5595 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5597 def DeclareLocks(self, level):
5598 if level == locking.LEVEL_NODE:
5599 self._LockInstancesNodes()
5601 def CheckPrereq(self):
5602 """Check prerequisites.
5604 This only checks the optional instance list against the existing names.
5607 if self.wanted_names is None:
5608 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5610 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5611 in self.wanted_names]
5614 def _ComputeDiskStatus(self, instance, snode, dev):
5615 """Compute block device status.
5618 static = self.op.static
5620 self.cfg.SetDiskID(dev, instance.primary_node)
5621 dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5622 if dev_pstatus.offline:
5625 msg = dev_pstatus.RemoteFailMsg()
5627 raise errors.OpExecError("Can't compute disk status for %s: %s" %
5628 (instance.name, msg))
5629 dev_pstatus = dev_pstatus.payload
5633 if dev.dev_type in constants.LDS_DRBD:
5634 # we change the snode then (otherwise we use the one passed in)
5635 if dev.logical_id[0] == instance.primary_node:
5636 snode = dev.logical_id[1]
5638 snode = dev.logical_id[0]
5640 if snode and not static:
5641 self.cfg.SetDiskID(dev, snode)
5642 dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5643 if dev_sstatus.offline:
5646 msg = dev_sstatus.RemoteFailMsg()
5648 raise errors.OpExecError("Can't compute disk status for %s: %s" %
5649 (instance.name, msg))
5650 dev_sstatus = dev_sstatus.payload
5655 dev_children = [self._ComputeDiskStatus(instance, snode, child)
5656 for child in dev.children]
5661 "iv_name": dev.iv_name,
5662 "dev_type": dev.dev_type,
5663 "logical_id": dev.logical_id,
5664 "physical_id": dev.physical_id,
5665 "pstatus": dev_pstatus,
5666 "sstatus": dev_sstatus,
5667 "children": dev_children,
  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result


class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False
  def CheckArguments(self):
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")
    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")
    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)

      if nic_op == constants.DDM_ADD:
        nic_bridge = nic_dict.get('bridge', None)
        if nic_bridge is None:
          nic_dict['bridge'] = self.cfg.GetDefBridge()
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic")

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'bridge' in this_nic_override:
          bridge = this_nic_override['bridge']
        else:
          bridge = nic.bridge
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        args['nics'].append((ip, bridge, mac))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        bridge = nic_override[constants.DDM_ADD]['bridge']
        mac = nic_override[constants.DDM_ADD]['mac']
        args['nics'].append((ip, bridge, mac))
      elif constants.DDM_REMOVE in nic_override:
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl
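
  # Sketch of the override built above, assuming a memory change plus one
  # added NIC (values hypothetical): args could end up as
  #   {'memory': 512, 'vcpus': 2,
  #    'nics': [('198.51.100.10', 'xen-br0', 'aa:00:00:12:34:56')]}
  # and _BuildInstanceHookEnvByObject merges it over the instance's current
  # values when building the hook environment.
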
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    force = self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict = copy.deepcopy(instance.hvparams)
      for key, val in self.op.hvparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_hvdict[key]
          except KeyError:
            pass
        else:
          i_hvdict[key] = val
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                i_hvdict)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}
    # beparams processing
    if self.op.beparams:
      i_bedict = copy.deepcopy(instance.beparams)
      for key, val in self.op.beparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_bedict[key]
          except KeyError:
            pass
        else:
          i_bedict[key] = val
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if not instance_info.failed and instance_info.data:
          current_mem = int(instance_info.data['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    nodeinfo[pnode].data['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.iteritems():
          if node not in instance.secondary_nodes:
            continue
          if nres.failed or not isinstance(nres.data, dict):
            self.warn.append("Can't get info from secondary node %s" % node)
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)
    # NIC processing
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics)))
      if 'bridge' in nic_dict:
        nic_bridge = nic_dict['bridge']
        if nic_bridge is None:
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
          msg = ("Bridge '%s' doesn't exist on one of"
                 " the instance nodes" % nic_bridge)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None')
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC()
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(nic_mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac)
    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        if ins_l.failed or not isinstance(ins_l.data, list):
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
        if instance.name in ins_l.data:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks.")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)))

    return
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        bridge = nic_dict['bridge']
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=bridge)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    self.cfg.Update(instance)

    return result
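
  # Sketch of the change list returned above, for a memory change plus one
  # NIC addition (values hypothetical):
  #   [("be/memory", 512),
  #    ("nic.1", "add:mac=aa:00:00:fa:3a:3f,ip=None,bridge=xen-br0")]

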
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].failed:
        result[node] = False
      else:
        result[node] = rpcresult[node].data

    return result
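
  # Sketch of the return value, assuming two nodes of which one failed to
  # answer (names hypothetical):
  #   {"node1.example.com": ["instance1.example.com"],
  #    "node2.example.com": False}

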
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is a wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")
  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, src_node, msg))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)
    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s: %s", dev.logical_id[1], src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)


class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass
  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    exportlist = self.rpc.call_export_list(self.acquired_locks[
      locking.LEVEL_NODE])
    found = False
    for node in exportlist:
      if exportlist[node].failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name in exportlist[node].data:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        if result.failed or not result.data:
          logging.error("Could not remove export for instance %s"
                        " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name
  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err))
  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results
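
  # Sketch of the result for a pattern such as "^prod" (data hypothetical):
  #   [("/cluster", "production"),
  #    ("/instances/web1.example.com", "production")]

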
class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))
  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """
  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes ((in|out)_(data|text)), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]
  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
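
  # A minimal usage sketch, assuming an allocation request made from within
  # an LU; the parameter values and the allocator script name are
  # hypothetical, not taken from the original code:
  #   ial = IAllocator(self, mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="web1.example.com", mem_size=512,
  #                    disks=[{'size': 1024, 'mode': 'w'}],
  #                    disk_template="drbd", os="debian-etch", tags=[],
  #                    nics=[], vcpus=1, hypervisor="xen-pvm")
  #   ial.Run("my-allocator")
  #   if ial.success: allocate the instance on ial.nodes
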
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not ninfo.offline:
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].data:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results
    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request
  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
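
  # Sketch of a well-formed allocator reply, assuming a two-node allocation
  # (names hypothetical); the script's output must parse to something like
  #   {"success": true, "info": "allocation successful",
  #    "nodes": ["node1.example.com", "node2.example.com"]}
  # A reply missing one of the three keys, or whose "nodes" value is not a
  # list, is rejected above.

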
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)
  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result