# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0613,W0201

import time
import re
import logging

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.
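
  For illustration only, a hypothetical minimal concurrent LU (not an
  actual unit from this module) could look like::

    class LUNothing(NoHooksLU):
      _OP_REQP = []

      def ExpandNames(self):
        self.needed_locks = {}

      def CheckPrereq(self):
        pass

      def Exec(self, feedback_fn):
        feedback_fn("Nothing to do")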

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity of the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing it separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possible
        waits)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.
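
    For example, to acquire node locks in shared mode (a sketch)::

      self.share_locks[locking.LEVEL_NODE] = 1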

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS
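
    A typical override that computes node locks from already-acquired
    instance locks would be (a sketch)::

      def DeclareLocks(self, level):
        if level == locking.LEVEL_NODE:
          self._LockInstancesNodes()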

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix, as this is
    handled in the hooks runner. Also note that additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes to return, an empty list (and not None)
    should be used.

    Note that if the HPATH for a LU class is None, this function will
    not be called.
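
    For illustration, a hypothetical LU acting only on one node could
    return something like::

      ({"NODE_NAME": self.op.node_name}, [master_node], [master_node])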

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None

    @return: the new Exec result, based on the previous result
        and hook results
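
    A sketch of an override (see LUVerifyCluster below for a real one)::

      def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
        if phase == constants.HOOKS_PHASE_POST:
          feedback_fn("* Hooks have run")
        return lu_result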

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been
    done before.
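
    Typical use from an LU's ExpandNames (a sketch)::

      def ExpandNames(self):
        self._ExpandAndLockInstance()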

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type
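
  Usage sketch, with ``lu`` being any LU and the node name made up::

    nodes = _GetWantedNodes(lu, ["node1.example.tld"])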

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
                                 " non-empty list of nodes whose name is to be"
                                 " expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []
    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())

  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
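
  For example, with made-up field names (a sketch)::

    _CheckOutputFields(static=utils.FieldSet("name"),
                       dynamic=utils.FieldSet("mfree"),
                       selected=["name", "mfree"])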

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).
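
  Usage sketch, with ``readd`` being a hypothetical boolean parameter::

    _CheckBooleanOpField(self.op, "readd")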

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks):
  """Builds instance related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @param memory: the memory size of the instance
  @param vcpus: the count of VCPUs the instance has
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param disks: the list of (size, mode) pairs
  @rtype: dict
  @return: the hook environment for this instance
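
  For illustration, an instance with one NIC and one disk yields keys such
  as INSTANCE_NAME, INSTANCE_PRIMARY, INSTANCE_NIC_COUNT, INSTANCE_NIC0_IP,
  INSTANCE_DISK_COUNT and INSTANCE_DISK0_SIZE.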
493 "INSTANCE_NAME": name,
494 "INSTANCE_PRIMARY": primary_node,
495 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
496 "INSTANCE_OS_TYPE": os_type,
497 "INSTANCE_STATUS": str_status,
498 "INSTANCE_MEMORY": memory,
499 "INSTANCE_VCPUS": vcpus,
500 "INSTANCE_DISK_TEMPLATE": disk_template,
504 nic_count = len(nics)
505 for idx, (ip, bridge, mac) in enumerate(nics):
508 env["INSTANCE_NIC%d_IP" % idx] = ip
509 env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
510 env["INSTANCE_NIC%d_MAC" % idx] = mac
514 env["INSTANCE_NIC_COUNT"] = nic_count
517 disk_count = len(disks)
518 for idx, (size, mode) in enumerate(disks):
519 env["INSTANCE_DISK%d_SIZE" % idx] = size
520 env["INSTANCE_DISK%d_MODE" % idx] = mode
524 env["INSTANCE_DISK_COUNT"] = disk_count


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      the values
  @rtype: dict
  @return: the hook environment dictionary
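
  Usage sketch, with ``instance`` being an L{objects.Instance}::

    env = _BuildInstanceHookEnvByObject(self, instance)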

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        the form of minor: (instance, must_exist), which correspond to
        instances and their running status

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn(" - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn(" - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn(" - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G
    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn(" - ERROR: unable to check volume groups on node %s." %
                  (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn(" - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn(" - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn(" - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn(" - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn(" - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)
            bad = True

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn(" - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn(" - ERROR: ssh communication with node '%s': %s" %
                      (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn(" - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn(" - ERROR: tcp communication with node '%s': %s" %
                      (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn(" - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
    if not isinstance(used_minors, (tuple, list)):
      feedback_fn(" - ERROR: cannot parse drbd status file: %s" %
                  str(used_minors))
    else:
      for minor, (iname, must_exist) in drbd_map.items():
        if minor not in used_minors and must_exist:
          feedback_fn(" - ERROR: drbd minor %d of instance %s is not active" %
                      (minor, iname))
          bad = True
      for minor in used_minors:
        if minor not in drbd_map:
          feedback_fn(" - ERROR: unallocated drbd minor %d is in use" % minor)
          bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn(" - ERROR: volume %s missing on node %s" %
                      (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
           instance not in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn(" - ERROR: instance %s not running on node %s" %
                    (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn(" - ERROR: instance %s should not run on node %s" %
                      (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn(" - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True

    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn(" - ERROR: instance %s on node %s should not exist" %
                      (runninginstance, node))
          bad = True

    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to, should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn(" - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True

    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase; their failure is logged
    in the verify output and makes the verification fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn(" - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    n_drained = [] # List of nodes being drained
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)
956 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
957 node_verify_param = {
958 constants.NV_FILELIST: file_names,
959 constants.NV_NODELIST: [node.name for node in nodeinfo
960 if not node.offline],
961 constants.NV_HYPERVISOR: hypervisors,
962 constants.NV_NODENETTEST: [(node.name, node.primary_ip,
963 node.secondary_ip) for node in nodeinfo
964 if not node.offline],
965 constants.NV_LVLIST: vg_name,
966 constants.NV_INSTANCELIST: hypervisors,
967 constants.NV_VGLIST: None,
968 constants.NV_VERSION: None,
969 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
970 constants.NV_DRBDLIST: None,
972 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
973 self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    for node_i in nodeinfo:
      node = node_i.name
      nresult = all_nvinfo[node].data

      if node_i.offline:
        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained.append(node)
      else:
        ntype = "regular"

      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn(" - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)
      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files,
                                node_drbd)
      bad = bad or result

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if isinstance(lvdata, basestring):
        feedback_fn(" - ERROR: LVM problem on node %s: %s" %
                    (node, utils.SafeEncode(lvdata)))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn(" - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn(" - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue

      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn(" - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn(" - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn, n_offline)
      bad = bad or result
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      elif pnode not in n_offline:
        feedback_fn(" - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn(" - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        elif snode not in n_offline:
          feedback_fn(" - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))
          bad = True
        if snode in n_offline:
          inst_nodes_offline.append(snode)

      if inst_nodes_offline:
        # warn that the instance lives on offline nodes, and set bad=True
        feedback_fn(" - ERROR: instance lives on offline node(s) %s" %
                    ", ".join(inst_nodes_offline))
        bad = True
1120 feedback_fn("* Verifying orphan volumes")
1121 result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1125 feedback_fn("* Verifying remaining instances")
1126 result = self._VerifyOrphanInstances(instancelist, node_instance,
1130 if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1131 feedback_fn("* Verifying N+1 Memory redundancy")
1132 result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1135 feedback_fn("* Other Notes")
1137 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
1138 % len(i_non_redundant))
1140 if i_non_a_balanced:
1141 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
1142 % len(i_non_a_balanced))
1145 feedback_fn(" - NOTICE: %d offline node(s) found." % len(n_offline))
1148 feedback_fn(" - NOTICE: %d drained node(s) found." % len(n_drained))

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn(" - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn(" Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn(" Node %s:" % node_name)
                show_node_header = False
              feedback_fn(" ERROR: Script %s failed, output:" % script)
              output = indent_re.sub(' ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
    }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)
    finally:
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not
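
  Usage sketch, mirroring the check done in LUSetClusterParams below::

    for disk in inst.disks:
      if _RecursiveCheckIfLVMBased(disk):
        # at least one disk is LVM-based
        break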

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV


class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckParameters(self):
    """Check parameters.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
    }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name is not None, check the given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    rstats = rstats.data
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).
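
  Usage sketch::

    ok = _CheckDiskConsistency(lu, dev, node, on_primary=True)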

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary.

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)

    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
    }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
      - the node exists in the configuration
      - it does not have primary or secondary instances
      - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non-empty, and if empty, there's no validation to do

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
          }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1996 for node in nodenames:
1997 if node not in volumes or volumes[node].failed or not volumes[node].data:
2000 node_vols = volumes[node].data[:]
2001 node_vols.sort(key=lambda vol: vol['dev'])
2003 for vol in node_vols:
2005 for field in self.op.output_fields:
2008 elif field == "phys":
2012 elif field == "name":
2014 elif field == "size":
2015 val = int(float(vol['size']))
2016 elif field == "instance":
2018 if node not in lv_by_node[inst]:
2020 if vol['name'] in lv_by_node[inst][node]:
2026 raise errors.ParameterError(field)
2027 node_output.append(str(val))
2029 output.append(node_output)
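
# Illustrative example (hypothetical values): with
# output_fields=["node", "phys", "size", "instance"], one output row per
# logical volume might look like
#   ["node1.example.com", "/dev/xenvg/0c3f...disk0_data", "10240", "inst1"]
# note that, unlike the other queries, Exec() stringifies every value.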


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # check reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
      }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
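
# For reference (illustrative): for "gnt-node add node3.example.com" the
# hooks environment built by BuildHooksEnv above would contain roughly
#   OP_TARGET=node3.example.com   NODE_NAME=node3.example.com
#   NODE_PIP=192.0.2.13           NODE_SIP=192.0.2.13
# (the hooks runner exports these to scripts with a GANETI_ prefix); the
# pre-phase runs on all existing nodes, the post-phase also on the new one.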


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate, online and not drained")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
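
# Illustrative only: Exec() above returns a list of (parameter, new value)
# pairs describing what actually changed; e.g. offlining a node that was a
# master candidate might yield
#   [("offline", "True"),
#    ("master_candidate", "auto-demotion due to offline")]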


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
                        for hypervisor in cluster.enabled_hypervisors]),
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values
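
# Illustrative: output_fields=["cluster_name", "drain_flag"] would return
# something like ["cluster.example.com", False]; unlike the node/instance
# queries this LU needs no locks, since it only reads values from the
# master's configuration.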


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
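
# Illustrative shape of a successful return value for a one-disk DRBD
# instance (device path is hypothetical):
#   (True, [("node1.example.com", "disk/0", "/dev/drbd0")])
# i.e. one (node, instance-visible name, node-visible device) triple per
# disk.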


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                    [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is false, errors on the primary node are reflected in
  the return value (the function returns False); with ignore_primary set,
  primary-node errors are only logged. Secondary-node errors always make
  the function return False.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.RemoteFailMsg()
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
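
# Illustrative: callers that know the primary node may be dead can ignore
# errors from it, as LUFailoverInstance.Exec does further below:
#   _ShutdownInstanceDisks(self, instance, ignore_primary=True)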


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise()
  free_mem = nodeinfo[node].data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
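
# Example call (mirroring the use in LUStartupInstance.CheckPrereq below):
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
# where bep is the instance's filled-in backend parameter dict.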


class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance)
    msg = result.RemoteFailMsg()
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
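
# For reference (not in the original source): SOFT and HARD reboots are
# delegated to the hypervisor in a single rpc call above, while a FULL
# reboot is emulated on the master side as shutdown, disk deactivation and
# reactivation, and a fresh start -- which is why only that branch touches
# the disks.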


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    msg = result.RemoteFailMsg()
    if msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
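
# Illustrative: the parameterized fields above are regex-based, e.g.
# _FIELDS_STATIC.Matches("disk.size/0") yields groups ("disk", "size", "0"),
# which the dispatch code turns into instance.FindDisk(0).size, while plain
# fields like "name" match with empty groups and take the simple branches.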


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migration is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
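
# Illustrative summary (not in the original source) of the DRBD state
# changes driven by _ExecMigration above for a successful live migration:
#   demote target to secondary -> standalone -> reconnect dual-master
#   -> migrate -> demote old primary -> standalone -> single-master
# with _WaitUntilSync() after each reconnection, so the operation only
# proceeds on fully synchronized disks.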


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has the
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
3940 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
3941 """Create a single block device on a given node.
3943 This will not recurse over children of the device, so they must be created in advance.
3946 @param lu: the lu on whose behalf we execute
3947 @param node: the node on which to create the device
3948 @type instance: L{objects.Instance}
3949 @param instance: the instance which owns the device
3950 @type device: L{objects.Disk}
3951 @param device: the device to create
3952 @param info: the extra 'metadata' we should attach to the device
3953 (this will be represented as a LVM tag)
3954 @type force_open: boolean
3955 @param force_open: this parameter will be passed to the
3956 L{backend.BlockdevCreate} function where it specifies
3957 whether we run on primary or not, and it affects both
3958 the child assembly and the device's own Open() execution
3961 lu.cfg.SetDiskID(device, node)
3962 result = lu.rpc.call_blockdev_create(node, device, device.size,
3963 instance.name, force_open, info)
3964 msg = result.RemoteFailMsg()
3966 raise errors.OpExecError("Can't create block device %s on"
3967 " node %s for instance %s: %s" %
3968 (device, node, instance.name, msg))
3969 if device.physical_id is None:
3970 device.physical_id = result.payload
3973 def _GenerateUniqueNames(lu, exts):
3974 """Generate a suitable LV name.
3976 This will generate a logical volume name for the given instance.
3981 new_id = lu.cfg.GenerateUniqueID()
3982 results.append("%s%s" % (new_id, val))
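# Example (illustrative): _GenerateUniqueNames(lu, [".disk0"]) returns
# something like ["d9f2a3b4-....disk0"]; the cfg.GenerateUniqueID() prefix
# keeps logical volume names unique across the cluster.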
3986 def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
3988 """Generate a drbd8 device complete with its children.
3991 port = lu.cfg.AllocatePort()
3992 vgname = lu.cfg.GetVGName()
3993 shared_secret = lu.cfg.GenerateDRBDSecret()
3994 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3995 logical_id=(vgname, names[0]))
3996 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3997 logical_id=(vgname, names[1]))
3998 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3999 logical_id=(primary, secondary, port,
4002 children=[dev_data, dev_meta],
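# The resulting device tree (illustrative sketch):
#
#   LD_DRBD8  (size, port/minors/secret from the cluster config)
#     |- LD_LV  data volume ("<name>_data", size)
#     `- LD_LV  metadata volume ("<name>_meta", 128 MiB)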
4007 def _GenerateDiskTemplate(lu, template_name,
4008 instance_name, primary_node,
4009 secondary_nodes, disk_info,
4010 file_storage_dir, file_driver,
4012 """Generate the entire disk layout for a given template type.
4015 # TODO: compute space requirements
4017 vgname = lu.cfg.GetVGName()
4018 disk_count = len(disk_info)
4020 if template_name == constants.DT_DISKLESS:
4022 elif template_name == constants.DT_PLAIN:
4023 if len(secondary_nodes) != 0:
4024 raise errors.ProgrammerError("Wrong template configuration")
4026 names = _GenerateUniqueNames(lu, [".disk%d" % i
4027 for i in range(disk_count)])
4028 for idx, disk in enumerate(disk_info):
4029 disk_index = idx + base_index
4030 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
4031 logical_id=(vgname, names[idx]),
4032 iv_name="disk/%d" % disk_index,
4034 disks.append(disk_dev)
4035 elif template_name == constants.DT_DRBD8:
4036 if len(secondary_nodes) != 1:
4037 raise errors.ProgrammerError("Wrong template configuration")
4038 remote_node = secondary_nodes[0]
4039 minors = lu.cfg.AllocateDRBDMinor(
4040 [primary_node, remote_node] * len(disk_info), instance_name)
4043 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
4044 for i in range(disk_count)]):
4045 names.append(lv_prefix + "_data")
4046 names.append(lv_prefix + "_meta")
4047 for idx, disk in enumerate(disk_info):
4048 disk_index = idx + base_index
4049 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4050 disk["size"], names[idx*2:idx*2+2],
4051 "disk/%d" % disk_index,
4052 minors[idx*2], minors[idx*2+1])
4053 disk_dev.mode = disk["mode"]
4054 disks.append(disk_dev)
4055 elif template_name == constants.DT_FILE:
4056 if len(secondary_nodes) != 0:
4057 raise errors.ProgrammerError("Wrong template configuration")
4059 for idx, disk in enumerate(disk_info):
4060 disk_index = idx + base_index
4061 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4062 iv_name="disk/%d" % disk_index,
4063 logical_id=(file_driver,
4064 "%s/disk%d" % (file_storage_dir,
4067 disks.append(disk_dev)
4069 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
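# Illustrative usage sketch (argument values are assumptions; base_index is
# the trailing parameter used above but elided from this listing):
#   disks = _GenerateDiskTemplate(self, constants.DT_DRBD8,
#                                 "inst1.example.com", "node1.example.com",
#                                 ["node2.example.com"],
#                                 [{"size": 1024, "mode": constants.DISK_RDWR}],
#                                 None, None, 0)
# would return a single LD_DRBD8 objects.Disk with the data/meta LV pair
# generated by _GenerateDRBD8Branch as its children.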
4073 def _GetInstanceInfoText(instance):
4074 """Compute that text that should be added to the disk's metadata.
4077 return "originstname+%s" % instance.name
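# Example: an instance named "inst1.example.com" gets the LVM tag
# "originstname+inst1.example.com" attached to its volumes, so a stray
# volume can later be traced back to the instance that owned it.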
4080 def _CreateDisks(lu, instance):
4081 """Create all disks for an instance.
4083 This abstracts away some work from AddInstance.
4085 @type lu: L{LogicalUnit}
4086 @param lu: the logical unit on whose behalf we execute
4087 @type instance: L{objects.Instance}
4088 @param instance: the instance whose disks we should create
4090 @return: the success of the creation
4093 info = _GetInstanceInfoText(instance)
4094 pnode = instance.primary_node
4096 if instance.disk_template == constants.DT_FILE:
4097 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4098 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4100 if result.failed or not result.data:
4101 raise errors.OpExecError("Could not connect to node '%s'" % pnode)
4103 if not result.data[0]:
4104 raise errors.OpExecError("Failed to create directory '%s'" %
4107 # Note: this needs to be kept in sync with adding of disks in
4108 # LUSetInstanceParams
4109 for device in instance.disks:
4110 logging.info("Creating volume %s for instance %s",
4111 device.iv_name, instance.name)
4113 for node in instance.all_nodes:
4114 f_create = node == pnode
4115 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
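# Note (descriptive comment): f_create doubles as both force_create and
# force_open above, so the full device tree is created and opened only on
# the primary node, while secondaries create just the devices that report
# CreateOnSecondary().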
4118 def _RemoveDisks(lu, instance):
4119 """Remove all disks for an instance.
4121 This abstracts away some work from `AddInstance()` and
4122 `RemoveInstance()`. Note that in case some of the devices couldn't
4123 be removed, the removal will continue with the other ones (compare
4124 with `_CreateDisks()`).
4126 @type lu: L{LogicalUnit}
4127 @param lu: the logical unit on whose behalf we execute
4128 @type instance: L{objects.Instance}
4129 @param instance: the instance whose disks we should remove
4131 @return: the success of the removal
4134 logging.info("Removing block devices for instance %s", instance.name)
4137 for device in instance.disks:
4138 for node, disk in device.ComputeNodeTree(instance.primary_node):
4139 lu.cfg.SetDiskID(disk, node)
4140 msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
4142 lu.LogWarning("Could not remove block device %s on node %s,"
4143 " continuing anyway: %s", device.iv_name, node, msg)
4146 if instance.disk_template == constants.DT_FILE:
4147 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4148 result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4150 if result.failed or not result.data:
4151 logging.error("Could not remove directory '%s'", file_storage_dir)
4157 def _ComputeDiskSize(disk_template, disks):
4158 """Compute disk size requirements in the volume group
4161 # Required free disk space as a function of the disk template and disk sizes
4163 constants.DT_DISKLESS: None,
4164 constants.DT_PLAIN: sum(d["size"] for d in disks),
4165 # 128 MB are added for drbd metadata for each disk
4166 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4167 constants.DT_FILE: None,
4170 if disk_template not in req_size_dict:
4171 raise errors.ProgrammerError("Disk template '%s' size requirement"
4172 " is unknown" % disk_template)
4174 return req_size_dict[disk_template]
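# Worked example (illustrative): a DRBD8 instance with two disks of 1024
# and 2048 MiB needs
#   _ComputeDiskSize(constants.DT_DRBD8, [{"size": 1024}, {"size": 2048}])
#   == (1024 + 128) + (2048 + 128) == 3328 MiB
# of free space in the volume group, per the 128 MiB metadata volume added
# for each disk.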
4177 def _CheckHVParams(lu, nodenames, hvname, hvparams):
4178 """Hypervisor parameter validation.
4180 This function abstracts the hypervisor parameter validation to be
4181 used in both instance create and instance modify.
4183 @type lu: L{LogicalUnit}
4184 @param lu: the logical unit for which we check
4185 @type nodenames: list
4186 @param nodenames: the list of nodes on which we should check
4187 @type hvname: string
4188 @param hvname: the name of the hypervisor we should use
4189 @type hvparams: dict
4190 @param hvparams: the parameters which we need to check
4191 @raise errors.OpPrereqError: if the parameters are not valid
4194 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4197 for node in nodenames:
4201 msg = info.RemoteFailMsg()
4203 raise errors.OpPrereqError("Hypervisor parameter validation failed:"
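# Illustrative usage (mirroring LUCreateInstance.CheckPrereq below):
#   _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
# raises OpPrereqError as soon as any of the given nodes rejects the
# parameters.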
4207 class LUCreateInstance(LogicalUnit):
4208 """Create an instance.
4211 HPATH = "instance-add"
4212 HTYPE = constants.HTYPE_INSTANCE
4213 _OP_REQP = ["instance_name", "disks", "disk_template",
4215 "wait_for_sync", "ip_check", "nics",
4216 "hvparams", "beparams"]
4219 def _ExpandNode(self, node):
4220 """Expands and checks one node name.
4223 node_full = self.cfg.ExpandNodeName(node)
4224 if node_full is None:
4225 raise errors.OpPrereqError("Unknown node %s" % node)
4228 def ExpandNames(self):
4229 """ExpandNames for CreateInstance.
4231 Figure out the right locks for instance creation.
4234 self.needed_locks = {}
4236 # set optional parameters to none if they don't exist
4237 for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4238 if not hasattr(self.op, attr):
4239 setattr(self.op, attr, None)
4241 # cheap checks, mostly valid constants given
4243 # verify creation mode
4244 if self.op.mode not in (constants.INSTANCE_CREATE,
4245 constants.INSTANCE_IMPORT):
4246 raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4249 # disk template and mirror node verification
4250 if self.op.disk_template not in constants.DISK_TEMPLATES:
4251 raise errors.OpPrereqError("Invalid disk template name")
4253 if self.op.hypervisor is None:
4254 self.op.hypervisor = self.cfg.GetHypervisorType()
4256 cluster = self.cfg.GetClusterInfo()
4257 enabled_hvs = cluster.enabled_hypervisors
4258 if self.op.hypervisor not in enabled_hvs:
4259 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4260 " cluster (%s)" % (self.op.hypervisor,
4261 ",".join(enabled_hvs)))
4263 # check hypervisor parameter syntax (locally)
4264 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4265 filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
4267 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4268 hv_type.CheckParameterSyntax(filled_hvp)
4270 # fill and remember the beparams dict
4271 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4272 self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4275 #### instance parameters check
4277 # instance name verification
4278 hostname1 = utils.HostInfo(self.op.instance_name)
4279 self.op.instance_name = instance_name = hostname1.name
4281 # this is just a preventive check, but someone might still add this
4282 # instance in the meantime, and creation will fail at lock-add time
4283 if instance_name in self.cfg.GetInstanceList():
4284 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4287 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4291 for nic in self.op.nics:
4292 # ip validity checks
4293 ip = nic.get("ip", None)
4294 if ip is None or ip.lower() == "none":
4296 elif ip.lower() == constants.VALUE_AUTO:
4297 nic_ip = hostname1.ip
4299 if not utils.IsValidIP(ip):
4300 raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4301 " like a valid IP" % ip)
4304 # MAC address verification
4305 mac = nic.get("mac", constants.VALUE_AUTO)
4306 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4307 if not utils.IsValidMac(mac.lower()):
4308 raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4310 # bridge verification
4311 bridge = nic.get("bridge", None)
4313 bridge = self.cfg.GetDefBridge()
4314 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
4316 # disk checks/pre-build
4318 for disk in self.op.disks:
4319 mode = disk.get("mode", constants.DISK_RDWR)
4320 if mode not in constants.DISK_ACCESS_SET:
4321 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4323 size = disk.get("size", None)
4325 raise errors.OpPrereqError("Missing disk size")
4329 raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4330 self.disks.append({"size": size, "mode": mode})
4332 # used in CheckPrereq for ip ping check
4333 self.check_ip = hostname1.ip
4335 # file storage checks
4336 if (self.op.file_driver and
4337 not self.op.file_driver in constants.FILE_DRIVER):
4338 raise errors.OpPrereqError("Invalid file driver name '%s'" %
4339 self.op.file_driver)
4341 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4342 raise errors.OpPrereqError("File storage directory path not absolute")
4344 ### Node/iallocator related checks
4345 if [self.op.iallocator, self.op.pnode].count(None) != 1:
4346 raise errors.OpPrereqError("One and only one of iallocator and primary"
4347 " node must be given")
4349 if self.op.iallocator:
4350 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4352 self.op.pnode = self._ExpandNode(self.op.pnode)
4353 nodelist = [self.op.pnode]
4354 if self.op.snode is not None:
4355 self.op.snode = self._ExpandNode(self.op.snode)
4356 nodelist.append(self.op.snode)
4357 self.needed_locks[locking.LEVEL_NODE] = nodelist
4359 # in case of import lock the source node too
4360 if self.op.mode == constants.INSTANCE_IMPORT:
4361 src_node = getattr(self.op, "src_node", None)
4362 src_path = getattr(self.op, "src_path", None)
4364 if src_path is None:
4365 self.op.src_path = src_path = self.op.instance_name
4367 if src_node is None:
4368 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4369 self.op.src_node = None
4370 if os.path.isabs(src_path):
4371 raise errors.OpPrereqError("Importing an instance from an absolute"
4372 " path requires a source node option.")
4374 self.op.src_node = src_node = self._ExpandNode(src_node)
4375 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4376 self.needed_locks[locking.LEVEL_NODE].append(src_node)
4377 if not os.path.isabs(src_path):
4378 self.op.src_path = src_path = \
4379 os.path.join(constants.EXPORT_DIR, src_path)
4381 else: # INSTANCE_CREATE
4382 if getattr(self.op, "os_type", None) is None:
4383 raise errors.OpPrereqError("No guest OS specified")
4385 def _RunAllocator(self):
4386 """Run the allocator based on input opcode.
4389 nics = [n.ToDict() for n in self.nics]
4390 ial = IAllocator(self,
4391 mode=constants.IALLOCATOR_MODE_ALLOC,
4392 name=self.op.instance_name,
4393 disk_template=self.op.disk_template,
4396 vcpus=self.be_full[constants.BE_VCPUS],
4397 mem_size=self.be_full[constants.BE_MEMORY],
4400 hypervisor=self.op.hypervisor,
4403 ial.Run(self.op.iallocator)
4406 raise errors.OpPrereqError("Can't compute nodes using"
4407 " iallocator '%s': %s" % (self.op.iallocator,
4409 if len(ial.nodes) != ial.required_nodes:
4410 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4411 " of nodes (%s), required %s" %
4412 (self.op.iallocator, len(ial.nodes),
4413 ial.required_nodes))
4414 self.op.pnode = ial.nodes[0]
4415 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4416 self.op.instance_name, self.op.iallocator,
4417 ", ".join(ial.nodes))
4418 if ial.required_nodes == 2:
4419 self.op.snode = ial.nodes[1]
4421 def BuildHooksEnv(self):
4424 This runs on master, primary and secondary nodes of the instance.
4428 "ADD_MODE": self.op.mode,
4430 if self.op.mode == constants.INSTANCE_IMPORT:
4431 env["SRC_NODE"] = self.op.src_node
4432 env["SRC_PATH"] = self.op.src_path
4433 env["SRC_IMAGES"] = self.src_images
4435 env.update(_BuildInstanceHookEnv(
4436 name=self.op.instance_name,
4437 primary_node=self.op.pnode,
4438 secondary_nodes=self.secondaries,
4439 status=self.op.start,
4440 os_type=self.op.os_type,
4441 memory=self.be_full[constants.BE_MEMORY],
4442 vcpus=self.be_full[constants.BE_VCPUS],
4443 nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
4444 disk_template=self.op.disk_template,
4445 disks=[(d["size"], d["mode"]) for d in self.disks],
4448 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4453 def CheckPrereq(self):
4454 """Check prerequisites.
4457 if (not self.cfg.GetVGName() and
4458 self.op.disk_template not in constants.DTS_NOT_LVM):
4459 raise errors.OpPrereqError("Cluster does not support lvm-based"
4462 if self.op.mode == constants.INSTANCE_IMPORT:
4463 src_node = self.op.src_node
4464 src_path = self.op.src_path
4466 if src_node is None:
4467 exp_list = self.rpc.call_export_list(
4468 self.acquired_locks[locking.LEVEL_NODE])
4470 for node in exp_list:
4471 if not exp_list[node].failed and src_path in exp_list[node].data:
4473 self.op.src_node = src_node = node
4474 self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4478 raise errors.OpPrereqError("No export found for relative path %s" %
4481 _CheckNodeOnline(self, src_node)
4482 result = self.rpc.call_export_info(src_node, src_path)
4485 raise errors.OpPrereqError("No export found in dir %s" % src_path)
4487 export_info = result.data
4488 if not export_info.has_section(constants.INISECT_EXP):
4489 raise errors.ProgrammerError("Corrupted export config")
4491 ei_version = export_info.get(constants.INISECT_EXP, 'version')
4492 if (int(ei_version) != constants.EXPORT_VERSION):
4493 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4494 (ei_version, constants.EXPORT_VERSION))
4496 # Check that the new instance doesn't have fewer disks than the export
4497 instance_disks = len(self.disks)
4498 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4499 if instance_disks < export_disks:
4500 raise errors.OpPrereqError("Not enough disks to import."
4501 " (instance: %d, export: %d)" %
4502 (instance_disks, export_disks))
4504 self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4506 for idx in range(export_disks):
4507 option = 'disk%d_dump' % idx
4508 if export_info.has_option(constants.INISECT_INS, option):
4509 # FIXME: are the old os-es, disk sizes, etc. useful?
4510 export_name = export_info.get(constants.INISECT_INS, option)
4511 image = os.path.join(src_path, export_name)
4512 disk_images.append(image)
4514 disk_images.append(False)
4516 self.src_images = disk_images
4518 old_name = export_info.get(constants.INISECT_INS, 'name')
4519 # FIXME: int() here could throw a ValueError on broken exports
4520 exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4521 if self.op.instance_name == old_name:
4522 for idx, nic in enumerate(self.nics):
4523 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
4524 nic_mac_ini = 'nic%d_mac' % idx
4525 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4527 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4528 # ip ping checks (we use the same ip that was resolved in ExpandNames)
4529 if self.op.start and not self.op.ip_check:
4530 raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4531 " adding an instance in start mode")
4533 if self.op.ip_check:
4534 if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4535 raise errors.OpPrereqError("IP %s of instance %s already in use" %
4536 (self.check_ip, self.op.instance_name))
4538 #### mac address generation
4539 # By generating the mac address here, both the allocator and the hooks get
4540 # the real final mac address rather than the 'auto' or 'generate' value.
4541 # There is a race condition between the generation and the instance object
4542 # creation, which means that we know the mac is valid now, but we're not
4543 # sure it will be when we actually add the instance. If things go bad
4544 # adding the instance will abort because of a duplicate mac, and the
4545 # creation job will fail.
4546 for nic in self.nics:
4547 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4548 nic.mac = self.cfg.GenerateMAC()
4552 if self.op.iallocator is not None:
4553 self._RunAllocator()
4555 #### node related checks
4557 # check primary node
4558 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4559 assert self.pnode is not None, \
4560 "Cannot retrieve locked node %s" % self.op.pnode
4562 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4565 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4568 self.secondaries = []
4570 # mirror node verification
4571 if self.op.disk_template in constants.DTS_NET_MIRROR:
4572 if self.op.snode is None:
4573 raise errors.OpPrereqError("The networked disk templates need"
4575 if self.op.snode == pnode.name:
4576 raise errors.OpPrereqError("The secondary node cannot be"
4577 " the primary node.")
4578 _CheckNodeOnline(self, self.op.snode)
4579 _CheckNodeNotDrained(self, self.op.snode)
4580 self.secondaries.append(self.op.snode)
4582 nodenames = [pnode.name] + self.secondaries
4584 req_size = _ComputeDiskSize(self.op.disk_template,
4587 # Check lv size requirements
4588 if req_size is not None:
4589 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4591 for node in nodenames:
4592 info = nodeinfo[node]
4596 raise errors.OpPrereqError("Cannot get current information"
4597 " from node '%s'" % node)
4598 vg_free = info.get('vg_free', None)
4599 if not isinstance(vg_free, int):
4600 raise errors.OpPrereqError("Can't compute free disk space on"
4602 if req_size > info['vg_free']:
4603 raise errors.OpPrereqError("Not enough disk space on target node %s."
4604 " %d MB available, %d MB required" %
4605 (node, info['vg_free'], req_size))
4607 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4610 result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4612 if not isinstance(result.data, objects.OS):
4613 raise errors.OpPrereqError("OS '%s' not in supported os list for"
4614 " primary node" % self.op.os_type)
4616 # bridge check on primary node
4617 bridges = [n.bridge for n in self.nics]
4618 result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
4621 raise errors.OpPrereqError("One of the target bridges '%s' does not"
4622 " exist on destination node '%s'" %
4623 (",".join(bridges), pnode.name))
4625 # memory check on primary node
4627 _CheckNodeFreeMemory(self, self.pnode.name,
4628 "creating instance %s" % self.op.instance_name,
4629 self.be_full[constants.BE_MEMORY],
4632 def Exec(self, feedback_fn):
4633 """Create and add the instance to the cluster.
4636 instance = self.op.instance_name
4637 pnode_name = self.pnode.name
4639 ht_kind = self.op.hypervisor
4640 if ht_kind in constants.HTS_REQ_PORT:
4641 network_port = self.cfg.AllocatePort()
4645 ##if self.op.vnc_bind_address is None:
4646 ## self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4648 # this is needed because os.path.join does not accept None arguments
4649 if self.op.file_storage_dir is None:
4650 string_file_storage_dir = ""
4652 string_file_storage_dir = self.op.file_storage_dir
4654 # build the full file storage dir path
4655 file_storage_dir = os.path.normpath(os.path.join(
4656 self.cfg.GetFileStorageDir(),
4657 string_file_storage_dir, instance))
4660 disks = _GenerateDiskTemplate(self,
4661 self.op.disk_template,
4662 instance, pnode_name,
4666 self.op.file_driver,
4669 iobj = objects.Instance(name=instance, os=self.op.os_type,
4670 primary_node=pnode_name,
4671 nics=self.nics, disks=disks,
4672 disk_template=self.op.disk_template,
4674 network_port=network_port,
4675 beparams=self.op.beparams,
4676 hvparams=self.op.hvparams,
4677 hypervisor=self.op.hypervisor,
4680 feedback_fn("* creating instance disks...")
4682 _CreateDisks(self, iobj)
4683 except errors.OpExecError:
4684 self.LogWarning("Device creation failed, reverting...")
4686 _RemoveDisks(self, iobj)
4688 self.cfg.ReleaseDRBDMinors(instance)
4691 feedback_fn("adding instance %s to cluster config" % instance)
4693 self.cfg.AddInstance(iobj)
4694 # Declare that we don't want to remove the instance lock anymore, as we've
4695 # added the instance to the config
4696 del self.remove_locks[locking.LEVEL_INSTANCE]
4697 # Unlock all the nodes
4698 if self.op.mode == constants.INSTANCE_IMPORT:
4699 nodes_keep = [self.op.src_node]
4700 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4701 if node != self.op.src_node]
4702 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4703 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4705 self.context.glm.release(locking.LEVEL_NODE)
4706 del self.acquired_locks[locking.LEVEL_NODE]
4708 if self.op.wait_for_sync:
4709 disk_abort = not _WaitForSync(self, iobj)
4710 elif iobj.disk_template in constants.DTS_NET_MIRROR:
4711 # make sure the disks are not degraded (still sync-ing is ok)
4713 feedback_fn("* checking mirrors status")
4714 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4719 _RemoveDisks(self, iobj)
4720 self.cfg.RemoveInstance(iobj.name)
4721 # Make sure the instance lock gets removed
4722 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4723 raise errors.OpExecError("There are some degraded disks for"
4726 feedback_fn("creating os for instance %s on node %s" %
4727 (instance, pnode_name))
4729 if iobj.disk_template != constants.DT_DISKLESS:
4730 if self.op.mode == constants.INSTANCE_CREATE:
4731 feedback_fn("* running the instance OS create scripts...")
4732 result = self.rpc.call_instance_os_add(pnode_name, iobj)
4733 msg = result.RemoteFailMsg()
4735 raise errors.OpExecError("Could not add os for instance %s"
4737 (instance, pnode_name, msg))
4739 elif self.op.mode == constants.INSTANCE_IMPORT:
4740 feedback_fn("* running the instance OS import scripts...")
4741 src_node = self.op.src_node
4742 src_images = self.src_images
4743 cluster_name = self.cfg.GetClusterName()
4744 import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4745 src_node, src_images,
4747 import_result.Raise()
4748 for idx, result in enumerate(import_result.data):
4750 self.LogWarning("Could not import the image %s for instance"
4751 " %s, disk %d, on node %s" %
4752 (src_images[idx], instance, idx, pnode_name))
4754 # also checked in the prereq part
4755 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4759 iobj.admin_up = True
4760 self.cfg.Update(iobj)
4761 logging.info("Starting instance %s on node %s", instance, pnode_name)
4762 feedback_fn("* starting instance...")
4763 result = self.rpc.call_instance_start(pnode_name, iobj)
4764 msg = result.RemoteFailMsg()
4766 raise errors.OpExecError("Could not start instance: %s" % msg)
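# Partial opcode sketch (illustrative; field names inferred from _OP_REQP
# and the checks above, other required fields are elided in this listing):
#   op = opcodes.OpCreateInstance(instance_name="inst1.example.com",
#                                 mode=constants.INSTANCE_CREATE,
#                                 disk_template=constants.DT_DRBD8,
#                                 disks=[{"size": 1024}], nics=[{}],
#                                 hvparams={}, beparams={},
#                                 wait_for_sync=True, ip_check=True)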
4769 class LUConnectConsole(NoHooksLU):
4770 """Connect to an instance's console.
4772 This is somewhat special in that it returns the command line that
4773 you need to run on the master node in order to connect to the console.
4777 _OP_REQP = ["instance_name"]
4780 def ExpandNames(self):
4781 self._ExpandAndLockInstance()
4783 def CheckPrereq(self):
4784 """Check prerequisites.
4786 This checks that the instance is in the cluster.
4789 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4790 assert self.instance is not None, \
4791 "Cannot retrieve locked instance %s" % self.op.instance_name
4792 _CheckNodeOnline(self, self.instance.primary_node)
4794 def Exec(self, feedback_fn):
4795 """Connect to the console of an instance
4798 instance = self.instance
4799 node = instance.primary_node
4801 node_insts = self.rpc.call_instance_list([node],
4802 [instance.hypervisor])[node]
4805 if instance.name not in node_insts.data:
4806 raise errors.OpExecError("Instance %s is not running." % instance.name)
4808 logging.debug("Connecting to console of %s on %s", instance.name, node)
4810 hyper = hypervisor.GetHypervisor(instance.hypervisor)
4811 cluster = self.cfg.GetClusterInfo()
4812 # beparams and hvparams are passed separately, to avoid editing the
4813 # instance and then saving the defaults in the instance itself.
4814 hvparams = cluster.FillHV(instance)
4815 beparams = cluster.FillBE(instance)
4816 console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
4819 return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
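# Note (descriptive comment): the LU does not open the console itself; it
# returns the full SSH command line, which the client (e.g. gnt-instance
# console) then executes on the master node.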
4822 class LUReplaceDisks(LogicalUnit):
4823 """Replace the disks of an instance.
4826 HPATH = "mirrors-replace"
4827 HTYPE = constants.HTYPE_INSTANCE
4828 _OP_REQP = ["instance_name", "mode", "disks"]
4831 def CheckArguments(self):
4832 if not hasattr(self.op, "remote_node"):
4833 self.op.remote_node = None
4834 if not hasattr(self.op, "iallocator"):
4835 self.op.iallocator = None
4837 # check for valid parameter combination
4838 cnt = [self.op.remote_node, self.op.iallocator].count(None)
4839 if self.op.mode == constants.REPLACE_DISK_CHG:
4841 raise errors.OpPrereqError("When changing the secondary either an"
4842 " iallocator script must be used or the"
4845 raise errors.OpPrereqError("Give either the iallocator or the new"
4846 " secondary, not both")
4847 else: # not replacing the secondary
4849 raise errors.OpPrereqError("The iallocator and new node options can"
4850 " be used only when changing the"
4853 def ExpandNames(self):
4854 self._ExpandAndLockInstance()
4856 if self.op.iallocator is not None:
4857 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4858 elif self.op.remote_node is not None:
4859 remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
4860 if remote_node is None:
4861 raise errors.OpPrereqError("Node '%s' not known" %
4862 self.op.remote_node)
4863 self.op.remote_node = remote_node
4864 # Warning: do not remove the locking of the new secondary here
4865 # unless DRBD8.AddChildren is changed to work in parallel;
4866 # currently it doesn't since parallel invocations of
4867 # FindUnusedMinor will conflict
4868 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
4869 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4871 self.needed_locks[locking.LEVEL_NODE] = []
4872 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4874 def DeclareLocks(self, level):
4875 # If we're not already locking all nodes in the set we have to declare the
4876 # instance's primary/secondary nodes.
4877 if (level == locking.LEVEL_NODE and
4878 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
4879 self._LockInstancesNodes()
4881 def _RunAllocator(self):
4882 """Compute a new secondary node using an IAllocator.
4885 ial = IAllocator(self,
4886 mode=constants.IALLOCATOR_MODE_RELOC,
4887 name=self.op.instance_name,
4888 relocate_from=[self.sec_node])
4890 ial.Run(self.op.iallocator)
4893 raise errors.OpPrereqError("Can't compute nodes using"
4894 " iallocator '%s': %s" % (self.op.iallocator,
4896 if len(ial.nodes) != ial.required_nodes:
4897 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4898 " of nodes (%s), required %s" %
4899 (self.op.iallocator, len(ial.nodes), ial.required_nodes))
4900 self.op.remote_node = ial.nodes[0]
4901 self.LogInfo("Selected new secondary for the instance: %s",
4902 self.op.remote_node)
4904 def BuildHooksEnv(self):
4907 This runs on the master, the primary and all the secondaries.
4911 "MODE": self.op.mode,
4912 "NEW_SECONDARY": self.op.remote_node,
4913 "OLD_SECONDARY": self.instance.secondary_nodes[0],
4915 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4917 self.cfg.GetMasterNode(),
4918 self.instance.primary_node,
4920 if self.op.remote_node is not None:
4921 nl.append(self.op.remote_node)
4924 def CheckPrereq(self):
4925 """Check prerequisites.
4927 This checks that the instance is in the cluster.
4930 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4931 assert instance is not None, \
4932 "Cannot retrieve locked instance %s" % self.op.instance_name
4933 self.instance = instance
4935 if instance.disk_template != constants.DT_DRBD8:
4936 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
4939 if len(instance.secondary_nodes) != 1:
4940 raise errors.OpPrereqError("The instance has a strange layout,"
4941 " expected one secondary but found %d" %
4942 len(instance.secondary_nodes))
4944 self.sec_node = instance.secondary_nodes[0]
4946 if self.op.iallocator is not None:
4947 self._RunAllocator()
4949 remote_node = self.op.remote_node
4950 if remote_node is not None:
4951 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
4952 assert self.remote_node_info is not None, \
4953 "Cannot retrieve locked node %s" % remote_node
4955 self.remote_node_info = None
4956 if remote_node == instance.primary_node:
4957 raise errors.OpPrereqError("The specified node is the primary node of"
4959 elif remote_node == self.sec_node:
4960 raise errors.OpPrereqError("The specified node is already the"
4961 " secondary node of the instance.")
4963 if self.op.mode == constants.REPLACE_DISK_PRI:
4964 n1 = self.tgt_node = instance.primary_node
4965 n2 = self.oth_node = self.sec_node
4966 elif self.op.mode == constants.REPLACE_DISK_SEC:
4967 n1 = self.tgt_node = self.sec_node
4968 n2 = self.oth_node = instance.primary_node
4969 elif self.op.mode == constants.REPLACE_DISK_CHG:
4970 n1 = self.new_node = remote_node
4971 n2 = self.oth_node = instance.primary_node
4972 self.tgt_node = self.sec_node
4973 _CheckNodeNotDrained(self, remote_node)
4975 raise errors.ProgrammerError("Unhandled disk replace mode")
4977 _CheckNodeOnline(self, n1)
4978 _CheckNodeOnline(self, n2)
4980 if not self.op.disks:
4981 self.op.disks = range(len(instance.disks))
4983 for disk_idx in self.op.disks:
4984 instance.FindDisk(disk_idx)
4986 def _ExecD8DiskOnly(self, feedback_fn):
4987 """Replace a disk on the primary or secondary for dbrd8.
4989 The algorithm for replace is quite complicated:
4991 1. for each disk to be replaced:
4993 1. create new LVs on the target node with unique names
4994 1. detach old LVs from the drbd device
4995 1. rename old LVs to name_replaced.<time_t>
4996 1. rename new LVs to old LVs
4997 1. attach the new LVs (with the old names now) to the drbd device
4999 1. wait for sync across all devices
5001 1. for each modified disk:
5003 1. remove old LVs (which have the name name_replaced.<time_t>)
5005 Failures are not very well handled.
5009 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5010 instance = self.instance
5012 vgname = self.cfg.GetVGName()
5015 tgt_node = self.tgt_node
5016 oth_node = self.oth_node
5018 # Step: check device activation
5019 self.proc.LogStep(1, steps_total, "check device existence")
5020 info("checking volume groups")
5021 my_vg = cfg.GetVGName()
5022 results = self.rpc.call_vg_list([oth_node, tgt_node])
5024 raise errors.OpExecError("Can't list volume groups on the nodes")
5025 for node in oth_node, tgt_node:
5027 if res.failed or not res.data or my_vg not in res.data:
5028 raise errors.OpExecError("Volume group '%s' not found on %s" %
5030 for idx, dev in enumerate(instance.disks):
5031 if idx not in self.op.disks:
5033 for node in tgt_node, oth_node:
5034 info("checking disk/%d on %s" % (idx, node))
5035 cfg.SetDiskID(dev, node)
5036 result = self.rpc.call_blockdev_find(node, dev)
5037 msg = result.RemoteFailMsg()
5038 if not msg and not result.payload:
5039 msg = "disk not found"
5041 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5044 # Step: check other node consistency
5045 self.proc.LogStep(2, steps_total, "check peer consistency")
5046 for idx, dev in enumerate(instance.disks):
5047 if idx not in self.op.disks:
5049 info("checking disk/%d consistency on %s" % (idx, oth_node))
5050 if not _CheckDiskConsistency(self, dev, oth_node,
5051 oth_node==instance.primary_node):
5052 raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5053 " to replace disks on this node (%s)" %
5054 (oth_node, tgt_node))
5056 # Step: create new storage
5057 self.proc.LogStep(3, steps_total, "allocate new storage")
5058 for idx, dev in enumerate(instance.disks):
5059 if idx not in self.op.disks:
5062 cfg.SetDiskID(dev, tgt_node)
5063 lv_names = [".disk%d_%s" % (idx, suf)
5064 for suf in ["data", "meta"]]
5065 names = _GenerateUniqueNames(self, lv_names)
5066 lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5067 logical_id=(vgname, names[0]))
5068 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5069 logical_id=(vgname, names[1]))
5070 new_lvs = [lv_data, lv_meta]
5071 old_lvs = dev.children
5072 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5073 info("creating new local storage on %s for %s" %
5074 (tgt_node, dev.iv_name))
5075 # we pass force_create=True to force the LVM creation
5076 for new_lv in new_lvs:
5077 _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5078 _GetInstanceInfoText(instance), False)
5080 # Step: for each lv, detach+rename*2+attach
5081 self.proc.LogStep(4, steps_total, "change drbd configuration")
5082 for dev, old_lvs, new_lvs in iv_names.itervalues():
5083 info("detaching %s drbd from local storage" % dev.iv_name)
5084 result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5087 raise errors.OpExecError("Can't detach drbd from local storage on node"
5088 " %s for device %s" % (tgt_node, dev.iv_name))
5090 #cfg.Update(instance)
5092 # ok, we created the new LVs, so now we know we have the needed
5093 # storage; as such, we proceed on the target node to rename
5094 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5095 # using the assumption that logical_id == physical_id (which in
5096 # turn is the unique_id on that node)
5098 # FIXME(iustin): use a better name for the replaced LVs
5099 temp_suffix = int(time.time())
5100 ren_fn = lambda d, suff: (d.physical_id[0],
5101 d.physical_id[1] + "_replaced-%s" % suff)
5102 # build the rename list based on what LVs exist on the node
5104 for to_ren in old_lvs:
5105 result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5106 if not result.RemoteFailMsg() and result.payload:
5108 rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5110 info("renaming the old LVs on the target node")
5111 result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5114 raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
5115 # now we rename the new LVs to the old LVs
5116 info("renaming the new LVs on the target node")
5117 rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5118 result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5121 raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
5123 for old, new in zip(old_lvs, new_lvs):
5124 new.logical_id = old.logical_id
5125 cfg.SetDiskID(new, tgt_node)
5127 for disk in old_lvs:
5128 disk.logical_id = ren_fn(disk, temp_suffix)
5129 cfg.SetDiskID(disk, tgt_node)
5131 # now that the new lvs have the old name, we can add them to the device
5132 info("adding new mirror component on %s" % tgt_node)
5133 result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5134 if result.failed or not result.data:
5135 for new_lv in new_lvs:
5136 msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
5138 warning("Can't rollback device %s: %s", dev, msg,
5139 hint="cleanup manually the unused logical volumes")
5140 raise errors.OpExecError("Can't add local storage to drbd")
5142 dev.children = new_lvs
5143 cfg.Update(instance)
5145 # Step: wait for sync
5147 # this can fail as the old devices are degraded and _WaitForSync
5148 # does a combined result over all disks, so we don't check its return value
5150 self.proc.LogStep(5, steps_total, "sync devices")
5151 _WaitForSync(self, instance, unlock=True)
5153 # so check manually all the devices
5154 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5155 cfg.SetDiskID(dev, instance.primary_node)
5156 result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5157 msg = result.RemoteFailMsg()
5158 if not msg and not result.payload:
5159 msg = "disk not found"
5161 raise errors.OpExecError("Can't find DRBD device %s: %s" %
5163 if result.payload[5]:
5164 raise errors.OpExecError("DRBD device %s is degraded!" % name)
5166 # Step: remove old storage
5167 self.proc.LogStep(6, steps_total, "removing old storage")
5168 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5169 info("remove logical volumes for %s" % name)
5171 cfg.SetDiskID(lv, tgt_node)
5172 msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
5174 warning("Can't remove old LV: %s" % msg,
5175 hint="manually remove unused LVs")
5178 def _ExecD8Secondary(self, feedback_fn):
5179 """Replace the secondary node for drbd8.
5181 The algorithm for replace is quite complicated:
5182 - for all disks of the instance:
5183 - create new LVs on the new node with same names
5184 - shutdown the drbd device on the old secondary
5185 - disconnect the drbd network on the primary
5186 - create the drbd device on the new secondary
5187 - network attach the drbd on the primary, using an artifice:
5188 the drbd code for Attach() will connect to the network if it
5189 finds a device which is connected to the good local disks but not network enabled
5191 - wait for sync across all devices
5192 - remove all disks from the old secondary
5194 Failures are not very well handled.
5198 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5199 instance = self.instance
5203 old_node = self.tgt_node
5204 new_node = self.new_node
5205 pri_node = instance.primary_node
5207 old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5208 new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5209 pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5212 # Step: check device activation
5213 self.proc.LogStep(1, steps_total, "check device existence")
5214 info("checking volume groups")
5215 my_vg = cfg.GetVGName()
5216 results = self.rpc.call_vg_list([pri_node, new_node])
5217 for node in pri_node, new_node:
5219 if res.failed or not res.data or my_vg not in res.data:
5220 raise errors.OpExecError("Volume group '%s' not found on %s" %
5222 for idx, dev in enumerate(instance.disks):
5223 if idx not in self.op.disks:
5225 info("checking disk/%d on %s" % (idx, pri_node))
5226 cfg.SetDiskID(dev, pri_node)
5227 result = self.rpc.call_blockdev_find(pri_node, dev)
5228 msg = result.RemoteFailMsg()
5229 if not msg and not result.payload:
5230 msg = "disk not found"
5232 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5233 (idx, pri_node, msg))
5235 # Step: check other node consistency
5236 self.proc.LogStep(2, steps_total, "check peer consistency")
5237 for idx, dev in enumerate(instance.disks):
5238 if idx not in self.op.disks:
5240 info("checking disk/%d consistency on %s" % (idx, pri_node))
5241 if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5242 raise errors.OpExecError("Primary node (%s) has degraded storage,"
5243 " unsafe to replace the secondary" %
5246 # Step: create new storage
5247 self.proc.LogStep(3, steps_total, "allocate new storage")
5248 for idx, dev in enumerate(instance.disks):
5249 info("adding new local storage on %s for disk/%d" %
5251 # we pass force_create=True to force LVM creation
5252 for new_lv in dev.children:
5253 _CreateBlockDev(self, new_node, instance, new_lv, True,
5254 _GetInstanceInfoText(instance), False)
5256 # Step 4: drbd minors and drbd setup changes
5257 # after this, we must manually remove the drbd minors on both the
5258 # error and the success paths
5259 minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5261 logging.debug("Allocated minors %s", minors)
5262 self.proc.LogStep(4, steps_total, "changing drbd configuration")
5263 for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5265 info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5266 # create new devices on new_node; note that we create two IDs:
5267 # one without port, so the drbd will be activated without
5268 # networking information on the new node at this stage, and one
5269 # with network, for the latter activation in step 4
5270 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5271 if pri_node == o_node1:
5276 new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5277 new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5279 iv_names[idx] = (dev, dev.children, new_net_id)
5280 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5282 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5283 logical_id=new_alone_id,
5284 children=dev.children)
5286 _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5287 _GetInstanceInfoText(instance), False)
5288 except errors.BlockDeviceError:
5289 self.cfg.ReleaseDRBDMinors(instance.name)
5292 for idx, dev in enumerate(instance.disks):
5293 # we have new devices, shutdown the drbd on the old secondary
5294 info("shutting down drbd for disk/%d on old node" % idx)
5295 cfg.SetDiskID(dev, old_node)
5296 msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
5298 warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5300 hint="Please cleanup this device manually as soon as possible")
5302 info("detaching primary drbds from the network (=> standalone)")
5303 result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5304 instance.disks)[pri_node]
5306 msg = result.RemoteFailMsg()
5308 # detaches didn't succeed (unlikely)
5309 self.cfg.ReleaseDRBDMinors(instance.name)
5310 raise errors.OpExecError("Can't detach the disks from the network on"
5311 " old node: %s" % (msg,))
5313 # if we managed to detach at least one, we update all the disks of
5314 # the instance to point to the new secondary
5315 info("updating instance configuration")
5316 for dev, _, new_logical_id in iv_names.itervalues():
5317 dev.logical_id = new_logical_id
5318 cfg.SetDiskID(dev, pri_node)
5319 cfg.Update(instance)
5321 # and now perform the drbd attach
5322 info("attaching primary drbds to new secondary (standalone => connected)")
5323 result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5324 instance.disks, instance.name,
5326 for to_node, to_result in result.items():
5327 msg = to_result.RemoteFailMsg()
5329 warning("can't attach drbd disks on node %s: %s", to_node, msg,
5330 hint="please do a gnt-instance info to see the"
5333 # this can fail as the old devices are degraded and _WaitForSync
5334 # does a combined result over all disks, so we don't check its return value
5336 self.proc.LogStep(5, steps_total, "sync devices")
5337 _WaitForSync(self, instance, unlock=True)
5339 # so check manually all the devices
5340 for idx, (dev, old_lvs, _) in iv_names.iteritems():
5341 cfg.SetDiskID(dev, pri_node)
5342 result = self.rpc.call_blockdev_find(pri_node, dev)
5343 msg = result.RemoteFailMsg()
5344 if not msg and not result.payload:
5345 msg = "disk not found"
5347 raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5349 if result.payload[5]:
5350 raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5352 self.proc.LogStep(6, steps_total, "removing old storage")
5353 for idx, (dev, old_lvs, _) in iv_names.iteritems():
5354 info("remove logical volumes for disk/%d" % idx)
5356 cfg.SetDiskID(lv, old_node)
5357 msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
5359 warning("Can't remove LV on old secondary: %s", msg,
5360 hint="Cleanup stale volumes by hand")
5362 def Exec(self, feedback_fn):
5363 """Execute disk replacement.
5365 This dispatches the disk replacement to the appropriate handler.
5368 instance = self.instance
5370 # Activate the instance disks if we're replacing them on a down instance
5371 if not instance.admin_up:
5372 _StartInstanceDisks(self, instance, True)
5374 if self.op.mode == constants.REPLACE_DISK_CHG:
5375 fn = self._ExecD8Secondary
5377 fn = self._ExecD8DiskOnly
5379 ret = fn(feedback_fn)
5381 # Deactivate the instance disks if we're replacing them on a down instance
5382 if not instance.admin_up:
5383 _SafeShutdownInstanceDisks(self, instance)
5388 class LUGrowDisk(LogicalUnit):
5389 """Grow a disk of an instance.
5393 HTYPE = constants.HTYPE_INSTANCE
5394 _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5397 def ExpandNames(self):
5398 self._ExpandAndLockInstance()
5399 self.needed_locks[locking.LEVEL_NODE] = []
5400 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5402 def DeclareLocks(self, level):
5403 if level == locking.LEVEL_NODE:
5404 self._LockInstancesNodes()
5406 def BuildHooksEnv(self):
5409 This runs on the master, the primary and all the secondaries.
5413 "DISK": self.op.disk,
5414 "AMOUNT": self.op.amount,
5416 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5418 self.cfg.GetMasterNode(),
5419 self.instance.primary_node,
5423 def CheckPrereq(self):
5424 """Check prerequisites.
5426 This checks that the instance is in the cluster.
5429 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5430 assert instance is not None, \
5431 "Cannot retrieve locked instance %s" % self.op.instance_name
5432 nodenames = list(instance.all_nodes)
5433 for node in nodenames:
5434 _CheckNodeOnline(self, node)
5437 self.instance = instance
5439 if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5440 raise errors.OpPrereqError("Instance's disk layout does not support"
5443 self.disk = instance.FindDisk(self.op.disk)
5445 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5446 instance.hypervisor)
5447 for node in nodenames:
5448 info = nodeinfo[node]
5449 if info.failed or not info.data:
5450 raise errors.OpPrereqError("Cannot get current information"
5451 " from node '%s'" % node)
5452 vg_free = info.data.get('vg_free', None)
5453 if not isinstance(vg_free, int):
5454 raise errors.OpPrereqError("Can't compute free disk space on"
5456 if self.op.amount > vg_free:
5457 raise errors.OpPrereqError("Not enough disk space on target node %s:"
5458 " %d MiB available, %d MiB required" %
5459 (node, vg_free, self.op.amount))
5461 def Exec(self, feedback_fn):
5462 """Execute disk grow.
5465 instance = self.instance
5467 for node in instance.all_nodes:
5468 self.cfg.SetDiskID(disk, node)
5469 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5470 msg = result.RemoteFailMsg()
5472 raise errors.OpExecError("Grow request failed to node %s: %s" %
5474 disk.RecordGrow(self.op.amount)
5475 self.cfg.Update(instance)
5476 if self.op.wait_for_sync:
5477 disk_abort = not _WaitForSync(self, instance)
5479 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5480 " status.\nPlease check the instance.")
5483 class LUQueryInstanceData(NoHooksLU):
5484 """Query runtime instance data.
5487 _OP_REQP = ["instances", "static"]
5490 def ExpandNames(self):
5491 self.needed_locks = {}
5492 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
5494 if not isinstance(self.op.instances, list):
5495 raise errors.OpPrereqError("Invalid argument type 'instances'")
5497 if self.op.instances:
5498 self.wanted_names = []
5499 for name in self.op.instances:
5500 full_name = self.cfg.ExpandInstanceName(name)
5501 if full_name is None:
5502 raise errors.OpPrereqError("Instance '%s' not known" % name)
5503 self.wanted_names.append(full_name)
5504 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5506 self.wanted_names = None
5507 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5509 self.needed_locks[locking.LEVEL_NODE] = []
5510 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5512 def DeclareLocks(self, level):
5513 if level == locking.LEVEL_NODE:
5514 self._LockInstancesNodes()
5516 def CheckPrereq(self):
5517 """Check prerequisites.
5519 This only checks the optional instance list against the existing names.
5522 if self.wanted_names is None:
5523 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5525 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5526 in self.wanted_names]
5529 def _ComputeDiskStatus(self, instance, snode, dev):
5530 """Compute block device status.
5533 static = self.op.static
5535 self.cfg.SetDiskID(dev, instance.primary_node)
5536 dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5537 if dev_pstatus.offline:
5540 msg = dev_pstatus.RemoteFailMsg()
5542 raise errors.OpExecError("Can't compute disk status for %s: %s" %
5543 (instance.name, msg))
5544 dev_pstatus = dev_pstatus.payload
5548 if dev.dev_type in constants.LDS_DRBD:
5549 # we change the snode then (otherwise we use the one passed in)
5550 if dev.logical_id[0] == instance.primary_node:
5551 snode = dev.logical_id[1]
5553 snode = dev.logical_id[0]
5555 if snode and not static:
5556 self.cfg.SetDiskID(dev, snode)
5557 dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5558 if dev_sstatus.offline:
5561 msg = dev_sstatus.RemoteFailMsg()
5563 raise errors.OpExecError("Can't compute disk status for %s: %s" %
5564 (instance.name, msg))
5565 dev_sstatus = dev_sstatus.payload
5570 dev_children = [self._ComputeDiskStatus(instance, snode, child)
5571 for child in dev.children]
5576 "iv_name": dev.iv_name,
5577 "dev_type": dev.dev_type,
5578 "logical_id": dev.logical_id,
5579 "physical_id": dev.physical_id,
5580 "pstatus": dev_pstatus,
5581 "sstatus": dev_sstatus,
5582 "children": dev_children,
5588 def Exec(self, feedback_fn):
5589 """Gather and return data"""
5592 cluster = self.cfg.GetClusterInfo()
5594 for instance in self.wanted_instances:
5595 if not self.op.static:
5596 remote_info = self.rpc.call_instance_info(instance.primary_node,
5598 instance.hypervisor)
5600 remote_info = remote_info.data
5601 if remote_info and "state" in remote_info:
5604 remote_state = "down"
5607 if instance.admin_up:
5610 config_state = "down"
5612 disks = [self._ComputeDiskStatus(instance, None, device)
5613 for device in instance.disks]
5616 "name": instance.name,
5617 "config_state": config_state,
5618 "run_state": remote_state,
5619 "pnode": instance.primary_node,
5620 "snodes": instance.secondary_nodes,
5622 "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
5624 "hypervisor": instance.hypervisor,
5625 "network_port": instance.network_port,
5626 "hv_instance": instance.hvparams,
5627 "hv_actual": cluster.FillHV(instance),
5628 "be_instance": instance.beparams,
5629 "be_actual": cluster.FillBE(instance),
5632 result[instance.name] = idict
5637 class LUSetInstanceParams(LogicalUnit):
5638 """Modifies an instances's parameters.
5641 HPATH = "instance-modify"
5642 HTYPE = constants.HTYPE_INSTANCE
5643 _OP_REQP = ["instance_name"]
5646 def CheckArguments(self):
5647 if not hasattr(self.op, 'nics'):
5649 if not hasattr(self.op, 'disks'):
5651 if not hasattr(self.op, 'beparams'):
5652 self.op.beparams = {}
5653 if not hasattr(self.op, 'hvparams'):
5654 self.op.hvparams = {}
5655 self.op.force = getattr(self.op, "force", False)
5656 if not (self.op.nics or self.op.disks or
5657 self.op.hvparams or self.op.beparams):
5658 raise errors.OpPrereqError("No changes submitted")
5662 for disk_op, disk_dict in self.op.disks:
5663 if disk_op == constants.DDM_REMOVE:
5666 elif disk_op == constants.DDM_ADD:
5669 if not isinstance(disk_op, int):
5670 raise errors.OpPrereqError("Invalid disk index")
5671 if disk_op == constants.DDM_ADD:
5672 mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5673 if mode not in constants.DISK_ACCESS_SET:
5674 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5675 size = disk_dict.get('size', None)
5677 raise errors.OpPrereqError("Required disk parameter size missing")
5680 except ValueError, err:
5681 raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5683 disk_dict['size'] = size
5685 # modification of disk
5686 if 'size' in disk_dict:
5687 raise errors.OpPrereqError("Disk size change not possible, use"
5690 if disk_addremove > 1:
5691 raise errors.OpPrereqError("Only one disk add or remove operation"
5692 " supported at a time")
5696 for nic_op, nic_dict in self.op.nics:
5697 if nic_op == constants.DDM_REMOVE:
5700 elif nic_op == constants.DDM_ADD:
5703 if not isinstance(nic_op, int):
5704 raise errors.OpPrereqError("Invalid nic index")
5706 # nic_dict should be a dict
5707 nic_ip = nic_dict.get('ip', None)
5708 if nic_ip is not None:
5709 if nic_ip.lower() == constants.VALUE_NONE:
5710 nic_dict['ip'] = None
5712 if not utils.IsValidIP(nic_ip):
5713 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5715 if nic_op == constants.DDM_ADD:
5716 nic_bridge = nic_dict.get('bridge', None)
5717 if nic_bridge is None:
5718 nic_dict['bridge'] = self.cfg.GetDefBridge()
5719 nic_mac = nic_dict.get('mac', None)
5721 nic_dict['mac'] = constants.VALUE_AUTO
5723 if 'mac' in nic_dict:
5724 nic_mac = nic_dict['mac']
5725 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5726 if not utils.IsValidMac(nic_mac):
5727 raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5728 if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
5729 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
5730 " modifying an existing nic")
5732 if nic_addremove > 1:
5733 raise errors.OpPrereqError("Only one NIC add or remove operation"
5734 " supported at a time")
5736 def ExpandNames(self):
5737 self._ExpandAndLockInstance()
5738 self.needed_locks[locking.LEVEL_NODE] = []
5739 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5741 def DeclareLocks(self, level):
5742 if level == locking.LEVEL_NODE:
5743 self._LockInstancesNodes()
5745 def BuildHooksEnv(self):
5746 """Build hooks env.
5748 This runs on the master, primary and secondaries.
5750 """
5751 args = dict()
5752 if constants.BE_MEMORY in self.be_new:
5753 args['memory'] = self.be_new[constants.BE_MEMORY]
5754 if constants.BE_VCPUS in self.be_new:
5755 args['vcpus'] = self.be_new[constants.BE_VCPUS]
5756 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
5757 # information at all.
5759 args['nics'] = []
5760 nic_override = dict(self.op.nics)
5761 for idx, nic in enumerate(self.instance.nics):
5762 if idx in nic_override:
5763 this_nic_override = nic_override[idx]
5764 else:
5765 this_nic_override = {}
5766 if 'ip' in this_nic_override:
5767 ip = this_nic_override['ip']
5768 else:
5769 ip = nic.ip
5770 if 'bridge' in this_nic_override:
5771 bridge = this_nic_override['bridge']
5772 else:
5773 bridge = nic.bridge
5774 if 'mac' in this_nic_override:
5775 mac = this_nic_override['mac']
5776 else:
5777 mac = nic.mac
5778 args['nics'].append((ip, bridge, mac))
5779 if constants.DDM_ADD in nic_override:
5780 ip = nic_override[constants.DDM_ADD].get('ip', None)
5781 bridge = nic_override[constants.DDM_ADD]['bridge']
5782 mac = nic_override[constants.DDM_ADD]['mac']
5783 args['nics'].append((ip, bridge, mac))
5784 elif constants.DDM_REMOVE in nic_override:
5785 del args['nics'][-1]
5787 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
5788 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5789 return env, nl, nl
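# Illustrative sketch (all values hypothetical): for an op that only raises
# the instance's memory and re-bridges NIC 0, the hooks override computed
# above would look roughly like
#
#   args = {'memory': 512, 'vcpus': 1,
#           'nics': [('10.0.0.5', 'xen-br1', 'aa:00:00:12:34:56')]}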
5791 def CheckPrereq(self):
5792 """Check prerequisites.
5794 This only checks the instance list against the existing names.
5796 """
5797 force = self.force = self.op.force
5799 # checking the new params on the primary/secondary nodes
5801 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5802 assert self.instance is not None, \
5803 "Cannot retrieve locked instance %s" % self.op.instance_name
5804 pnode = instance.primary_node
5805 nodelist = list(instance.all_nodes)
5807 # hvparams processing
5808 if self.op.hvparams:
5809 i_hvdict = copy.deepcopy(instance.hvparams)
5810 for key, val in self.op.hvparams.iteritems():
5811 if val == constants.VALUE_DEFAULT:
5812 try:
5813 del i_hvdict[key]
5814 except KeyError:
5815 pass
5816 else:
5817 i_hvdict[key] = val
5818 cluster = self.cfg.GetClusterInfo()
5819 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
5820 hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
5821 i_hvdict)
5822 # local check
5823 hypervisor.GetHypervisor(
5824 instance.hypervisor).CheckParameterSyntax(hv_new)
5825 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5826 self.hv_new = hv_new # the new actual values
5827 self.hv_inst = i_hvdict # the new dict (without defaults)
5828 else:
5829 self.hv_new = self.hv_inst = {}
5831 # beparams processing
5832 if self.op.beparams:
5833 i_bedict = copy.deepcopy(instance.beparams)
5834 for key, val in self.op.beparams.iteritems():
5835 if val == constants.VALUE_DEFAULT:
5836 try:
5837 del i_bedict[key]
5838 except KeyError:
5839 pass
5840 else:
5841 i_bedict[key] = val
5842 cluster = self.cfg.GetClusterInfo()
5843 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
5844 be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5845 i_bedict)
5846 self.be_new = be_new # the new actual values
5847 self.be_inst = i_bedict # the new dict (without defaults)
5848 else:
5849 self.be_new = self.be_inst = {}
5851 self.warn = []
5853 if constants.BE_MEMORY in self.op.beparams and not self.force:
5854 mem_check_list = [pnode]
5855 if be_new[constants.BE_AUTO_BALANCE]:
5856 # either we changed auto_balance to yes or it was from before
5857 mem_check_list.extend(instance.secondary_nodes)
5858 instance_info = self.rpc.call_instance_info(pnode, instance.name,
5859 instance.hypervisor)
5860 nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5861 instance.hypervisor)
5862 if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
5863 # Assume the primary node is unreachable and go ahead
5864 self.warn.append("Can't get info from primary node %s" % pnode)
5865 else:
5866 if not instance_info.failed and instance_info.data:
5867 current_mem = instance_info.data['memory']
5868 else:
5869 # Assume instance not running
5870 # (there is a slight race condition here, but it's not very probable,
5871 # and we have no other way to check)
5872 current_mem = 0
5873 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5874 nodeinfo[pnode].data['memory_free'])
5875 if miss_mem > 0:
5876 raise errors.OpPrereqError("This change will prevent the instance"
5877 " from starting, due to %d MB of memory"
5878 " missing on its primary node" % miss_mem)
5880 if be_new[constants.BE_AUTO_BALANCE]:
5881 for node, nres in nodeinfo.iteritems():
5882 if node not in instance.secondary_nodes:
5883 continue
5884 if nres.failed or not isinstance(nres.data, dict):
5885 self.warn.append("Can't get info from secondary node %s" % node)
5886 elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
5887 self.warn.append("Not enough memory to failover instance to"
5888 " secondary node %s" % node)
5891 for nic_op, nic_dict in self.op.nics:
5892 if nic_op == constants.DDM_REMOVE:
5893 if not instance.nics:
5894 raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5896 if nic_op != constants.DDM_ADD:
5898 if nic_op < 0 or nic_op >= len(instance.nics):
5899 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5901 (nic_op, len(instance.nics)))
5902 if 'bridge' in nic_dict:
5903 nic_bridge = nic_dict['bridge']
5904 if nic_bridge is None:
5905 raise errors.OpPrereqError('Cannot set the nic bridge to None')
5906 if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5907 msg = ("Bridge '%s' doesn't exist on one of"
5908 " the instance nodes" % nic_bridge)
5910 self.warn.append(msg)
5912 raise errors.OpPrereqError(msg)
5913 if 'mac' in nic_dict:
5914 nic_mac = nic_dict['mac']
5915 if nic_mac is None:
5916 raise errors.OpPrereqError('Cannot set the nic mac to None')
5917 elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5918 # otherwise generate the mac
5919 nic_dict['mac'] = self.cfg.GenerateMAC()
5920 else:
5921 # or validate/reserve the current one
5922 if self.cfg.IsMacInUse(nic_mac):
5923 raise errors.OpPrereqError("MAC address %s already in use"
5924 " in cluster" % nic_mac)
5927 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5928 raise errors.OpPrereqError("Disk operations not supported for"
5929 " diskless instances")
5930 for disk_op, disk_dict in self.op.disks:
5931 if disk_op == constants.DDM_REMOVE:
5932 if len(instance.disks) == 1:
5933 raise errors.OpPrereqError("Cannot remove the last disk of"
5935 ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5936 ins_l = ins_l[pnode]
5937 if ins_l.failed or not isinstance(ins_l.data, list):
5938 raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5939 if instance.name in ins_l.data:
5940 raise errors.OpPrereqError("Instance is running, can't remove"
5943 if (disk_op == constants.DDM_ADD and
5944 len(instance.nics) >= constants.MAX_DISKS):
5945 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5946 " add more" % constants.MAX_DISKS)
5947 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5948 # an existing disk
5949 if disk_op < 0 or disk_op >= len(instance.disks):
5950 raise errors.OpPrereqError("Invalid disk index %s, valid values"
5951 " are 0 to %d" %
5952 (disk_op, len(instance.disks)))
5956 def Exec(self, feedback_fn):
5957 """Modifies an instance.
5959 All parameters take effect only at the next restart of the instance.
5961 """
5962 # Process here the warnings from CheckPrereq, as we don't have a
5963 # feedback_fn there.
5964 for warn in self.warn:
5965 feedback_fn("WARNING: %s" % warn)
5967 result = []
5968 instance = self.instance
5970 for disk_op, disk_dict in self.op.disks:
5971 if disk_op == constants.DDM_REMOVE:
5972 # remove the last disk
5973 device = instance.disks.pop()
5974 device_idx = len(instance.disks)
5975 for node, disk in device.ComputeNodeTree(instance.primary_node):
5976 self.cfg.SetDiskID(disk, node)
5977 msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
5978 if msg:
5979 self.LogWarning("Could not remove disk/%d on node %s: %s,"
5980 " continuing anyway", device_idx, node, msg)
5981 result.append(("disk/%d" % device_idx, "remove"))
5982 elif disk_op == constants.DDM_ADD:
5983 # add a new disk
5984 if instance.disk_template == constants.DT_FILE:
5985 file_driver, file_path = instance.disks[0].logical_id
5986 file_path = os.path.dirname(file_path)
5987 else:
5988 file_driver = file_path = None
5989 disk_idx_base = len(instance.disks)
5990 new_disk = _GenerateDiskTemplate(self,
5991 instance.disk_template,
5992 instance.name, instance.primary_node,
5993 instance.secondary_nodes,
5994 [disk_dict],
5995 file_path,
5996 file_driver,
5997 disk_idx_base)[0]
5998 instance.disks.append(new_disk)
5999 info = _GetInstanceInfoText(instance)
6001 logging.info("Creating volume %s for instance %s",
6002 new_disk.iv_name, instance.name)
6003 # Note: this needs to be kept in sync with _CreateDisks
6005 for node in instance.all_nodes:
6006 f_create = node == instance.primary_node
6007 try:
6008 _CreateBlockDev(self, node, instance, new_disk,
6009 f_create, info, f_create)
6010 except errors.OpExecError, err:
6011 self.LogWarning("Failed to create volume %s (%s) on"
6013 new_disk.iv_name, new_disk, node, err)
6014 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
6015 (new_disk.size, new_disk.mode)))
6016 else:
6017 # change a given disk
6018 instance.disks[disk_op].mode = disk_dict['mode']
6019 result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
6021 for nic_op, nic_dict in self.op.nics:
6022 if nic_op == constants.DDM_REMOVE:
6023 # remove the last nic
6024 del instance.nics[-1]
6025 result.append(("nic.%d" % len(instance.nics), "remove"))
6026 elif nic_op == constants.DDM_ADD:
6027 # mac and bridge should be set, by now
6028 mac = nic_dict['mac']
6029 bridge = nic_dict['bridge']
6030 new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
6031 bridge=bridge)
6032 instance.nics.append(new_nic)
6033 result.append(("nic.%d" % (len(instance.nics) - 1),
6034 "add:mac=%s,ip=%s,bridge=%s" %
6035 (new_nic.mac, new_nic.ip, new_nic.bridge)))
6036 else:
6037 # change a given nic
6038 for key in 'mac', 'ip', 'bridge':
6039 if key in nic_dict:
6040 setattr(instance.nics[nic_op], key, nic_dict[key])
6041 result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
6044 if self.op.hvparams:
6045 instance.hvparams = self.hv_inst
6046 for key, val in self.op.hvparams.iteritems():
6047 result.append(("hv/%s" % key, val))
6050 if self.op.beparams:
6051 instance.beparams = self.be_inst
6052 for key, val in self.op.beparams.iteritems():
6053 result.append(("be/%s" % key, val))
6055 self.cfg.Update(instance)
6057 return result
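# Illustrative sketch: Exec returns the applied changes as a list of
# (parameter, new value) pairs, e.g. (values hypothetical):
#
#   [("disk/1", "add:size=1024,mode=rw"),
#    ("nic.bridge/0", "xen-br1"),
#    ("be/memory", 512)]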
6060 class LUQueryExports(NoHooksLU):
6061 """Query the exports list
6064 _OP_REQP = ['nodes']
6067 def ExpandNames(self):
6068 self.needed_locks = {}
6069 self.share_locks[locking.LEVEL_NODE] = 1
6070 if not self.op.nodes:
6071 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6073 self.needed_locks[locking.LEVEL_NODE] = \
6074 _GetWantedNodes(self, self.op.nodes)
6076 def CheckPrereq(self):
6077 """Check prerequisites.
6080 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
6082 def Exec(self, feedback_fn):
6083 """Compute the list of all the exported system images.
6085 @rtype: dict
6086 @return: a dictionary with the structure node->(export-list)
6087 where export-list is a list of the instances exported on
6088 that node.
6090 """
6091 rpcresult = self.rpc.call_export_list(self.nodes)
6092 result = {}
6093 for node in rpcresult:
6094 if rpcresult[node].failed:
6095 result[node] = False
6097 result[node] = rpcresult[node].data
6099 return result
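# Illustrative sketch of the return value (node names hypothetical):
#
#   {"node1.example.com": ["instance1.example.com", "instance2.example.com"],
#    "node2.example.com": False}   # False marks a node that failed to answer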
6102 class LUExportInstance(LogicalUnit):
6103 """Export an instance to an image in the cluster.
6106 HPATH = "instance-export"
6107 HTYPE = constants.HTYPE_INSTANCE
6108 _OP_REQP = ["instance_name", "target_node", "shutdown"]
6111 def ExpandNames(self):
6112 self._ExpandAndLockInstance()
6113 # FIXME: lock only instance primary and destination node
6115 # Sad but true, for now we have to lock all nodes, as we don't know where
6116 # the previous export might be, and in this LU we search for it and
6117 # remove it from its current node. In the future we could fix this by:
6118 # - making a tasklet to search (share-lock all), then create the new one,
6119 # then one to remove, after
6120 # - removing the removal operation altogether
6121 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6123 def DeclareLocks(self, level):
6124 """Last minute lock declaration."""
6125 # All nodes are locked anyway, so nothing to do here.
6127 def BuildHooksEnv(self):
6128 """Build hooks env.
6130 This will run on the master, primary node and target node.
6132 """
6133 env = {
6134 "EXPORT_NODE": self.op.target_node,
6135 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
6137 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6138 nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
6139 self.op.target_node]
6142 def CheckPrereq(self):
6143 """Check prerequisites.
6145 This checks that the instance and node names are valid.
6147 """
6148 instance_name = self.op.instance_name
6149 self.instance = self.cfg.GetInstanceInfo(instance_name)
6150 assert self.instance is not None, \
6151 "Cannot retrieve locked instance %s" % self.op.instance_name
6152 _CheckNodeOnline(self, self.instance.primary_node)
6154 self.dst_node = self.cfg.GetNodeInfo(
6155 self.cfg.ExpandNodeName(self.op.target_node))
6157 if self.dst_node is None:
6158 # This is a wrong node name, not a non-locked node
6159 raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
6160 _CheckNodeOnline(self, self.dst_node.name)
6161 _CheckNodeNotDrained(self, self.dst_node.name)
6163 # instance disk type verification
6164 for disk in self.instance.disks:
6165 if disk.dev_type == constants.LD_FILE:
6166 raise errors.OpPrereqError("Export not supported for instances with"
6167 " file-based disks")
6169 def Exec(self, feedback_fn):
6170 """Export an instance to an image in the cluster.
6173 instance = self.instance
6174 dst_node = self.dst_node
6175 src_node = instance.primary_node
6176 if self.op.shutdown:
6177 # shutdown the instance, but not the disks
6178 result = self.rpc.call_instance_shutdown(src_node, instance)
6179 msg = result.RemoteFailMsg()
6180 if msg:
6181 raise errors.OpExecError("Could not shutdown instance %s on"
6182 " node %s: %s" %
6183 (instance.name, src_node, msg))
6185 vgname = self.cfg.GetVGName()
6187 snap_disks = []
6189 # set the disks ID correctly since call_instance_start needs the
6190 # correct drbd minor to create the symlinks
6191 for disk in instance.disks:
6192 self.cfg.SetDiskID(disk, src_node)
6194 try:
6195 for disk in instance.disks:
6196 # new_dev_name will be a snapshot of an lvm leaf of the one we passed
6197 new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
6198 if new_dev_name.failed or not new_dev_name.data:
6199 self.LogWarning("Could not snapshot block device %s on node %s",
6200 disk.logical_id[1], src_node)
6201 snap_disks.append(False)
6202 else:
6203 new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
6204 logical_id=(vgname, new_dev_name.data),
6205 physical_id=(vgname, new_dev_name.data),
6206 iv_name=disk.iv_name)
6207 snap_disks.append(new_dev)
6209 finally:
6210 if self.op.shutdown and instance.admin_up:
6211 result = self.rpc.call_instance_start(src_node, instance)
6212 msg = result.RemoteFailMsg()
6213 if msg:
6214 _ShutdownInstanceDisks(self, instance)
6215 raise errors.OpExecError("Could not start instance: %s" % msg)
6217 # TODO: check for size
6219 cluster_name = self.cfg.GetClusterName()
6220 for idx, dev in enumerate(snap_disks):
6221 if dev:
6222 result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
6223 instance, cluster_name, idx)
6224 if result.failed or not result.data:
6225 self.LogWarning("Could not export block device %s from node %s to"
6226 " node %s", dev.logical_id[1], src_node,
6228 msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
6230 self.LogWarning("Could not remove snapshot block device %s from node"
6231 " %s: %s", dev.logical_id[1], src_node, msg)
6233 result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
6234 if result.failed or not result.data:
6235 self.LogWarning("Could not finalize export for instance %s on node %s",
6236 instance.name, dst_node.name)
6238 nodelist = self.cfg.GetNodeList()
6239 nodelist.remove(dst_node.name)
6241 # on one-node clusters nodelist will be empty after the removal
6242 # if we proceed the backup would be removed because OpQueryExports
6243 # substitutes an empty list with the full cluster node list.
6244 if nodelist:
6245 exportlist = self.rpc.call_export_list(nodelist)
6246 for node in exportlist:
6247 if exportlist[node].failed:
6248 continue
6249 if instance.name in exportlist[node].data:
6250 if not self.rpc.call_export_remove(node, instance.name):
6251 self.LogWarning("Could not remove older export for instance %s"
6252 " on node %s", instance.name, node)
6255 class LURemoveExport(NoHooksLU):
6256 """Remove exports related to the named instance.
6259 _OP_REQP = ["instance_name"]
6262 def ExpandNames(self):
6263 self.needed_locks = {}
6264 # We need all nodes to be locked in order for RemoveExport to work, but we
6265 # don't need to lock the instance itself, as nothing will happen to it (and
6266 # we can remove exports also for a removed instance)
6267 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6269 def CheckPrereq(self):
6270 """Check prerequisites.
6274 def Exec(self, feedback_fn):
6275 """Remove any export.
6278 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
6279 # If the instance was not found we'll try with the name that was passed in.
6280 # This will only work if it was an FQDN, though.
6281 fqdn_warn = False
6282 if not instance_name:
6283 fqdn_warn = True
6284 instance_name = self.op.instance_name
6286 exportlist = self.rpc.call_export_list(self.acquired_locks[
6287 locking.LEVEL_NODE])
6288 found = False
6289 for node in exportlist:
6290 if exportlist[node].failed:
6291 self.LogWarning("Failed to query node %s, continuing" % node)
6293 if instance_name in exportlist[node].data:
6295 result = self.rpc.call_export_remove(node, instance_name)
6296 if result.failed or not result.data:
6297 logging.error("Could not remove export for instance %s"
6298 " on node %s", instance_name, node)
6300 if fqdn_warn and not found:
6301 feedback_fn("Export not found. If trying to remove an export belonging"
6302 " to a deleted instance please use its Fully Qualified"
6306 class TagsLU(NoHooksLU):
6307 """Generic tags LU.
6309 This is an abstract class which is the parent of all the other tags LUs.
6311 """
6313 def ExpandNames(self):
6314 self.needed_locks = {}
6315 if self.op.kind == constants.TAG_NODE:
6316 name = self.cfg.ExpandNodeName(self.op.name)
6317 if name is None:
6318 raise errors.OpPrereqError("Invalid node name (%s)" %
6319 (self.op.name,))
6320 self.op.name = name
6321 self.needed_locks[locking.LEVEL_NODE] = name
6322 elif self.op.kind == constants.TAG_INSTANCE:
6323 name = self.cfg.ExpandInstanceName(self.op.name)
6324 if name is None:
6325 raise errors.OpPrereqError("Invalid instance name (%s)" %
6326 (self.op.name,))
6327 self.op.name = name
6328 self.needed_locks[locking.LEVEL_INSTANCE] = name
6330 def CheckPrereq(self):
6331 """Check prerequisites.
6334 if self.op.kind == constants.TAG_CLUSTER:
6335 self.target = self.cfg.GetClusterInfo()
6336 elif self.op.kind == constants.TAG_NODE:
6337 self.target = self.cfg.GetNodeInfo(self.op.name)
6338 elif self.op.kind == constants.TAG_INSTANCE:
6339 self.target = self.cfg.GetInstanceInfo(self.op.name)
6340 else:
6341 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
6342 str(self.op.kind))
6345 class LUGetTags(TagsLU):
6346 """Returns the tags of a given object.
6349 _OP_REQP = ["kind", "name"]
6352 def Exec(self, feedback_fn):
6353 """Returns the tag list.
6356 return list(self.target.GetTags())
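# Illustrative usage sketch (names hypothetical; opcode classes live in
# ganeti.opcodes): fetching the tags of a node
#
#   op = opcodes.OpGetTags(kind=constants.TAG_NODE, name="node1.example.com")
#   # executing this LU would then return e.g. ["rack:r1", "power:a-side"]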
6359 class LUSearchTags(NoHooksLU):
6360 """Searches the tags for a given pattern.
6363 _OP_REQP = ["pattern"]
6366 def ExpandNames(self):
6367 self.needed_locks = {}
6369 def CheckPrereq(self):
6370 """Check prerequisites.
6372 This checks the pattern passed for validity by compiling it.
6374 """
6375 try:
6376 self.re = re.compile(self.op.pattern)
6377 except re.error, err:
6378 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6379 (self.op.pattern, err))
6381 def Exec(self, feedback_fn):
6382 """Returns the tag list.
6386 tgts = [("/cluster", cfg.GetClusterInfo())]
6387 ilist = cfg.GetAllInstancesInfo().values()
6388 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6389 nlist = cfg.GetAllNodesInfo().values()
6390 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6391 results = []
6392 for path, target in tgts:
6393 for tag in target.GetTags():
6394 if self.re.search(tag):
6395 results.append((path, tag))
6396 return results
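# Illustrative sketch (paths/values hypothetical): searching for the
# pattern '^rack:' could return
#
#   [("/nodes/node1.example.com", "rack:r1"),
#    ("/instances/web1.example.com", "rack:r1")]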
6399 class LUAddTags(TagsLU):
6400 """Sets a tag on a given object.
6403 _OP_REQP = ["kind", "name", "tags"]
6406 def CheckPrereq(self):
6407 """Check prerequisites.
6409 This checks the type and length of the tag name and value.
6411 """
6412 TagsLU.CheckPrereq(self)
6413 for tag in self.op.tags:
6414 objects.TaggableObject.ValidateTag(tag)
6416 def Exec(self, feedback_fn):
6417 """Sets the tag.
6419 """
6420 try:
6421 for tag in self.op.tags:
6422 self.target.AddTag(tag)
6423 except errors.TagError, err:
6424 raise errors.OpExecError("Error while setting tag: %s" % str(err))
6425 try:
6426 self.cfg.Update(self.target)
6427 except errors.ConfigurationError:
6428 raise errors.OpRetryError("There has been a modification to the"
6429 " config file and the operation has been"
6430 " aborted. Please retry.")
6433 class LUDelTags(TagsLU):
6434 """Delete a list of tags from a given object.
6437 _OP_REQP = ["kind", "name", "tags"]
6440 def CheckPrereq(self):
6441 """Check prerequisites.
6443 This checks that we have the given tag.
6445 """
6446 TagsLU.CheckPrereq(self)
6447 for tag in self.op.tags:
6448 objects.TaggableObject.ValidateTag(tag)
6449 del_tags = frozenset(self.op.tags)
6450 cur_tags = self.target.GetTags()
6451 if not del_tags <= cur_tags:
6452 diff_tags = del_tags - cur_tags
6453 diff_names = ["'%s'" % tag for tag in diff_tags]
6455 raise errors.OpPrereqError("Tag(s) %s not found" %
6456 (",".join(diff_names)))
6458 def Exec(self, feedback_fn):
6459 """Remove the tag from the object.
6462 for tag in self.op.tags:
6463 self.target.RemoveTag(tag)
6464 try:
6465 self.cfg.Update(self.target)
6466 except errors.ConfigurationError:
6467 raise errors.OpRetryError("There has been a modification to the"
6468 " config file and the operation has been"
6469 " aborted. Please retry.")
6472 class LUTestDelay(NoHooksLU):
6473 """Sleep for a specified amount of time.
6475 This LU sleeps on the master and/or nodes for a specified amount of
6476 time.
6478 """
6479 _OP_REQP = ["duration", "on_master", "on_nodes"]
6482 def ExpandNames(self):
6483 """Expand names and set required locks.
6485 This expands the node list, if any.
6487 """
6488 self.needed_locks = {}
6489 if self.op.on_nodes:
6490 # _GetWantedNodes can be used here, but is not always appropriate to use
6491 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
6492 # more information.
6493 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
6494 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
6496 def CheckPrereq(self):
6497 """Check prerequisites.
6501 def Exec(self, feedback_fn):
6502 """Do the actual sleep.
6505 if self.op.on_master:
6506 if not utils.TestDelay(self.op.duration):
6507 raise errors.OpExecError("Error during master delay test")
6508 if self.op.on_nodes:
6509 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
6511 raise errors.OpExecError("Complete failure from rpc call")
6512 for node, node_result in result.items():
6513 node_result.Raise()
6514 if not node_result.data:
6515 raise errors.OpExecError("Failure during rpc call to node %s,"
6516 " result: %s" % (node, node_result.data))
6519 class IAllocator(object):
6520 """IAllocator framework.
6522 An IAllocator instance has the following sets of attributes:
6523 - cfg that is needed to query the cluster
6524 - input data (all members of the _KEYS class attribute are required)
6525 - four buffer attributes (in|out_data|text), that represent the
6526 input (to the external script) in text and data structure format,
6527 and the output from it, again in two formats
6528 - the result variables from the script (success, info, nodes) for
6529 easy usage
6531 """
6532 _ALLO_KEYS = [
6533 "mem_size", "disks", "disk_template",
6534 "os", "tags", "nics", "vcpus", "hypervisor",
6535 ]
6536 _RELO_KEYS = [
6537 "relocate_from",
6538 ]
6540 def __init__(self, lu, mode, name, **kwargs):
6541 self.lu = lu
6542 # init buffer variables
6543 self.in_text = self.out_text = self.in_data = self.out_data = None
6544 # init all input fields so that pylint is happy
6545 self.mode = mode
6546 self.name = name
6547 self.mem_size = self.disks = self.disk_template = None
6548 self.os = self.tags = self.nics = self.vcpus = None
6549 self.hypervisor = None
6550 self.relocate_from = None
6551 # computed fields
6552 self.required_nodes = None
6553 # init result fields
6554 self.success = self.info = self.nodes = None
6555 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6556 keyset = self._ALLO_KEYS
6557 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6558 keyset = self._RELO_KEYS
6560 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
6561 " IAllocator" % self.mode)
6562 for key in kwargs:
6563 if key not in keyset:
6564 raise errors.ProgrammerError("Invalid input parameter '%s' to"
6565 " IAllocator" % key)
6566 setattr(self, key, kwargs[key])
6567 for key in keyset:
6568 if key not in kwargs:
6569 raise errors.ProgrammerError("Missing input parameter '%s' to"
6570 " IAllocator" % key)
6571 self._BuildInputData()
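# Illustrative sketch (keyword values hypothetical): an allocation request
# would instantiate the framework roughly like
#
#   ial = IAllocator(self, mode=constants.IALLOCATOR_MODE_ALLOC,
#                    name="web1.example.com", mem_size=512,
#                    disks=[{'size': 1024, 'mode': 'w'}],
#                    disk_template=constants.DT_DRBD8, os="debian-etch",
#                    tags=[], nics=[{'mac': 'auto', 'ip': None,
#                                    'bridge': 'xen-br0'}],
#                    vcpus=1, hypervisor="xen-pvm")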
6573 def _ComputeClusterData(self):
6574 """Compute the generic allocator input data.
6576 This is the data that is independent of the actual operation.
6578 """
6579 cfg = self.lu.cfg
6580 cluster_info = cfg.GetClusterInfo()
6584 "cluster_name": cfg.GetClusterName(),
6585 "cluster_tags": list(cluster_info.GetTags()),
6586 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
6587 # we don't have job IDs
6589 iinfo = cfg.GetAllInstancesInfo().values()
6590 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
6592 # node data
6593 node_results = {}
6594 node_list = cfg.GetNodeList()
6596 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6597 hypervisor_name = self.hypervisor
6598 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6599 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
6601 node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
6602 hypervisor_name)
6603 node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
6604 cluster_info.enabled_hypervisors)
6605 for nname, nresult in node_data.items():
6606 # first fill in static (config-based) values
6607 ninfo = cfg.GetNodeInfo(nname)
6609 "tags": list(ninfo.GetTags()),
6610 "primary_ip": ninfo.primary_ip,
6611 "secondary_ip": ninfo.secondary_ip,
6612 "offline": ninfo.offline,
6613 "drained": ninfo.drained,
6614 "master_candidate": ninfo.master_candidate,
6617 if not ninfo.offline:
6619 if not isinstance(nresult.data, dict):
6620 raise errors.OpExecError("Can't get data for node %s" % nname)
6621 remote_info = nresult.data
6622 for attr in ['memory_total', 'memory_free', 'memory_dom0',
6623 'vg_size', 'vg_free', 'cpu_total']:
6624 if attr not in remote_info:
6625 raise errors.OpExecError("Node '%s' didn't return attribute"
6626 " '%s'" % (nname, attr))
6627 try:
6628 remote_info[attr] = int(remote_info[attr])
6629 except ValueError, err:
6630 raise errors.OpExecError("Node '%s' returned invalid value"
6631 " for '%s': %s" % (nname, attr, err))
6632 # compute memory used by primary instances
6633 i_p_mem = i_p_up_mem = 0
6634 for iinfo, beinfo in i_list:
6635 if iinfo.primary_node == nname:
6636 i_p_mem += beinfo[constants.BE_MEMORY]
6637 if iinfo.name not in node_iinfo[nname].data:
6638 i_used_mem = 0
6639 else:
6640 i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
6641 i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
6642 remote_info['memory_free'] -= max(0, i_mem_diff)
6644 if iinfo.admin_up:
6645 i_p_up_mem += beinfo[constants.BE_MEMORY]
6647 # compute memory used by instances
6649 "total_memory": remote_info['memory_total'],
6650 "reserved_memory": remote_info['memory_dom0'],
6651 "free_memory": remote_info['memory_free'],
6652 "total_disk": remote_info['vg_size'],
6653 "free_disk": remote_info['vg_free'],
6654 "total_cpus": remote_info['cpu_total'],
6655 "i_pri_memory": i_p_mem,
6656 "i_pri_up_memory": i_p_up_mem,
6660 node_results[nname] = pnr
6661 data["nodes"] = node_results
6665 for iinfo, beinfo in i_list:
6666 nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
6667 for n in iinfo.nics]
6669 "tags": list(iinfo.GetTags()),
6670 "admin_up": iinfo.admin_up,
6671 "vcpus": beinfo[constants.BE_VCPUS],
6672 "memory": beinfo[constants.BE_MEMORY],
6674 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
6676 "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
6677 "disk_template": iinfo.disk_template,
6678 "hypervisor": iinfo.hypervisor,
6680 instance_data[iinfo.name] = pir
6682 data["instances"] = instance_data
6686 def _AddNewInstance(self):
6687 """Add new instance data to allocator structure.
6689 This in combination with _ComputeClusterData will create the
6690 correct structure needed as input for the allocator.
6692 The checks for the completeness of the opcode must have already been
6693 done.
6695 """
6696 data = self.in_data
6698 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
6700 if self.disk_template in constants.DTS_NET_MIRROR:
6701 self.required_nodes = 2
6702 else:
6703 self.required_nodes = 1
6707 "disk_template": self.disk_template,
6710 "vcpus": self.vcpus,
6711 "memory": self.mem_size,
6712 "disks": self.disks,
6713 "disk_space_total": disk_space,
6715 "required_nodes": self.required_nodes,
6717 data["request"] = request
6719 def _AddRelocateInstance(self):
6720 """Add relocate instance data to allocator structure.
6722 This in combination with _ComputeClusterData will create the
6723 correct structure needed as input for the allocator.
6725 The checks for the completeness of the opcode must have already been
6726 done.
6728 """
6729 instance = self.lu.cfg.GetInstanceInfo(self.name)
6730 if instance is None:
6731 raise errors.ProgrammerError("Unknown instance '%s' passed to"
6732 " IAllocator" % self.name)
6734 if instance.disk_template not in constants.DTS_NET_MIRROR:
6735 raise errors.OpPrereqError("Can't relocate non-mirrored instances")
6737 if len(instance.secondary_nodes) != 1:
6738 raise errors.OpPrereqError("Instance has not exactly one secondary node")
6740 self.required_nodes = 1
6741 disk_sizes = [{'size': disk.size} for disk in instance.disks]
6742 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
6747 "disk_space_total": disk_space,
6748 "required_nodes": self.required_nodes,
6749 "relocate_from": self.relocate_from,
6751 self.in_data["request"] = request
6753 def _BuildInputData(self):
6754 """Build input data structures.
6757 self._ComputeClusterData()
6759 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6760 self._AddNewInstance()
6761 else:
6762 self._AddRelocateInstance()
6764 self.in_text = serializer.Dump(self.in_data)
6766 def Run(self, name, validate=True, call_fn=None):
6767 """Run an instance allocator and return the results.
6769 """
6770 if call_fn is None:
6771 call_fn = self.lu.rpc.call_iallocator_runner
6774 result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
6775 result.Raise()
6777 if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
6778 raise errors.OpExecError("Invalid result from master iallocator runner")
6780 rcode, stdout, stderr, fail = result.data
6782 if rcode == constants.IARUN_NOTFOUND:
6783 raise errors.OpExecError("Can't find allocator '%s'" % name)
6784 elif rcode == constants.IARUN_FAILURE:
6785 raise errors.OpExecError("Instance allocator call failed: %s,"
6786 " output: %s" % (fail, stdout+stderr))
6787 self.out_text = stdout
6788 if validate:
6789 self._ValidateResult()
6791 def _ValidateResult(self):
6792 """Process the allocator results.
6794 This will process and if successful save the result in
6795 self.out_data and the other parameters.
6797 """
6798 try:
6799 rdict = serializer.Load(self.out_text)
6800 except Exception, err:
6801 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
6803 if not isinstance(rdict, dict):
6804 raise errors.OpExecError("Can't parse iallocator results: not a dict")
6806 for key in "success", "info", "nodes":
6807 if key not in rdict:
6808 raise errors.OpExecError("Can't parse iallocator results:"
6809 " missing key '%s'" % key)
6810 setattr(self, key, rdict[key])
6812 if not isinstance(rdict["nodes"], list):
6813 raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
6815 self.out_data = rdict
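# Illustrative sketch (node names hypothetical): a successful allocator
# reply, once validated above, is a JSON object such as
#
#   {"success": true, "info": "allocation successful",
#    "nodes": ["node1.example.com", "node2.example.com"]}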
6818 class LUTestAllocator(NoHooksLU):
6819 """Run allocator tests.
6821 This LU runs the allocator tests.
6823 """
6824 _OP_REQP = ["direction", "mode", "name"]
6826 def CheckPrereq(self):
6827 """Check prerequisites.
6829 This checks the opcode parameters depending on the direction and mode test.
6831 """
6832 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
6833 for attr in ["name", "mem_size", "disks", "disk_template",
6834 "os", "tags", "nics", "vcpus"]:
6835 if not hasattr(self.op, attr):
6836 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
6838 iname = self.cfg.ExpandInstanceName(self.op.name)
6839 if iname is not None:
6840 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
6842 if not isinstance(self.op.nics, list):
6843 raise errors.OpPrereqError("Invalid parameter 'nics'")
6844 for row in self.op.nics:
6845 if (not isinstance(row, dict) or
6846 "mac" not in row or
6847 "ip" not in row or
6848 "bridge" not in row):
6849 raise errors.OpPrereqError("Invalid contents of the"
6850 " 'nics' parameter")
6851 if not isinstance(self.op.disks, list):
6852 raise errors.OpPrereqError("Invalid parameter 'disks'")
6853 for row in self.op.disks:
6854 if (not isinstance(row, dict) or
6855 "size" not in row or
6856 not isinstance(row["size"], int) or
6857 "mode" not in row or
6858 row["mode"] not in ['r', 'w']):
6859 raise errors.OpPrereqError("Invalid contents of the"
6860 " 'disks' parameter")
6861 if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
6862 self.op.hypervisor = self.cfg.GetHypervisorType()
6863 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
6864 if not hasattr(self.op, "name"):
6865 raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
6866 fname = self.cfg.ExpandInstanceName(self.op.name)
6867 if fname is None:
6868 raise errors.OpPrereqError("Instance '%s' not found for relocation" %
6869 self.op.name)
6870 self.op.name = fname
6871 self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
6873 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
6876 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
6877 if not hasattr(self.op, "allocator") or self.op.allocator is None:
6878 raise errors.OpPrereqError("Missing allocator name")
6879 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
6880 raise errors.OpPrereqError("Wrong allocator test '%s'" %
6883 def Exec(self, feedback_fn):
6884 """Run the allocator test.
6887 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
6888 ial = IAllocator(self,
6889 mode=self.op.mode,
6890 name=self.op.name,
6891 mem_size=self.op.mem_size,
6892 disks=self.op.disks,
6893 disk_template=self.op.disk_template,
6894 os=self.op.os,
6895 tags=self.op.tags,
6896 nics=self.op.nics,
6897 vcpus=self.op.vcpus,
6898 hypervisor=self.op.hypervisor,
6899 )
6900 else:
6901 ial = IAllocator(self,
6902 mode=self.op.mode,
6903 name=self.op.name,
6904 relocate_from=list(self.relocate_from),
6905 )
6907 if self.op.direction == constants.IALLOCATOR_DIR_IN:
6908 result = ial.in_text
6909 else:
6910 ial.Run(self.op.allocator, validate=False)
6911 result = ial.out_text
6913 return result
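# Illustrative note: with direction=constants.IALLOCATOR_DIR_IN this LU only
# returns the JSON input that would be fed to the allocator (ial.in_text);
# with IALLOCATOR_DIR_OUT it actually runs the named allocator script and
# returns its raw output (ial.out_text).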