4 # Copyright (C) 2006, 2007, 2008 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable-msg=W0613,W0201
37 from ganeti import ssh
38 from ganeti import utils
39 from ganeti import errors
40 from ganeti import hypervisor
41 from ganeti import locking
42 from ganeti import constants
43 from ganeti import objects
44 from ganeti import opcodes
45 from ganeti import serializer
48 class LogicalUnit(object):
49 """Logical Unit base class.
51 Subclasses must follow these rules:
52 - implement ExpandNames
53 - implement CheckPrereq
55 - implement BuildHooksEnv
56 - redefine HPATH and HTYPE
57 - optionally redefine their run requirements:
58 REQ_MASTER: the LU needs to run on the master node
59 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
61 Note that all commands require root permissions.
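A minimal, purely illustrative subclass (all names here are hypothetical) could
look roughly like this:

  class LUExampleNoop(LogicalUnit):
    HPATH = "example-noop"
    HTYPE = constants.HTYPE_CLUSTER
    _OP_REQP = []

    def ExpandNames(self):
      self.needed_locks = {}

    def CheckPrereq(self):
      pass

    def BuildHooksEnv(self):
      env = {"OP_TARGET": self.cfg.GetClusterName()}
      mn = self.cfg.GetMasterNode()
      return env, [mn], [mn]

    def Exec(self, feedback_fn):
      feedback_fn("nothing to do")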
70 def __init__(self, processor, op, context, rpc):
71 """Constructor for LogicalUnit.
73 This needs to be overridden in derived classes in order to check op validity.
79 self.cfg = context.cfg
80 self.context = context
82 # Dicts used to declare locking needs to mcpu
83 self.needed_locks = None
84 self.acquired_locks = {}
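# Map of locking level to 0 (exclusive, the default) or 1 (shared)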
85 self.share_locks = dict(((i, 0) for i in locking.LEVELS))
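# Locks that should be removed (rather than just released) once the LU is done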
87 self.remove_locks = {}
88 # Used to force good behavior when calling helper functions
89 self.recalculate_locks = {}
92 self.LogWarning = processor.LogWarning
93 self.LogInfo = processor.LogInfo
95 for attr_name in self._OP_REQP:
96 attr_val = getattr(op, attr_name, None)
98 raise errors.OpPrereqError("Required parameter '%s' missing" %
101 if not self.cfg.IsCluster():
102 raise errors.OpPrereqError("Cluster not initialized yet,"
103 " use 'gnt-cluster init' first.")
105 master = self.cfg.GetMasterNode()
106 if master != utils.HostInfo().name:
107 raise errors.OpPrereqError("Commands must be run on the master"
111 """Returns the SshRunner object
115 self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
118 ssh = property(fget=__GetSSH)
120 def ExpandNames(self):
121 """Expand names for this LU.
123 This method is called before starting to execute the opcode, and it should
124 update all the parameters of the opcode to their canonical form (e.g. a
125 short node name must be fully expanded after this method has successfully
126 completed). This way locking, hooks, logging, etc. can work correctly.
128 LUs which implement this method must also populate the self.needed_locks
129 member, as a dict with lock levels as keys, and a list of needed lock names
131 - Use an empty dict if you don't need any lock
132 - If you don't need any lock at a particular level omit that level
133 - Don't put anything for the BGL level
134 - If you want all locks at a level use locking.ALL_SET as a value
136 If you need to share locks (rather than acquire them exclusively) at one
137 level you can modify self.share_locks, setting a true value (usually 1) for
138 that level. By default locks are not shared.
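For example, an LU that only reads cluster state typically requests all its
locks in shared mode (this is the pattern used by the verify LUs below):

  self.share_locks = dict(((i, 1) for i in locking.LEVELS))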
141 # Acquire all nodes and one instance
142 self.needed_locks = {
143 locking.LEVEL_NODE: locking.ALL_SET,
144 locking.LEVEL_INSTANCE: ['instance1.example.tld'],
146 # Acquire just two nodes
147 self.needed_locks = {
148 locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
151 self.needed_locks = {} # No, you can't leave it to the default value None
154 # The implementation of this method is mandatory only if the new LU is
155 # concurrent, so that old LUs don't need to be changed all at the same time.
158 self.needed_locks = {} # Exclusive LUs don't need locks.
160 raise NotImplementedError
162 def DeclareLocks(self, level):
163 """Declare LU locking needs for a level
165 While most LUs can just declare their locking needs at ExpandNames time,
166 sometimes there's the need to calculate some locks after having acquired
167 the ones before. This function is called just before acquiring locks at a
168 particular level, but after acquiring the ones at lower levels, and permits
169 such calculations. It can be used to modify self.needed_locks, and by
170 default it does nothing.
172 This function is only called if you have something already set in
173 self.needed_locks for the level.
175 @param level: Locking level which is going to be locked
176 @type level: member of ganeti.locking.LEVELS
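A common implementation, relying on the _LockInstancesNodes helper defined
below, looks like:

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()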
180 def CheckPrereq(self):
181 """Check prerequisites for this LU.
183 This method should check that the prerequisites for the execution
184 of this LU are fulfilled. It can do internode communication, but
185 it should be idempotent - no cluster or system changes are allowed.
188 The method should raise errors.OpPrereqError in case something is
189 not fulfilled. Its return value is ignored.
191 This method should also update all the parameters of the opcode to
192 their canonical form if it hasn't been done by ExpandNames before.
195 raise NotImplementedError
197 def Exec(self, feedback_fn):
200 This method should implement the actual work. It should raise
201 errors.OpExecError for failures that are somewhat dealt with in
205 raise NotImplementedError
207 def BuildHooksEnv(self):
208 """Build hooks environment for this LU.
210 This method should return a three-element tuple consisting of: a dict
211 containing the environment that will be used for running the
212 specific hook for this LU, a list of node names on which the hook
213 should run before the execution, and a list of node names on which
214 the hook should run after the execution.
216 The keys of the dict must not be prefixed with 'GANETI_', as that prefix
217 is added by the hooks runner. Also note additional keys will be
218 added by the hooks runner. If the LU doesn't define any
219 environment, an empty dict (and not None) should be returned.
221 If there are no nodes for a phase, an empty list (and not None) should be returned.
223 Note that if the HPATH for a LU class is None, this function will not be called.
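As an illustration, a cluster-wide LU can return something along the lines of:

  env = {"OP_TARGET": self.cfg.GetClusterName()}
  mn = self.cfg.GetMasterNode()
  return env, [mn], [mn]

which runs the hook on the master node only, both before and after execution.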
227 raise NotImplementedError
229 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
230 """Notify the LU about the results of its hooks.
232 This method is called every time a hooks phase is executed, and notifies
233 the Logical Unit about the hooks' result. The LU can then use it to alter
234 its result based on the hooks. By default the method does nothing and the
235 previous result is passed back unchanged but any LU can define it if it
236 wants to use the local cluster hook-scripts somehow.
239 phase: the hooks phase that has just been run
240 hooks_results: the results of the multi-node hooks rpc call
241 feedback_fn: function to send feedback back to the caller
242 lu_result: the previous result this LU had, or None in the PRE phase.
247 def _ExpandAndLockInstance(self):
248 """Helper function to expand and lock an instance.
250 Many LUs that work on an instance take its name in self.op.instance_name
251 and need to expand it and then declare the expanded name for locking. This
252 function does it, and then updates self.op.instance_name to the expanded
253 name. It also initializes needed_locks as a dict, if this hasn't been done
257 if self.needed_locks is None:
258 self.needed_locks = {}
260 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
261 "_ExpandAndLockInstance called with instance-level locks set"
262 expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
263 if expanded_name is None:
264 raise errors.OpPrereqError("Instance '%s' not known" %
265 self.op.instance_name)
266 self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
267 self.op.instance_name = expanded_name
269 def _LockInstancesNodes(self, primary_only=False):
270 """Helper function to declare instances' nodes for locking.
272 This function should be called after locking one or more instances to lock
273 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
274 with all primary or secondary nodes for instances already locked and
275 present in self.needed_locks[locking.LEVEL_INSTANCE].
277 It should be called from DeclareLocks, and for safety only works if
278 self.recalculate_locks[locking.LEVEL_NODE] is set.
280 In the future it may grow parameters to just lock some instances' nodes, or
281 to just lock primary or secondary nodes, if needed.
283 It should be called in DeclareLocks in a way similar to:
285 if level == locking.LEVEL_NODE:
286 self._LockInstancesNodes()
288 @type primary_only: boolean
289 @param primary_only: only lock primary nodes of locked instances
292 assert locking.LEVEL_NODE in self.recalculate_locks, \
293 "_LockInstancesNodes helper function called with no nodes to recalculate"
295 # TODO: check if we've really been called with the instance locks held
297 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
298 # future we might want to have different behaviors depending on the value
299 # of self.recalculate_locks[locking.LEVEL_NODE]
301 for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
302 instance = self.context.cfg.GetInstanceInfo(instance_name)
303 wanted_nodes.append(instance.primary_node)
305 wanted_nodes.extend(instance.secondary_nodes)
307 if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
308 self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
309 elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
310 self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
312 del self.recalculate_locks[locking.LEVEL_NODE]
315 class NoHooksLU(LogicalUnit):
316 """Simple LU which runs no hooks.
318 This LU is intended as a parent for other LogicalUnits which will
319 run no hooks, in order to reduce duplicate code.
326 class _FieldSet(object):
327 """A simple field set.
329 Among the features are:
330 - checking if a string is among a list of static string or regex objects
331 - checking if a whole list of string matches
332 - returning the matching groups from a regex match
334 Internally, all fields are held as regular expression objects.
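Illustrative usage (the field names are arbitrary):

  fields = _FieldSet("name", "pip", "sip")
  fields.Extend(_FieldSet("nic\.ip/[0-9]+"))
  fields.Matches("nic.ip/0")            # -> a regex match object
  fields.NonMatching(["name", "foo"])   # -> ["foo"]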
337 def __init__(self, *items):
338 self.items = [re.compile("^%s$" % value) for value in items]
340 def Extend(self, other_set):
341 """Extend the field set with the items from another one"""
342 self.items.extend(other_set.items)
344 def Matches(self, field):
345 """Checks if a field matches the current set
348 @param field: the string to match
349 @return: either False or a regular expression match object
352 for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
356 def NonMatching(self, items):
357 """Returns the list of fields not matching the current set
360 @param items: the list of fields to check
362 @return: list of non-matching fields
365 return [val for val in items if not self.Matches(val)]
368 def _GetWantedNodes(lu, nodes):
369 """Returns list of checked and expanded node names.
372 nodes: List of nodes (strings) or None for all
375 if not isinstance(nodes, list):
376 raise errors.OpPrereqError("Invalid argument type 'nodes'")
379 raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
380 " non-empty list of nodes whose name is to be expanded.")
384 node = lu.cfg.ExpandNodeName(name)
386 raise errors.OpPrereqError("No such node name '%s'" % name)
389 return utils.NiceSort(wanted)
392 def _GetWantedInstances(lu, instances):
393 """Returns list of checked and expanded instance names.
396 instances: List of instances (strings) or None for all
399 if not isinstance(instances, list):
400 raise errors.OpPrereqError("Invalid argument type 'instances'")
405 for name in instances:
406 instance = lu.cfg.ExpandInstanceName(name)
408 raise errors.OpPrereqError("No such instance name '%s'" % name)
409 wanted.append(instance)
412 wanted = lu.cfg.GetInstanceList()
413 return utils.NiceSort(wanted)
416 def _CheckOutputFields(static, dynamic, selected):
417 """Checks whether all selected fields are valid.
419 @type static: L{_FieldSet}
420 @param static: static fields set
421 @type dynamic: L{_FieldSet}
422 @param dynamic: dynamic fields set
429 delta = f.NonMatching(selected)
431 raise errors.OpPrereqError("Unknown output fields selected: %s"
435 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
436 memory, vcpus, nics):
437 """Builds instance related env variables for hooks from single variables.
440 secondary_nodes: List of secondary nodes as strings
444 "INSTANCE_NAME": name,
445 "INSTANCE_PRIMARY": primary_node,
446 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
447 "INSTANCE_OS_TYPE": os_type,
448 "INSTANCE_STATUS": status,
449 "INSTANCE_MEMORY": memory,
450 "INSTANCE_VCPUS": vcpus,
454 nic_count = len(nics)
455 for idx, (ip, bridge, mac) in enumerate(nics):
458 env["INSTANCE_NIC%d_IP" % idx] = ip
459 env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
460 env["INSTANCE_NIC%d_HWADDR" % idx] = mac
464 env["INSTANCE_NIC_COUNT"] = nic_count
469 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
470 """Builds instance related env variables for hooks from an object.
473 instance: objects.Instance object of instance
474 override: dict of values to override
476 bep = lu.cfg.GetClusterInfo().FillBE(instance)
478 'name': instance.name,
479 'primary_node': instance.primary_node,
480 'secondary_nodes': instance.secondary_nodes,
481 'os_type': instance.os,
482 'status': instance.status,
483 'memory': bep[constants.BE_MEMORY],
484 'vcpus': bep[constants.BE_VCPUS],
485 'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
488 args.update(override)
489 return _BuildInstanceHookEnv(**args)
492 def _CheckInstanceBridgesExist(lu, instance):
493 """Check that the brigdes needed by an instance exist.
496 # check bridges existence
497 brlist = [nic.bridge for nic in instance.nics]
498 if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
499 raise errors.OpPrereqError("one or more target bridges %s does not"
500 " exist on destination node '%s'" %
501 (brlist, instance.primary_node))
504 class LUDestroyCluster(NoHooksLU):
505 """Logical unit for destroying the cluster.
510 def CheckPrereq(self):
511 """Check prerequisites.
513 This checks whether the cluster is empty.
515 Any errors are signalled by raising errors.OpPrereqError.
518 master = self.cfg.GetMasterNode()
520 nodelist = self.cfg.GetNodeList()
521 if len(nodelist) != 1 or nodelist[0] != master:
522 raise errors.OpPrereqError("There are still %d node(s) in"
523 " this cluster." % (len(nodelist) - 1))
524 instancelist = self.cfg.GetInstanceList()
526 raise errors.OpPrereqError("There are still %d instance(s) in"
527 " this cluster." % len(instancelist))
529 def Exec(self, feedback_fn):
530 """Destroys the cluster.
533 master = self.cfg.GetMasterNode()
534 if not self.rpc.call_node_stop_master(master, False):
535 raise errors.OpExecError("Could not disable the master role")
536 priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
537 utils.CreateBackup(priv_key)
538 utils.CreateBackup(pub_key)
542 class LUVerifyCluster(LogicalUnit):
543 """Verifies the cluster status.
546 HPATH = "cluster-verify"
547 HTYPE = constants.HTYPE_CLUSTER
548 _OP_REQP = ["skip_checks"]
551 def ExpandNames(self):
552 self.needed_locks = {
553 locking.LEVEL_NODE: locking.ALL_SET,
554 locking.LEVEL_INSTANCE: locking.ALL_SET,
556 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
558 def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
559 remote_version, feedback_fn):
560 """Run multiple tests against a node.
563 - compares ganeti version
564 - checks vg existence and size > 20G
565 - checks config file checksum
566 - checks ssh to other nodes
569 node: name of the node to check
570 file_list: required list of files
571 local_cksum: dictionary of local files and their checksums
574 # compares ganeti version
575 local_version = constants.PROTOCOL_VERSION
576 if not remote_version:
577 feedback_fn(" - ERROR: connection to %s failed" % (node))
580 if local_version != remote_version:
581 feedback_fn(" - ERROR: sw version mismatch: master %s, node(%s) %s" %
582 (local_version, node, remote_version))
585 # checks vg existence and size > 20G
589 feedback_fn(" - ERROR: unable to check volume groups on node %s." %
593 vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
594 constants.MIN_VG_SIZE)
596 feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node))
600 feedback_fn(" - ERROR: unable to verify node %s." % (node,))
603 # checks config file checksum
606 if 'filelist' not in node_result:
608 feedback_fn(" - ERROR: node hasn't returned file checksum data")
610 remote_cksum = node_result['filelist']
611 for file_name in file_list:
612 if file_name not in remote_cksum:
614 feedback_fn(" - ERROR: file '%s' missing" % file_name)
615 elif remote_cksum[file_name] != local_cksum[file_name]:
617 feedback_fn(" - ERROR: file '%s' has wrong checksum" % file_name)
619 if 'nodelist' not in node_result:
621 feedback_fn(" - ERROR: node hasn't returned node ssh connectivity data")
623 if node_result['nodelist']:
625 for node in node_result['nodelist']:
626 feedback_fn(" - ERROR: ssh communication with node '%s': %s" %
627 (node, node_result['nodelist'][node]))
628 if 'node-net-test' not in node_result:
630 feedback_fn(" - ERROR: node hasn't returned node tcp connectivity data")
632 if node_result['node-net-test']:
634 nlist = utils.NiceSort(node_result['node-net-test'].keys())
636 feedback_fn(" - ERROR: tcp communication with node '%s': %s" %
637 (node, node_result['node-net-test'][node]))
639 hyp_result = node_result.get('hypervisor', None)
640 if isinstance(hyp_result, dict):
641 for hv_name, hv_result in hyp_result.iteritems():
642 if hv_result is not None:
643 feedback_fn(" - ERROR: hypervisor %s verify failure: '%s'" %
644 (hv_name, hv_result))
647 def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
648 node_instance, feedback_fn):
649 """Verify an instance.
651 This function checks to see if the required block devices are
652 available on the instance's node.
657 node_current = instanceconfig.primary_node
660 instanceconfig.MapLVsByNode(node_vol_should)
662 for node in node_vol_should:
663 for volume in node_vol_should[node]:
664 if node not in node_vol_is or volume not in node_vol_is[node]:
665 feedback_fn(" - ERROR: volume %s missing on node %s" %
669 if not instanceconfig.status == 'down':
670 if (node_current not in node_instance or
671 not instance in node_instance[node_current]):
672 feedback_fn(" - ERROR: instance %s not running on node %s" %
673 (instance, node_current))
676 for node in node_instance:
677 if (not node == node_current):
678 if instance in node_instance[node]:
679 feedback_fn(" - ERROR: instance %s should not run on node %s" %
685 def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
686 """Verify if there are any unknown volumes in the cluster.
688 The .os, .swap and backup volumes are ignored. All other volumes are reported as unknown.
694 for node in node_vol_is:
695 for volume in node_vol_is[node]:
696 if node not in node_vol_should or volume not in node_vol_should[node]:
697 feedback_fn(" - ERROR: volume %s on node %s should not exist" %
702 def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
703 """Verify the list of running instances.
705 This checks what instances are running but unknown to the cluster.
709 for node in node_instance:
710 for runninginstance in node_instance[node]:
711 if runninginstance not in instancelist:
712 feedback_fn(" - ERROR: instance %s on node %s should not exist" %
713 (runninginstance, node))
717 def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
718 """Verify N+1 Memory Resilience.
720 Check that if one single node dies we can still start all the instances it
726 for node, nodeinfo in node_info.iteritems():
727 # This code checks that every node which is now listed as secondary has
728 # enough memory to host all instances it is supposed to should a single
729 # other node in the cluster fail.
730 # FIXME: not ready for failover to an arbitrary node
731 # FIXME: does not support file-backed instances
732 # WARNING: we currently take into account down instances as well as up
733 # ones, considering that even if they're down someone might want to start
734 # them even in the event of a node failure.
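# Example: if this node is the secondary of two auto-balanced instances of
# 1024 MB and 2048 MB whose primary is node A, it needs at least 3072 MB of
# free memory to absorb a failure of node A.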
735 for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
737 for instance in instances:
738 bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
739 if bep[constants.BE_AUTO_BALANCE]:
740 needed_mem += bep[constants.BE_MEMORY]
741 if nodeinfo['mfree'] < needed_mem:
742 feedback_fn(" - ERROR: not enough memory on node %s to accomodate"
743 " failovers should node %s fail" % (node, prinode))
747 def CheckPrereq(self):
748 """Check prerequisites.
750 Transform the list of checks we're going to skip into a set and check that
751 all its members are valid.
754 self.skip_set = frozenset(self.op.skip_checks)
755 if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
756 raise errors.OpPrereqError("Invalid checks to be skipped specified")
758 def BuildHooksEnv(self):
761 Cluster-Verify hooks are run only in the post phase, and their failure causes
762 the output to be logged in the verify output and the verification to fail.
765 all_nodes = self.cfg.GetNodeList()
766 # TODO: populate the environment with useful information for verify hooks
768 return env, [], all_nodes
770 def Exec(self, feedback_fn):
771 """Verify integrity of cluster, performing various test on nodes.
775 feedback_fn("* Verifying global settings")
776 for msg in self.cfg.VerifyConfig():
777 feedback_fn(" - ERROR: %s" % msg)
779 vg_name = self.cfg.GetVGName()
780 hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
781 nodelist = utils.NiceSort(self.cfg.GetNodeList())
782 nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
783 instancelist = utils.NiceSort(self.cfg.GetInstanceList())
784 i_non_redundant = [] # Non redundant instances
785 i_non_a_balanced = [] # Non auto-balanced instances
791 # FIXME: verify OS list
794 file_names.append(constants.SSL_CERT_FILE)
795 file_names.append(constants.CLUSTER_CONF_FILE)
796 local_checksums = utils.FingerprintFiles(file_names)
798 feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
799 all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
800 all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
801 all_vglist = self.rpc.call_vg_list(nodelist)
802 node_verify_param = {
803 'filelist': file_names,
804 'nodelist': nodelist,
805 'hypervisor': hypervisors,
806 'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
807 for node in nodeinfo]
809 all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
810 self.cfg.GetClusterName())
811 all_rversion = self.rpc.call_version(nodelist)
812 all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
813 self.cfg.GetHypervisorType())
815 cluster = self.cfg.GetClusterInfo()
816 for node in nodelist:
817 feedback_fn("* Verifying node %s" % node)
818 result = self._VerifyNode(node, file_names, local_checksums,
819 all_vglist[node], all_nvinfo[node],
820 all_rversion[node], feedback_fn)
824 volumeinfo = all_volumeinfo[node]
826 if isinstance(volumeinfo, basestring):
827 feedback_fn(" - ERROR: LVM problem on node %s: %s" %
828 (node, volumeinfo[-400:].encode('string_escape')))
830 node_volume[node] = {}
831 elif not isinstance(volumeinfo, dict):
832 feedback_fn(" - ERROR: connection to %s failed" % (node,))
836 node_volume[node] = volumeinfo
839 nodeinstance = all_instanceinfo[node]
840 if type(nodeinstance) != list:
841 feedback_fn(" - ERROR: connection to %s failed" % (node,))
845 node_instance[node] = nodeinstance
848 nodeinfo = all_ninfo[node]
849 if not isinstance(nodeinfo, dict):
850 feedback_fn(" - ERROR: connection to %s failed" % (node,))
856 "mfree": int(nodeinfo['memory_free']),
857 "dfree": int(nodeinfo['vg_free']),
860 # dictionary holding all instances this node is secondary for,
861 # grouped by their primary node. Each key is a cluster node, and each
862 # value is a list of instances which have the key as primary and the
863 # current node as secondary. this is handy to calculate N+1 memory
864 # availability if you can only failover from a primary to its secondary.
866 "sinst-by-pnode": {},
869 feedback_fn(" - ERROR: invalid value returned from node %s" % (node,))
875 for instance in instancelist:
876 feedback_fn("* Verifying instance %s" % instance)
877 inst_config = self.cfg.GetInstanceInfo(instance)
878 result = self._VerifyInstance(instance, inst_config, node_volume,
879 node_instance, feedback_fn)
882 inst_config.MapLVsByNode(node_vol_should)
884 instance_cfg[instance] = inst_config
886 pnode = inst_config.primary_node
887 if pnode in node_info:
888 node_info[pnode]['pinst'].append(instance)
890 feedback_fn(" - ERROR: instance %s, connection to primary node"
891 " %s failed" % (instance, pnode))
894 # If the instance is non-redundant we cannot survive losing its primary
895 # node, so we are not N+1 compliant. On the other hand we have no disk
896 # templates with more than one secondary so that situation is not well
898 # FIXME: does not support file-backed instances
899 if len(inst_config.secondary_nodes) == 0:
900 i_non_redundant.append(instance)
901 elif len(inst_config.secondary_nodes) > 1:
902 feedback_fn(" - WARNING: multiple secondaries for instance %s"
905 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
906 i_non_a_balanced.append(instance)
908 for snode in inst_config.secondary_nodes:
909 if snode in node_info:
910 node_info[snode]['sinst'].append(instance)
911 if pnode not in node_info[snode]['sinst-by-pnode']:
912 node_info[snode]['sinst-by-pnode'][pnode] = []
913 node_info[snode]['sinst-by-pnode'][pnode].append(instance)
915 feedback_fn(" - ERROR: instance %s, connection to secondary node"
916 " %s failed" % (instance, snode))
918 feedback_fn("* Verifying orphan volumes")
919 result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
923 feedback_fn("* Verifying remaining instances")
924 result = self._VerifyOrphanInstances(instancelist, node_instance,
928 if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
929 feedback_fn("* Verifying N+1 Memory redundancy")
930 result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
933 feedback_fn("* Other Notes")
935 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
936 % len(i_non_redundant))
939 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
940 % len(i_non_a_balanced))
944 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
945 """Analize the post-hooks' result, handle it, and send some
946 nicely-formatted feedback back to the user.
949 phase: the hooks phase that has just been run
950 hooks_results: the results of the multi-node hooks rpc call
951 feedback_fn: function to send feedback back to the caller
952 lu_result: previous Exec result
955 # We only really run POST phase hooks, and are only interested in their results
957 if phase == constants.HOOKS_PHASE_POST:
958 # Used to change hooks' output to proper indentation
959 indent_re = re.compile('^', re.M)
960 feedback_fn("* Hooks Results")
961 if not hooks_results:
962 feedback_fn(" - ERROR: general communication failure")
965 for node_name in hooks_results:
966 show_node_header = True
967 res = hooks_results[node_name]
968 if res is False or not isinstance(res, list):
969 feedback_fn(" Communication failure")
972 for script, hkr, output in res:
973 if hkr == constants.HKR_FAIL:
974 # The node header is only shown once, if there are
975 # failing hooks on that node
977 feedback_fn(" Node %s:" % node_name)
978 show_node_header = False
979 feedback_fn(" ERROR: Script %s failed, output:" % script)
980 output = indent_re.sub(' ', output)
981 feedback_fn("%s" % output)
987 class LUVerifyDisks(NoHooksLU):
988 """Verifies the cluster disks status.
994 def ExpandNames(self):
995 self.needed_locks = {
996 locking.LEVEL_NODE: locking.ALL_SET,
997 locking.LEVEL_INSTANCE: locking.ALL_SET,
999 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1001 def CheckPrereq(self):
1002 """Check prerequisites.
1004 This has no prerequisites.
1009 def Exec(self, feedback_fn):
1010 """Verify integrity of cluster disks.
1013 result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
1015 vg_name = self.cfg.GetVGName()
1016 nodes = utils.NiceSort(self.cfg.GetNodeList())
1017 instances = [self.cfg.GetInstanceInfo(name)
1018 for name in self.cfg.GetInstanceList()]
1021 for inst in instances:
1023 if (inst.status != "up" or
1024 inst.disk_template not in constants.DTS_NET_MIRROR):
1026 inst.MapLVsByNode(inst_lvs)
1027 # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1028 for node, vol_list in inst_lvs.iteritems():
1029 for vol in vol_list:
1030 nv_dict[(node, vol)] = inst
1035 node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1040 lvs = node_lvs[node]
1042 if isinstance(lvs, basestring):
1043 logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1044 res_nlvm[node] = lvs
1045 elif not isinstance(lvs, dict):
1046 logging.warning("Connection to node %s failed or invalid data"
1048 res_nodes.append(node)
1051 for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1052 inst = nv_dict.pop((node, lv_name), None)
1053 if (not lv_online and inst is not None
1054 and inst.name not in res_instances):
1055 res_instances.append(inst.name)
1057 # any leftover items in nv_dict are missing LVs, let's arrange the
1059 for key, inst in nv_dict.iteritems():
1060 if inst.name not in res_missing:
1061 res_missing[inst.name] = []
1062 res_missing[inst.name].append(key)
1067 class LURenameCluster(LogicalUnit):
1068 """Rename the cluster.
1071 HPATH = "cluster-rename"
1072 HTYPE = constants.HTYPE_CLUSTER
1075 def BuildHooksEnv(self):
1080 "OP_TARGET": self.cfg.GetClusterName(),
1081 "NEW_NAME": self.op.name,
1083 mn = self.cfg.GetMasterNode()
1084 return env, [mn], [mn]
1086 def CheckPrereq(self):
1087 """Verify that the passed name is a valid one.
1090 hostname = utils.HostInfo(self.op.name)
1092 new_name = hostname.name
1093 self.ip = new_ip = hostname.ip
1094 old_name = self.cfg.GetClusterName()
1095 old_ip = self.cfg.GetMasterIP()
1096 if new_name == old_name and new_ip == old_ip:
1097 raise errors.OpPrereqError("Neither the name nor the IP address of the"
1098 " cluster has changed")
1099 if new_ip != old_ip:
1100 if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1101 raise errors.OpPrereqError("The given cluster IP address (%s) is"
1102 " reachable on the network. Aborting." %
1105 self.op.name = new_name
1107 def Exec(self, feedback_fn):
1108 """Rename the cluster.
1111 clustername = self.op.name
1114 # shutdown the master IP
1115 master = self.cfg.GetMasterNode()
1116 if not self.rpc.call_node_stop_master(master, False):
1117 raise errors.OpExecError("Could not disable the master role")
1122 ss.SetKey(ss.SS_MASTER_IP, ip)
1123 ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1125 # Distribute updated ss config to all nodes
1126 myself = self.cfg.GetNodeInfo(master)
1127 dist_nodes = self.cfg.GetNodeList()
1128 if myself.name in dist_nodes:
1129 dist_nodes.remove(myself.name)
1131 logging.debug("Copying updated ssconf data to all nodes")
1132 for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1133 fname = ss.KeyToFilename(keyname)
1134 result = self.rpc.call_upload_file(dist_nodes, fname)
1135 for to_node in dist_nodes:
1136 if not result[to_node]:
1137 self.LogWarning("Copy of file %s to node %s failed",
1140 if not self.rpc.call_node_start_master(master, False):
1141 self.LogWarning("Could not re-enable the master role on"
1142 " the master, please restart manually.")
1145 def _RecursiveCheckIfLVMBased(disk):
1146 """Check if the given disk or its children are lvm-based.
1149 disk: ganeti.objects.Disk object
1152 boolean indicating whether a LD_LV dev_type was found or not
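For example, a DRBD disk whose children are logical volumes is reported as
lvm-based through the recursion over its children.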
1156 for chdisk in disk.children:
1157 if _RecursiveCheckIfLVMBased(chdisk):
1159 return disk.dev_type == constants.LD_LV
1162 class LUSetClusterParams(LogicalUnit):
1163 """Change the parameters of the cluster.
1166 HPATH = "cluster-modify"
1167 HTYPE = constants.HTYPE_CLUSTER
1171 def ExpandNames(self):
1172 # FIXME: in the future maybe other cluster params won't require checking on
1173 # all nodes to be modified.
1174 self.needed_locks = {
1175 locking.LEVEL_NODE: locking.ALL_SET,
1177 self.share_locks[locking.LEVEL_NODE] = 1
1179 def BuildHooksEnv(self):
1184 "OP_TARGET": self.cfg.GetClusterName(),
1185 "NEW_VG_NAME": self.op.vg_name,
1187 mn = self.cfg.GetMasterNode()
1188 return env, [mn], [mn]
1190 def CheckPrereq(self):
1191 """Check prerequisites.
1193 This checks whether the given params don't conflict and
1194 if the given volume group is valid.
1197 # FIXME: This only works because there is only one parameter that can be
1198 # changed or removed.
1199 if self.op.vg_name is not None and not self.op.vg_name:
1200 instances = self.cfg.GetAllInstancesInfo().values()
1201 for inst in instances:
1202 for disk in inst.disks:
1203 if _RecursiveCheckIfLVMBased(disk):
1204 raise errors.OpPrereqError("Cannot disable lvm storage while"
1205 " lvm-based instances exist")
1207 node_list = self.acquired_locks[locking.LEVEL_NODE]
1209 # if vg_name not None, checks given volume group on all nodes
1211 vglist = self.rpc.call_vg_list(node_list)
1212 for node in node_list:
1213 vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1214 constants.MIN_VG_SIZE)
1216 raise errors.OpPrereqError("Error on node '%s': %s" %
1219 self.cluster = cluster = self.cfg.GetClusterInfo()
1220 # beparams changes do not need validation (we can't validate?),
1221 # but we still process here
1222 if self.op.beparams:
1223 self.new_beparams = cluster.FillDict(
1224 cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1226 # hypervisor list/parameters
1227 self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1228 if self.op.hvparams:
1229 if not isinstance(self.op.hvparams, dict):
1230 raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1231 for hv_name, hv_dict in self.op.hvparams.items():
1232 if hv_name not in self.new_hvparams:
1233 self.new_hvparams[hv_name] = hv_dict
1235 self.new_hvparams[hv_name].update(hv_dict)
1237 if self.op.enabled_hypervisors is not None:
1238 self.hv_list = self.op.enabled_hypervisors
1240 self.hv_list = cluster.enabled_hypervisors
1242 if self.op.hvparams or self.op.enabled_hypervisors is not None:
1243 # either the enabled list has changed, or the parameters have, validate
1244 for hv_name, hv_params in self.new_hvparams.items():
1245 if ((self.op.hvparams and hv_name in self.op.hvparams) or
1246 (self.op.enabled_hypervisors and
1247 hv_name in self.op.enabled_hypervisors)):
1248 # either this is a new hypervisor, or its parameters have changed
1249 hv_class = hypervisor.GetHypervisor(hv_name)
1250 hv_class.CheckParameterSyntax(hv_params)
1251 _CheckHVParams(self, node_list, hv_name, hv_params)
1253 def Exec(self, feedback_fn):
1254 """Change the parameters of the cluster.
1257 if self.op.vg_name is not None:
1258 if self.op.vg_name != self.cfg.GetVGName():
1259 self.cfg.SetVGName(self.op.vg_name)
1261 feedback_fn("Cluster LVM configuration already in desired"
1262 " state, not changing")
1263 if self.op.hvparams:
1264 self.cluster.hvparams = self.new_hvparams
1265 if self.op.enabled_hypervisors is not None:
1266 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1267 if self.op.beparams:
1268 self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1269 self.cfg.Update(self.cluster)
1272 def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1273 """Sleep and poll for an instance's disk to sync.
1276 if not instance.disks:
1280 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1282 node = instance.primary_node
1284 for dev in instance.disks:
1285 lu.cfg.SetDiskID(dev, node)
1291 cumul_degraded = False
1292 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1294 lu.LogWarning("Can't get any data from node %s", node)
1297 raise errors.RemoteError("Can't contact node %s for mirror data,"
1298 " aborting." % node)
1302 for i in range(len(rstats)):
1305 lu.LogWarning("Can't compute data for node %s/%s",
1306 node, instance.disks[i].iv_name)
1308 # we ignore the ldisk parameter
1309 perc_done, est_time, is_degraded, _ = mstat
1310 cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1311 if perc_done is not None:
1313 if est_time is not None:
1314 rem_time = "%d estimated seconds remaining" % est_time
1317 rem_time = "no time estimate"
1318 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1319 (instance.disks[i].iv_name, perc_done, rem_time))
1323 time.sleep(min(60, max_time))
1326 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1327 return not cumul_degraded
1330 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1331 """Check that mirrors are not degraded.
1333 The ldisk parameter, if True, will change the test from the
1334 is_degraded attribute (which represents overall non-ok status for
1335 the device(s)) to the ldisk (representing the local storage status).
1338 lu.cfg.SetDiskID(dev, node)
1345 if on_primary or dev.AssembleOnSecondary():
1346 rstats = lu.rpc.call_blockdev_find(node, dev)
1348 logging.warning("Node %s: disk degraded, not found or node down", node)
1351 result = result and (not rstats[idx])
1353 for child in dev.children:
1354 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1359 class LUDiagnoseOS(NoHooksLU):
1360 """Logical unit for OS diagnose/query.
1363 _OP_REQP = ["output_fields", "names"]
1365 _FIELDS_STATIC = _FieldSet()
1366 _FIELDS_DYNAMIC = _FieldSet("name", "valid", "node_status")
1368 def ExpandNames(self):
1370 raise errors.OpPrereqError("Selective OS query not supported")
1372 _CheckOutputFields(static=self._FIELDS_STATIC,
1373 dynamic=self._FIELDS_DYNAMIC,
1374 selected=self.op.output_fields)
1376 # Lock all nodes, in shared mode
1377 self.needed_locks = {}
1378 self.share_locks[locking.LEVEL_NODE] = 1
1379 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1381 def CheckPrereq(self):
1382 """Check prerequisites.
1387 def _DiagnoseByOS(node_list, rlist):
1388 """Remaps a per-node return list into an a per-os per-node dictionary
1391 node_list: a list with the names of all nodes
1392 rlist: a map with node names as keys and OS objects as values
1395 map: a map with osnames as keys and as value another map, with
1397 keys and list of OS objects as values
1398 e.g. {"debian-etch": {"node1": [<object>,...],
1399 "node2": [<object>,]}
1404 for node_name, nr in rlist.iteritems():
1408 if os_obj.name not in all_os:
1409 # build a list of nodes for this os containing empty lists
1410 # for each node in node_list
1411 all_os[os_obj.name] = {}
1412 for nname in node_list:
1413 all_os[os_obj.name][nname] = []
1414 all_os[os_obj.name][node_name].append(os_obj)
1417 def Exec(self, feedback_fn):
1418 """Compute the list of OSes.
1421 node_list = self.acquired_locks[locking.LEVEL_NODE]
1422 node_data = self.rpc.call_os_diagnose(node_list)
1423 if node_data == False:
1424 raise errors.OpExecError("Can't gather the list of OSes")
1425 pol = self._DiagnoseByOS(node_list, node_data)
1427 for os_name, os_data in pol.iteritems():
1429 for field in self.op.output_fields:
1432 elif field == "valid":
1433 val = utils.all([osl and osl[0] for osl in os_data.values()])
1434 elif field == "node_status":
1436 for node_name, nos_list in os_data.iteritems():
1437 val[node_name] = [(v.status, v.path) for v in nos_list]
1439 raise errors.ParameterError(field)
1446 class LURemoveNode(LogicalUnit):
1447 """Logical unit for removing a node.
1450 HPATH = "node-remove"
1451 HTYPE = constants.HTYPE_NODE
1452 _OP_REQP = ["node_name"]
1454 def BuildHooksEnv(self):
1457 This doesn't run on the target node in the pre phase as a failed
1458 node would then be impossible to remove.
1462 "OP_TARGET": self.op.node_name,
1463 "NODE_NAME": self.op.node_name,
1465 all_nodes = self.cfg.GetNodeList()
1466 all_nodes.remove(self.op.node_name)
1467 return env, all_nodes, all_nodes
1469 def CheckPrereq(self):
1470 """Check prerequisites.
1473 - the node exists in the configuration
1474 - it does not have primary or secondary instances
1475 - it's not the master
1477 Any errors are signalled by raising errors.OpPrereqError.
1480 node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1482 raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1484 instance_list = self.cfg.GetInstanceList()
1486 masternode = self.cfg.GetMasterNode()
1487 if node.name == masternode:
1488 raise errors.OpPrereqError("Node is the master node,"
1489 " you need to failover first.")
1491 for instance_name in instance_list:
1492 instance = self.cfg.GetInstanceInfo(instance_name)
1493 if node.name == instance.primary_node:
1494 raise errors.OpPrereqError("Instance %s still running on the node,"
1495 " please remove first." % instance_name)
1496 if node.name in instance.secondary_nodes:
1497 raise errors.OpPrereqError("Instance %s has node as a secondary,"
1498 " please remove first." % instance_name)
1499 self.op.node_name = node.name
1502 def Exec(self, feedback_fn):
1503 """Removes the node from the cluster.
1507 logging.info("Stopping the node daemon and removing configs from node %s",
1510 self.context.RemoveNode(node.name)
1512 self.rpc.call_node_leave_cluster(node.name)
1515 class LUQueryNodes(NoHooksLU):
1516 """Logical unit for querying nodes.
1519 _OP_REQP = ["output_fields", "names"]
1521 _FIELDS_DYNAMIC = _FieldSet(
1523 "mtotal", "mnode", "mfree",
1528 _FIELDS_STATIC = _FieldSet(
1529 "name", "pinst_cnt", "sinst_cnt",
1530 "pinst_list", "sinst_list",
1531 "pip", "sip", "tags",
1535 def ExpandNames(self):
1536 _CheckOutputFields(static=self._FIELDS_STATIC,
1537 dynamic=self._FIELDS_DYNAMIC,
1538 selected=self.op.output_fields)
1540 self.needed_locks = {}
1541 self.share_locks[locking.LEVEL_NODE] = 1
1544 self.wanted = _GetWantedNodes(self, self.op.names)
1546 self.wanted = locking.ALL_SET
1548 self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1550 # if we don't request only static fields, we need to lock the nodes
1551 self.needed_locks[locking.LEVEL_NODE] = self.wanted
1554 def CheckPrereq(self):
1555 """Check prerequisites.
1558 # The validation of the node list is done in _GetWantedNodes if the list
1559 # is non-empty; if it is empty, there's no validation to do
1562 def Exec(self, feedback_fn):
1563 """Computes the list of nodes and their attributes.
1566 all_info = self.cfg.GetAllNodesInfo()
1568 nodenames = self.acquired_locks[locking.LEVEL_NODE]
1569 elif self.wanted != locking.ALL_SET:
1570 nodenames = self.wanted
1571 missing = set(nodenames).difference(all_info.keys())
1573 raise errors.OpExecError(
1574 "Some nodes were removed before retrieving their data: %s" % missing)
1576 nodenames = all_info.keys()
1578 nodenames = utils.NiceSort(nodenames)
1579 nodelist = [all_info[name] for name in nodenames]
1581 # begin data gathering
1585 node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
1586 self.cfg.GetHypervisorType())
1587 for name in nodenames:
1588 nodeinfo = node_data.get(name, None)
1591 "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1592 "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1593 "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1594 "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1595 "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1596 "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1597 "bootid": nodeinfo['bootid'],
1600 live_data[name] = {}
1602 live_data = dict.fromkeys(nodenames, {})
1604 node_to_primary = dict([(name, set()) for name in nodenames])
1605 node_to_secondary = dict([(name, set()) for name in nodenames])
1607 inst_fields = frozenset(("pinst_cnt", "pinst_list",
1608 "sinst_cnt", "sinst_list"))
1609 if inst_fields & frozenset(self.op.output_fields):
1610 instancelist = self.cfg.GetInstanceList()
1612 for instance_name in instancelist:
1613 inst = self.cfg.GetInstanceInfo(instance_name)
1614 if inst.primary_node in node_to_primary:
1615 node_to_primary[inst.primary_node].add(inst.name)
1616 for secnode in inst.secondary_nodes:
1617 if secnode in node_to_secondary:
1618 node_to_secondary[secnode].add(inst.name)
1620 # end data gathering
1623 for node in nodelist:
1625 for field in self.op.output_fields:
1628 elif field == "pinst_list":
1629 val = list(node_to_primary[node.name])
1630 elif field == "sinst_list":
1631 val = list(node_to_secondary[node.name])
1632 elif field == "pinst_cnt":
1633 val = len(node_to_primary[node.name])
1634 elif field == "sinst_cnt":
1635 val = len(node_to_secondary[node.name])
1636 elif field == "pip":
1637 val = node.primary_ip
1638 elif field == "sip":
1639 val = node.secondary_ip
1640 elif field == "tags":
1641 val = list(node.GetTags())
1642 elif field == "serial_no":
1643 val = node.serial_no
1644 elif self._FIELDS_DYNAMIC.Matches(field):
1645 val = live_data[node.name].get(field, None)
1647 raise errors.ParameterError(field)
1648 node_output.append(val)
1649 output.append(node_output)
1654 class LUQueryNodeVolumes(NoHooksLU):
1655 """Logical unit for getting volumes on node(s).
1658 _OP_REQP = ["nodes", "output_fields"]
1660 _FIELDS_DYNAMIC = _FieldSet("phys", "vg", "name", "size", "instance")
1661 _FIELDS_STATIC = _FieldSet("node")
1663 def ExpandNames(self):
1664 _CheckOutputFields(static=self._FIELDS_STATIC,
1665 dynamic=self._FIELDS_DYNAMIC,
1666 selected=self.op.output_fields)
1668 self.needed_locks = {}
1669 self.share_locks[locking.LEVEL_NODE] = 1
1670 if not self.op.nodes:
1671 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1673 self.needed_locks[locking.LEVEL_NODE] = \
1674 _GetWantedNodes(self, self.op.nodes)
1676 def CheckPrereq(self):
1677 """Check prerequisites.
1679 This checks that the fields required are valid output fields.
1682 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1684 def Exec(self, feedback_fn):
1685 """Computes the list of nodes and their attributes.
1688 nodenames = self.nodes
1689 volumes = self.rpc.call_node_volumes(nodenames)
1691 ilist = [self.cfg.GetInstanceInfo(iname) for iname
1692 in self.cfg.GetInstanceList()]
1694 lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1697 for node in nodenames:
1698 if node not in volumes or not volumes[node]:
1701 node_vols = volumes[node][:]
1702 node_vols.sort(key=lambda vol: vol['dev'])
1704 for vol in node_vols:
1706 for field in self.op.output_fields:
1709 elif field == "phys":
1713 elif field == "name":
1715 elif field == "size":
1716 val = int(float(vol['size']))
1717 elif field == "instance":
1719 if node not in lv_by_node[inst]:
1721 if vol['name'] in lv_by_node[inst][node]:
1727 raise errors.ParameterError(field)
1728 node_output.append(str(val))
1730 output.append(node_output)
1735 class LUAddNode(LogicalUnit):
1736 """Logical unit for adding node to the cluster.
1740 HTYPE = constants.HTYPE_NODE
1741 _OP_REQP = ["node_name"]
1743 def BuildHooksEnv(self):
1746 This will run on all nodes before, and on all nodes + the new node after.
1750 "OP_TARGET": self.op.node_name,
1751 "NODE_NAME": self.op.node_name,
1752 "NODE_PIP": self.op.primary_ip,
1753 "NODE_SIP": self.op.secondary_ip,
1755 nodes_0 = self.cfg.GetNodeList()
1756 nodes_1 = nodes_0 + [self.op.node_name, ]
1757 return env, nodes_0, nodes_1
1759 def CheckPrereq(self):
1760 """Check prerequisites.
1763 - the new node is not already in the config
1765 - its parameters (single/dual homed) match the cluster
1767 Any errors are signalled by raising errors.OpPrereqError.
1770 node_name = self.op.node_name
1773 dns_data = utils.HostInfo(node_name)
1775 node = dns_data.name
1776 primary_ip = self.op.primary_ip = dns_data.ip
1777 secondary_ip = getattr(self.op, "secondary_ip", None)
1778 if secondary_ip is None:
1779 secondary_ip = primary_ip
1780 if not utils.IsValidIP(secondary_ip):
1781 raise errors.OpPrereqError("Invalid secondary IP given")
1782 self.op.secondary_ip = secondary_ip
1784 node_list = cfg.GetNodeList()
1785 if not self.op.readd and node in node_list:
1786 raise errors.OpPrereqError("Node %s is already in the configuration" %
1788 elif self.op.readd and node not in node_list:
1789 raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1791 for existing_node_name in node_list:
1792 existing_node = cfg.GetNodeInfo(existing_node_name)
1794 if self.op.readd and node == existing_node_name:
1795 if (existing_node.primary_ip != primary_ip or
1796 existing_node.secondary_ip != secondary_ip):
1797 raise errors.OpPrereqError("Readded node doesn't have the same IP"
1798 " address configuration as before")
1801 if (existing_node.primary_ip == primary_ip or
1802 existing_node.secondary_ip == primary_ip or
1803 existing_node.primary_ip == secondary_ip or
1804 existing_node.secondary_ip == secondary_ip):
1805 raise errors.OpPrereqError("New node ip address(es) conflict with"
1806 " existing node %s" % existing_node.name)
1808 # check that the type of the node (single versus dual homed) is the
1809 # same as for the master
1810 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
1811 master_singlehomed = myself.secondary_ip == myself.primary_ip
1812 newbie_singlehomed = secondary_ip == primary_ip
1813 if master_singlehomed != newbie_singlehomed:
1814 if master_singlehomed:
1815 raise errors.OpPrereqError("The master has no private ip but the"
1816 " new node has one")
1818 raise errors.OpPrereqError("The master has a private ip but the"
1819 " new node doesn't have one")
1821 # check reachability
1822 if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1823 raise errors.OpPrereqError("Node not reachable by ping")
1825 if not newbie_singlehomed:
1826 # check reachability from my secondary ip to newbie's secondary ip
1827 if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1828 source=myself.secondary_ip):
1829 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1830 " based ping to noded port")
1832 self.new_node = objects.Node(name=node,
1833 primary_ip=primary_ip,
1834 secondary_ip=secondary_ip)
1836 def Exec(self, feedback_fn):
1837 """Adds the new node to the cluster.
1840 new_node = self.new_node
1841 node = new_node.name
1843 # check connectivity
1844 result = self.rpc.call_version([node])[node]
1846 if constants.PROTOCOL_VERSION == result:
1847 logging.info("Communication to node %s fine, sw version %s match",
1850 raise errors.OpExecError("Version mismatch master version %s,"
1851 " node version %s" %
1852 (constants.PROTOCOL_VERSION, result))
1854 raise errors.OpExecError("Cannot get version from the new node")
1857 logging.info("Copy ssh key to node %s", node)
1858 priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1860 keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1861 constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1867 keyarray.append(f.read())
1871 result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
1873 keyarray[3], keyarray[4], keyarray[5])
1876 raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1878 # Add node to our /etc/hosts, and add key to known_hosts
1879 utils.AddHostToEtcHosts(new_node.name)
1881 if new_node.secondary_ip != new_node.primary_ip:
1882 if not self.rpc.call_node_has_ip_address(new_node.name,
1883 new_node.secondary_ip):
1884 raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1885 " you gave (%s). Please fix and re-run this"
1886 " command." % new_node.secondary_ip)
1888 node_verify_list = [self.cfg.GetMasterNode()]
1889 node_verify_param = {
1891 # TODO: do a node-net-test as well?
1894 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
1895 self.cfg.GetClusterName())
1896 for verifier in node_verify_list:
1897 if not result[verifier]:
1898 raise errors.OpExecError("Cannot communicate with %s's node daemon"
1899 " for remote verification" % verifier)
1900 if result[verifier]['nodelist']:
1901 for failed in result[verifier]['nodelist']:
1902 feedback_fn("ssh/hostname verification failed %s -> %s" %
1903 (verifier, result[verifier]['nodelist'][failed]))
1904 raise errors.OpExecError("ssh/hostname verification failed.")
1906 # Distribute updated /etc/hosts and known_hosts to all nodes,
1907 # including the node just added
1908 myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
1909 dist_nodes = self.cfg.GetNodeList()
1910 if not self.op.readd:
1911 dist_nodes.append(node)
1912 if myself.name in dist_nodes:
1913 dist_nodes.remove(myself.name)
1915 logging.debug("Copying hosts and known_hosts to all nodes")
1916 for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1917 result = self.rpc.call_upload_file(dist_nodes, fname)
1918 for to_node in dist_nodes:
1919 if not result[to_node]:
1920 logging.error("Copy of file %s to node %s failed", fname, to_node)
1923 if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
1924 to_copy.append(constants.VNC_PASSWORD_FILE)
1925 for fname in to_copy:
1926 result = self.rpc.call_upload_file([node], fname)
1927 if not result[node]:
1928 logging.error("Could not copy file %s to node %s", fname, node)
1931 self.context.ReaddNode(new_node)
1933 self.context.AddNode(new_node)
1936 class LUQueryClusterInfo(NoHooksLU):
1937 """Query cluster configuration.
1944 def ExpandNames(self):
1945 self.needed_locks = {}
1947 def CheckPrereq(self):
1948 """No prerequsites needed for this LU.
1953 def Exec(self, feedback_fn):
1954 """Return cluster config.
1957 cluster = self.cfg.GetClusterInfo()
1959 "software_version": constants.RELEASE_VERSION,
1960 "protocol_version": constants.PROTOCOL_VERSION,
1961 "config_version": constants.CONFIG_VERSION,
1962 "os_api_version": constants.OS_API_VERSION,
1963 "export_version": constants.EXPORT_VERSION,
1964 "architecture": (platform.architecture()[0], platform.machine()),
1965 "name": cluster.cluster_name,
1966 "master": cluster.master_node,
1967 "default_hypervisor": cluster.default_hypervisor,
1968 "enabled_hypervisors": cluster.enabled_hypervisors,
1969 "hvparams": cluster.hvparams,
1970 "beparams": cluster.beparams,
1976 class LUQueryConfigValues(NoHooksLU):
1977 """Return configuration values.
1982 _FIELDS_DYNAMIC = _FieldSet()
1983 _FIELDS_STATIC = _FieldSet("cluster_name", "master_node", "drain_flag")
1985 def ExpandNames(self):
1986 self.needed_locks = {}
1988 _CheckOutputFields(static=self._FIELDS_STATIC,
1989 dynamic=self._FIELDS_DYNAMIC,
1990 selected=self.op.output_fields)
1992 def CheckPrereq(self):
1993 """No prerequisites.
1998 def Exec(self, feedback_fn):
1999 """Dump a representation of the cluster config to the standard output.
2003 for field in self.op.output_fields:
2004 if field == "cluster_name":
2005 entry = self.cfg.GetClusterName()
2006 elif field == "master_node":
2007 entry = self.cfg.GetMasterNode()
2008 elif field == "drain_flag":
2009 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2011 raise errors.ParameterError(field)
2012 values.append(entry)
2016 class LUActivateInstanceDisks(NoHooksLU):
2017 """Bring up an instance's disks.
2020 _OP_REQP = ["instance_name"]
2023 def ExpandNames(self):
2024 self._ExpandAndLockInstance()
2025 self.needed_locks[locking.LEVEL_NODE] = []
2026 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2028 def DeclareLocks(self, level):
2029 if level == locking.LEVEL_NODE:
2030 self._LockInstancesNodes()
2032 def CheckPrereq(self):
2033 """Check prerequisites.
2035 This checks that the instance is in the cluster.
2038 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2039 assert self.instance is not None, \
2040 "Cannot retrieve locked instance %s" % self.op.instance_name
2042 def Exec(self, feedback_fn):
2043 """Activate the disks.
2046 disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2048 raise errors.OpExecError("Cannot activate block devices")
2053 def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2054 """Prepare the block devices for an instance.
2056 This sets up the block devices on all nodes.
2059 instance: a ganeti.objects.Instance object
2060 ignore_secondaries: if true, errors on secondary nodes won't result
2061 in an error return from the function
2064 false if the operation failed
2065 list of (host, instance_visible_name, node_visible_name) if the operation
2066 succeeded, with the mapping from node devices to instance devices
2070 iname = instance.name
2071 # With the two passes mechanism we try to reduce the window of
2072 # opportunity for the race condition of switching DRBD to primary
2073 # before handshaking occurred, but we do not eliminate it
2075 # The proper fix would be to wait (with some limits) until the
2076 # connection has been made and drbd transitions from WFConnection
2077 # into any other network-connected state (Connected, SyncTarget,
2080 # 1st pass, assemble on all nodes in secondary mode
2081 for inst_disk in instance.disks:
2082 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2083 lu.cfg.SetDiskID(node_disk, node)
2084 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2086 lu.proc.LogWarning("Could not prepare block device %s on node %s"
2087 " (is_primary=False, pass=1)",
2088 inst_disk.iv_name, node)
2089 if not ignore_secondaries:
2092 # FIXME: race condition on drbd migration to primary
2094 # 2nd pass, do only the primary node
2095 for inst_disk in instance.disks:
2096 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2097 if node != instance.primary_node:
2099 lu.cfg.SetDiskID(node_disk, node)
2100 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2102 lu.proc.LogWarning("Could not prepare block device %s on node %s"
2103 " (is_primary=True, pass=2)",
2104 inst_disk.iv_name, node)
2106 device_info.append((instance.primary_node, inst_disk.iv_name, result))
2108 # leave the disks configured for the primary node
2109 # this is a workaround that would be fixed better by
2110 # improving the logical/physical id handling
2111 for disk in instance.disks:
2112 lu.cfg.SetDiskID(disk, instance.primary_node)
2114 return disks_ok, device_info
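# For illustration (hypothetical node name): for a single-disk instance whose
# primary node is "node1.example.com", a successful call would return
# something like
#   (True, [("node1.example.com", "sda", <result of blockdev_assemble>)])
# i.e. the (host, iv_name, result) tuples appended above; only the tuple
# shape is taken from the code, the concrete values are made up.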
2117 def _StartInstanceDisks(lu, instance, force):
2118 """Start the disks of an instance.
2121 disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2122 ignore_secondaries=force)
2124 _ShutdownInstanceDisks(lu, instance)
2125 if force is not None and not force:
2126 lu.proc.LogWarning("", hint="If the message above refers to a"
2128 " you can retry the operation using '--force'.")
2129 raise errors.OpExecError("Disk consistency error")
2132 class LUDeactivateInstanceDisks(NoHooksLU):
2133 """Shutdown an instance's disks.
2136 _OP_REQP = ["instance_name"]
2139 def ExpandNames(self):
2140 self._ExpandAndLockInstance()
2141 self.needed_locks[locking.LEVEL_NODE] = []
2142 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2144 def DeclareLocks(self, level):
2145 if level == locking.LEVEL_NODE:
2146 self._LockInstancesNodes()
2148 def CheckPrereq(self):
2149 """Check prerequisites.
2151 This checks that the instance is in the cluster.
2154 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2155 assert self.instance is not None, \
2156 "Cannot retrieve locked instance %s" % self.op.instance_name
2158 def Exec(self, feedback_fn):
2159 """Deactivate the disks
2162 instance = self.instance
2163 _SafeShutdownInstanceDisks(self, instance)
2166 def _SafeShutdownInstanceDisks(lu, instance):
2167 """Shutdown block devices of an instance.
2169 This function checks if an instance is running, before calling
2170 _ShutdownInstanceDisks.
2173 ins_l = lu.rpc.call_instance_list([instance.primary_node],
2174 [instance.hypervisor])
2175 ins_l = ins_l[instance.primary_node]
2176 if not isinstance(ins_l, list):
2177 raise errors.OpExecError("Can't contact node '%s'" %
2178 instance.primary_node)
2180 if instance.name in ins_l:
2181 raise errors.OpExecError("Instance is running, can't shutdown"
2184 _ShutdownInstanceDisks(lu, instance)
2187 def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2188 """Shutdown block devices of an instance.
2190 This does the shutdown on all nodes of the instance.
2192 If ignore_primary is false, errors on the primary node are
2197 for disk in instance.disks:
2198 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2199 lu.cfg.SetDiskID(top_disk, node)
2200 if not lu.rpc.call_blockdev_shutdown(node, top_disk):
2201 logging.error("Could not shutdown block device %s on node %s",
2203 if not ignore_primary or node != instance.primary_node:
2208 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2209 """Checks if a node has enough free memory.
2211 This function checks if a given node has the needed amount of free
2212 memory. In case the node has less memory or we cannot get the
2213 information from the node, this function raises an OpPrereqError
2216 @type lu: C{LogicalUnit}
2217 @param lu: a logical unit from which we get configuration data
2219 @param node: the node to check
2220 @type reason: C{str}
2221 @param reason: string to use in the error message
2222 @type requested: C{int}
2223 @param requested: the amount of memory in MiB to check for
2224 @type hypervisor: C{str}
2225 @param hypervisor: the hypervisor to ask for memory stats
2226 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2227 we cannot check the node
2230 nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2231 if not nodeinfo or not isinstance(nodeinfo, dict):
2232 raise errors.OpPrereqError("Could not contact node %s for resource"
2233 " information" % (node,))
2235 free_mem = nodeinfo[node].get('memory_free')
2236 if not isinstance(free_mem, int):
2237 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2238 " was '%s'" % (node, free_mem))
2239 if requested > free_mem:
2240 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2241 " needed %s MiB, available %s MiB" %
2242 (node, reason, requested, free_mem))
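# Sketch of the data this check relies on (values are hypothetical):
# call_node_info is expected to return a dict keyed by node name, e.g.
#   {"node1.example.com": {"memory_free": 2048, "vg_free": 51200, ...}}
# where "memory_free" is the free memory in MiB as an integer; any other
# shape is treated as an unreachable node.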
2245 class LUStartupInstance(LogicalUnit):
2246 """Starts an instance.
2249 HPATH = "instance-start"
2250 HTYPE = constants.HTYPE_INSTANCE
2251 _OP_REQP = ["instance_name", "force"]
2254 def ExpandNames(self):
2255 self._ExpandAndLockInstance()
2256 self.needed_locks[locking.LEVEL_NODE] = []
2257 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2259 def DeclareLocks(self, level):
2260 if level == locking.LEVEL_NODE:
2261 self._LockInstancesNodes()
2263 def BuildHooksEnv(self):
2266 This runs on master, primary and secondary nodes of the instance.
2270 "FORCE": self.op.force,
2272 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2273 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2274 list(self.instance.secondary_nodes))
2277 def CheckPrereq(self):
2278 """Check prerequisites.
2280 This checks that the instance is in the cluster.
2283 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2284 assert self.instance is not None, \
2285 "Cannot retrieve locked instance %s" % self.op.instance_name
2287 bep = self.cfg.GetClusterInfo().FillBE(instance)
2288 # check bridges existence
2289 _CheckInstanceBridgesExist(self, instance)
2291 _CheckNodeFreeMemory(self, instance.primary_node,
2292 "starting instance %s" % instance.name,
2293 bep[constants.BE_MEMORY], instance.hypervisor)
2295 def Exec(self, feedback_fn):
2296 """Start the instance.
2299 instance = self.instance
2300 force = self.op.force
2301 extra_args = getattr(self.op, "extra_args", "")
2303 self.cfg.MarkInstanceUp(instance.name)
2305 node_current = instance.primary_node
2307 _StartInstanceDisks(self, instance, force)
2309 if not self.rpc.call_instance_start(node_current, instance, extra_args):
2310 _ShutdownInstanceDisks(self, instance)
2311 raise errors.OpExecError("Could not start instance")
2314 class LURebootInstance(LogicalUnit):
2315 """Reboot an instance.
2318 HPATH = "instance-reboot"
2319 HTYPE = constants.HTYPE_INSTANCE
2320 _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2323 def ExpandNames(self):
2324 if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2325 constants.INSTANCE_REBOOT_HARD,
2326 constants.INSTANCE_REBOOT_FULL]:
2327 raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2328 (constants.INSTANCE_REBOOT_SOFT,
2329 constants.INSTANCE_REBOOT_HARD,
2330 constants.INSTANCE_REBOOT_FULL))
2331 self._ExpandAndLockInstance()
2332 self.needed_locks[locking.LEVEL_NODE] = []
2333 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2335 def DeclareLocks(self, level):
2336 if level == locking.LEVEL_NODE:
2337 primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
2338 self._LockInstancesNodes(primary_only=primary_only)
2340 def BuildHooksEnv(self):
2343 This runs on master, primary and secondary nodes of the instance.
2347 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2349 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2350 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2351 list(self.instance.secondary_nodes))
2354 def CheckPrereq(self):
2355 """Check prerequisites.
2357 This checks that the instance is in the cluster.
2360 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2361 assert self.instance is not None, \
2362 "Cannot retrieve locked instance %s" % self.op.instance_name
2364 # check bridges existence
2365 _CheckInstanceBridgesExist(self, instance)
2367 def Exec(self, feedback_fn):
2368 """Reboot the instance.
2371 instance = self.instance
2372 ignore_secondaries = self.op.ignore_secondaries
2373 reboot_type = self.op.reboot_type
2374 extra_args = getattr(self.op, "extra_args", "")
2376 node_current = instance.primary_node
2378 if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2379 constants.INSTANCE_REBOOT_HARD]:
2380 if not self.rpc.call_instance_reboot(node_current, instance,
2381 reboot_type, extra_args):
2382 raise errors.OpExecError("Could not reboot instance")
2384 if not self.rpc.call_instance_shutdown(node_current, instance):
2385 raise errors.OpExecError("could not shutdown instance for full reboot")
2386 _ShutdownInstanceDisks(self, instance)
2387 _StartInstanceDisks(self, instance, ignore_secondaries)
2388 if not self.rpc.call_instance_start(node_current, instance, extra_args):
2389 _ShutdownInstanceDisks(self, instance)
2390 raise errors.OpExecError("Could not start instance for full reboot")
2392 self.cfg.MarkInstanceUp(instance.name)
2395 class LUShutdownInstance(LogicalUnit):
2396 """Shutdown an instance.
2399 HPATH = "instance-stop"
2400 HTYPE = constants.HTYPE_INSTANCE
2401 _OP_REQP = ["instance_name"]
2404 def ExpandNames(self):
2405 self._ExpandAndLockInstance()
2406 self.needed_locks[locking.LEVEL_NODE] = []
2407 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2409 def DeclareLocks(self, level):
2410 if level == locking.LEVEL_NODE:
2411 self._LockInstancesNodes()
2413 def BuildHooksEnv(self):
2416 This runs on master, primary and secondary nodes of the instance.
2419 env = _BuildInstanceHookEnvByObject(self, self.instance)
2420 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2421 list(self.instance.secondary_nodes))
2424 def CheckPrereq(self):
2425 """Check prerequisites.
2427 This checks that the instance is in the cluster.
2430 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2431 assert self.instance is not None, \
2432 "Cannot retrieve locked instance %s" % self.op.instance_name
2434 def Exec(self, feedback_fn):
2435 """Shutdown the instance.
2438 instance = self.instance
2439 node_current = instance.primary_node
2440 self.cfg.MarkInstanceDown(instance.name)
2441 if not self.rpc.call_instance_shutdown(node_current, instance):
2442 self.proc.LogWarning("Could not shutdown instance")
2444 _ShutdownInstanceDisks(self, instance)
2447 class LUReinstallInstance(LogicalUnit):
2448 """Reinstall an instance.
2451 HPATH = "instance-reinstall"
2452 HTYPE = constants.HTYPE_INSTANCE
2453 _OP_REQP = ["instance_name"]
2456 def ExpandNames(self):
2457 self._ExpandAndLockInstance()
2458 self.needed_locks[locking.LEVEL_NODE] = []
2459 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2461 def DeclareLocks(self, level):
2462 if level == locking.LEVEL_NODE:
2463 self._LockInstancesNodes()
2465 def BuildHooksEnv(self):
2468 This runs on master, primary and secondary nodes of the instance.
2471 env = _BuildInstanceHookEnvByObject(self, self.instance)
2472 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2473 list(self.instance.secondary_nodes))
2476 def CheckPrereq(self):
2477 """Check prerequisites.
2479 This checks that the instance is in the cluster and is not running.
2482 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2483 assert instance is not None, \
2484 "Cannot retrieve locked instance %s" % self.op.instance_name
2486 if instance.disk_template == constants.DT_DISKLESS:
2487 raise errors.OpPrereqError("Instance '%s' has no disks" %
2488 self.op.instance_name)
2489 if instance.status != "down":
2490 raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2491 self.op.instance_name)
2492 remote_info = self.rpc.call_instance_info(instance.primary_node,
2494 instance.hypervisor)
2496 raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2497 (self.op.instance_name,
2498 instance.primary_node))
2500 self.op.os_type = getattr(self.op, "os_type", None)
2501 if self.op.os_type is not None:
2503 pnode = self.cfg.GetNodeInfo(
2504 self.cfg.ExpandNodeName(instance.primary_node))
2506 raise errors.OpPrereqError("Primary node '%s' is unknown" %
2508 os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
2510 raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2511 " primary node" % self.op.os_type)
2513 self.instance = instance
2515 def Exec(self, feedback_fn):
2516 """Reinstall the instance.
2519 inst = self.instance
2521 if self.op.os_type is not None:
2522 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2523 inst.os = self.op.os_type
2524 self.cfg.Update(inst)
2526 _StartInstanceDisks(self, inst, None)
2528 feedback_fn("Running the instance OS create scripts...")
2529 if not self.rpc.call_instance_os_add(inst.primary_node, inst):
2530 raise errors.OpExecError("Could not install OS for instance %s"
2532 (inst.name, inst.primary_node))
2534 _ShutdownInstanceDisks(self, inst)
2537 class LURenameInstance(LogicalUnit):
2538 """Rename an instance.
2541 HPATH = "instance-rename"
2542 HTYPE = constants.HTYPE_INSTANCE
2543 _OP_REQP = ["instance_name", "new_name"]
2545 def BuildHooksEnv(self):
2548 This runs on master, primary and secondary nodes of the instance.
2551 env = _BuildInstanceHookEnvByObject(self, self.instance)
2552 env["INSTANCE_NEW_NAME"] = self.op.new_name
2553 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2554 list(self.instance.secondary_nodes))
2557 def CheckPrereq(self):
2558 """Check prerequisites.
2560 This checks that the instance is in the cluster and is not running.
2563 instance = self.cfg.GetInstanceInfo(
2564 self.cfg.ExpandInstanceName(self.op.instance_name))
2565 if instance is None:
2566 raise errors.OpPrereqError("Instance '%s' not known" %
2567 self.op.instance_name)
2568 if instance.status != "down":
2569 raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2570 self.op.instance_name)
2571 remote_info = self.rpc.call_instance_info(instance.primary_node,
2573 instance.hypervisor)
2575 raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2576 (self.op.instance_name,
2577 instance.primary_node))
2578 self.instance = instance
2580 # new name verification
2581 name_info = utils.HostInfo(self.op.new_name)
2583 self.op.new_name = new_name = name_info.name
2584 instance_list = self.cfg.GetInstanceList()
2585 if new_name in instance_list:
2586 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2589 if not getattr(self.op, "ignore_ip", False):
2590 if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2591 raise errors.OpPrereqError("IP %s of instance %s already in use" %
2592 (name_info.ip, new_name))
2595 def Exec(self, feedback_fn):
2596 """Reinstall the instance.
2599 inst = self.instance
2600 old_name = inst.name
2602 if inst.disk_template == constants.DT_FILE:
2603 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2605 self.cfg.RenameInstance(inst.name, self.op.new_name)
2606 # Change the instance lock. This is definitely safe while we hold the BGL
2607 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
2608 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2610 # re-read the instance from the configuration after rename
2611 inst = self.cfg.GetInstanceInfo(self.op.new_name)
2613 if inst.disk_template == constants.DT_FILE:
2614 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2615 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
2616 old_file_storage_dir,
2617 new_file_storage_dir)
2620 raise errors.OpExecError("Could not connect to node '%s' to rename"
2621 " directory '%s' to '%s' (but the instance"
2622 " has been renamed in Ganeti)" % (
2623 inst.primary_node, old_file_storage_dir,
2624 new_file_storage_dir))
2627 raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2628 " (but the instance has been renamed in"
2629 " Ganeti)" % (old_file_storage_dir,
2630 new_file_storage_dir))
2632 _StartInstanceDisks(self, inst, None)
2634 if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
2636 msg = ("Could not run OS rename script for instance %s on node %s"
2637 " (but the instance has been renamed in Ganeti)" %
2638 (inst.name, inst.primary_node))
2639 self.proc.LogWarning(msg)
2641 _ShutdownInstanceDisks(self, inst)
2644 class LURemoveInstance(LogicalUnit):
2645 """Remove an instance.
2648 HPATH = "instance-remove"
2649 HTYPE = constants.HTYPE_INSTANCE
2650 _OP_REQP = ["instance_name", "ignore_failures"]
2653 def ExpandNames(self):
2654 self._ExpandAndLockInstance()
2655 self.needed_locks[locking.LEVEL_NODE] = []
2656 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2658 def DeclareLocks(self, level):
2659 if level == locking.LEVEL_NODE:
2660 self._LockInstancesNodes()
2662 def BuildHooksEnv(self):
2665 This runs on master, primary and secondary nodes of the instance.
2668 env = _BuildInstanceHookEnvByObject(self, self.instance)
2669 nl = [self.cfg.GetMasterNode()]
2672 def CheckPrereq(self):
2673 """Check prerequisites.
2675 This checks that the instance is in the cluster.
2678 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2679 assert self.instance is not None, \
2680 "Cannot retrieve locked instance %s" % self.op.instance_name
2682 def Exec(self, feedback_fn):
2683 """Remove the instance.
2686 instance = self.instance
2687 logging.info("Shutting down instance %s on node %s",
2688 instance.name, instance.primary_node)
2690 if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
2691 if self.op.ignore_failures:
2692 feedback_fn("Warning: can't shutdown instance")
2694 raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2695 (instance.name, instance.primary_node))
2697 logging.info("Removing block devices for instance %s", instance.name)
2699 if not _RemoveDisks(self, instance):
2700 if self.op.ignore_failures:
2701 feedback_fn("Warning: can't remove instance's disks")
2703 raise errors.OpExecError("Can't remove instance's disks")
2705 logging.info("Removing instance %s out of cluster config", instance.name)
2707 self.cfg.RemoveInstance(instance.name)
2708 self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2711 class LUQueryInstances(NoHooksLU):
2712 """Logical unit for querying instances.
2715 _OP_REQP = ["output_fields", "names"]
2717 _FIELDS_STATIC = _FieldSet(*["name", "os", "pnode", "snodes",
2718 "admin_state", "admin_ram",
2719 "disk_template", "ip", "mac", "bridge",
2720 "sda_size", "sdb_size", "vcpus", "tags",
2721 "network_port", "beparams",
2722 "(disk).(size)/([0-9]+)",
2723 "(nic).(mac|ip|bridge)/([0-9]+)",
2724 "(disk|nic).(count)",
2725 "serial_no", "hypervisor", "hvparams",] +
2727 for name in constants.HVS_PARAMETERS] +
2729 for name in constants.BES_PARAMETERS])
2730 _FIELDS_DYNAMIC = _FieldSet("oper_state", "oper_ram", "status")
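# The parenthesised entries in _FIELDS_STATIC above are regular-expression
# patterns rather than literal field names: a query field such as
# "disk.size/0" or "nic.mac/1" (examples only) is matched through
# _FIELDS_STATIC.Matches() and dispatched in Exec() via the captured groups
# (see the st_groups handling below).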
2733 def ExpandNames(self):
2734 _CheckOutputFields(static=self._FIELDS_STATIC,
2735 dynamic=self._FIELDS_DYNAMIC,
2736 selected=self.op.output_fields)
2738 self.needed_locks = {}
2739 self.share_locks[locking.LEVEL_INSTANCE] = 1
2740 self.share_locks[locking.LEVEL_NODE] = 1
2743 self.wanted = _GetWantedInstances(self, self.op.names)
2745 self.wanted = locking.ALL_SET
2747 self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2749 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
2750 self.needed_locks[locking.LEVEL_NODE] = []
2751 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2753 def DeclareLocks(self, level):
2754 if level == locking.LEVEL_NODE and self.do_locking:
2755 self._LockInstancesNodes()
2757 def CheckPrereq(self):
2758 """Check prerequisites.
2763 def Exec(self, feedback_fn):
2764 """Computes the list of nodes and their attributes.
2767 all_info = self.cfg.GetAllInstancesInfo()
2769 instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2770 elif self.wanted != locking.ALL_SET:
2771 instance_names = self.wanted
2772 missing = set(instance_names).difference(all_info.keys())
2774 raise errors.OpExecError(
2775 "Some instances were removed before retrieving their data: %s"
2778 instance_names = all_info.keys()
2780 instance_names = utils.NiceSort(instance_names)
2781 instance_list = [all_info[iname] for iname in instance_names]
2783 # begin data gathering
2785 nodes = frozenset([inst.primary_node for inst in instance_list])
2786 hv_list = list(set([inst.hypervisor for inst in instance_list]))
2791 node_data = self.rpc.call_all_instances_info(nodes, hv_list)
2793 result = node_data[name]
2795 live_data.update(result)
2796 elif result == False:
2797 bad_nodes.append(name)
2798 # else no instance is alive
2800 live_data = dict([(name, {}) for name in instance_names])
2802 # end data gathering
2807 for instance in instance_list:
2809 i_hv = self.cfg.GetClusterInfo().FillHV(instance)
2810 i_be = self.cfg.GetClusterInfo().FillBE(instance)
2811 for field in self.op.output_fields:
2812 st_match = self._FIELDS_STATIC.Matches(field)
2817 elif field == "pnode":
2818 val = instance.primary_node
2819 elif field == "snodes":
2820 val = list(instance.secondary_nodes)
2821 elif field == "admin_state":
2822 val = (instance.status != "down")
2823 elif field == "oper_state":
2824 if instance.primary_node in bad_nodes:
2827 val = bool(live_data.get(instance.name))
2828 elif field == "status":
2829 if instance.primary_node in bad_nodes:
2830 val = "ERROR_nodedown"
2832 running = bool(live_data.get(instance.name))
2834 if instance.status != "down":
2839 if instance.status != "down":
2843 elif field == "oper_ram":
2844 if instance.primary_node in bad_nodes:
2846 elif instance.name in live_data:
2847 val = live_data[instance.name].get("memory", "?")
2850 elif field == "disk_template":
2851 val = instance.disk_template
2853 val = instance.nics[0].ip
2854 elif field == "bridge":
2855 val = instance.nics[0].bridge
2856 elif field == "mac":
2857 val = instance.nics[0].mac
2858 elif field == "sda_size" or field == "sdb_size":
2859 disk = instance.FindDisk(field[:3])
2864 elif field == "tags":
2865 val = list(instance.GetTags())
2866 elif field == "serial_no":
2867 val = instance.serial_no
2868 elif field == "network_port":
2869 val = instance.network_port
2870 elif field == "hypervisor":
2871 val = instance.hypervisor
2872 elif field == "hvparams":
2874 elif (field.startswith(HVPREFIX) and
2875 field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
2876 val = i_hv.get(field[len(HVPREFIX):], None)
2877 elif field == "beparams":
2879 elif (field.startswith(BEPREFIX) and
2880 field[len(BEPREFIX):] in constants.BES_PARAMETERS):
2881 val = i_be.get(field[len(BEPREFIX):], None)
2882 elif st_match and st_match.groups():
2883 # matches a variable list
2884 st_groups = st_match.groups()
2885 if st_groups and st_groups[0] == "disk":
2886 if st_groups[1] == "count":
2887 val = len(instance.disks)
2888 elif st_groups[1] == "size":
2889 disk_idx = int(st_groups[2])
2890 if disk_idx >= len(instance.disks):
2893 val = instance.disks[disk_idx].size
2895 assert False, "Unhandled disk parameter"
2896 elif st_groups[0] == "nic":
2897 if st_groups[1] == "count":
2898 val = len(instance.nics)
2901 nic_idx = int(st_groups[2])
2902 if nic_idx >= len(instance.nics):
2905 if st_groups[1] == "mac":
2906 val = instance.nics[nic_idx].mac
2907 elif st_groups[1] == "ip":
2908 val = instance.nics[nic_idx].ip
2909 elif st_groups[1] == "bridge":
2910 val = instance.nics[nic_idx].bridge
2912 assert False, "Unhandled NIC parameter"
2914 assert False, "Unhandled variable parameter"
2916 raise errors.ParameterError(field)
2923 class LUFailoverInstance(LogicalUnit):
2924 """Failover an instance.
2927 HPATH = "instance-failover"
2928 HTYPE = constants.HTYPE_INSTANCE
2929 _OP_REQP = ["instance_name", "ignore_consistency"]
2932 def ExpandNames(self):
2933 self._ExpandAndLockInstance()
2934 self.needed_locks[locking.LEVEL_NODE] = []
2935 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2937 def DeclareLocks(self, level):
2938 if level == locking.LEVEL_NODE:
2939 self._LockInstancesNodes()
2941 def BuildHooksEnv(self):
2944 This runs on master, primary and secondary nodes of the instance.
2948 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2950 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2951 nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
2954 def CheckPrereq(self):
2955 """Check prerequisites.
2957 This checks that the instance is in the cluster.
2960 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2961 assert self.instance is not None, \
2962 "Cannot retrieve locked instance %s" % self.op.instance_name
2964 bep = self.cfg.GetClusterInfo().FillBE(instance)
2965 if instance.disk_template not in constants.DTS_NET_MIRROR:
2966 raise errors.OpPrereqError("Instance's disk layout is not"
2967 " network mirrored, cannot failover.")
2969 secondary_nodes = instance.secondary_nodes
2970 if not secondary_nodes:
2971 raise errors.ProgrammerError("no secondary node but using "
2972 "a mirrored disk template")
2974 target_node = secondary_nodes[0]
2975 # check memory requirements on the secondary node
2976 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
2977 instance.name, bep[constants.BE_MEMORY],
2978 instance.hypervisor)
2980 # check bridge existence
2981 brlist = [nic.bridge for nic in instance.nics]
2982 if not self.rpc.call_bridges_exist(target_node, brlist):
2983 raise errors.OpPrereqError("One or more target bridges %s does not"
2984 " exist on destination node '%s'" %
2985 (brlist, target_node))
2987 def Exec(self, feedback_fn):
2988 """Failover an instance.
2990 The failover is done by shutting it down on its present node and
2991 starting it on the secondary.
2994 instance = self.instance
2996 source_node = instance.primary_node
2997 target_node = instance.secondary_nodes[0]
2999 feedback_fn("* checking disk consistency between source and target")
3000 for dev in instance.disks:
3001 # for drbd, these are drbd over lvm
3002 if not _CheckDiskConsistency(self, dev, target_node, False):
3003 if instance.status == "up" and not self.op.ignore_consistency:
3004 raise errors.OpExecError("Disk %s is degraded on target node,"
3005 " aborting failover." % dev.iv_name)
3007 feedback_fn("* shutting down instance on source node")
3008 logging.info("Shutting down instance %s on node %s",
3009 instance.name, source_node)
3011 if not self.rpc.call_instance_shutdown(source_node, instance):
3012 if self.op.ignore_consistency:
3013 self.proc.LogWarning("Could not shutdown instance %s on node %s."
3015 " anyway. Please make sure node %s is down",
3016 instance.name, source_node, source_node)
3018 raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3019 (instance.name, source_node))
3021 feedback_fn("* deactivating the instance's disks on source node")
3022 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3023 raise errors.OpExecError("Can't shut down the instance's disks.")
3025 instance.primary_node = target_node
3026 # distribute new instance config to the other nodes
3027 self.cfg.Update(instance)
3029 # Only start the instance if it's marked as up
3030 if instance.status == "up":
3031 feedback_fn("* activating the instance's disks on target node")
3032 logging.info("Starting instance %s on node %s",
3033 instance.name, target_node)
3035 disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3036 ignore_secondaries=True)
3038 _ShutdownInstanceDisks(self, instance)
3039 raise errors.OpExecError("Can't activate the instance's disks")
3041 feedback_fn("* starting the instance on the target node")
3042 if not self.rpc.call_instance_start(target_node, instance, None):
3043 _ShutdownInstanceDisks(self, instance)
3044 raise errors.OpExecError("Could not start instance %s on node %s." %
3045 (instance.name, target_node))
3048 def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3049 """Create a tree of block devices on the primary node.
3051 This always creates all devices.
3055 for child in device.children:
3056 if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
3059 lu.cfg.SetDiskID(device, node)
3060 new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3061 instance.name, True, info)
3064 if device.physical_id is None:
3065 device.physical_id = new_id
3069 def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3070 """Create a tree of block devices on a secondary node.
3072 If this device type has to be created on secondaries, create it and
3075 If not, just recurse to children keeping the same 'force' value.
3078 if device.CreateOnSecondary():
3081 for child in device.children:
3082 if not _CreateBlockDevOnSecondary(lu, node, instance,
3083 child, force, info):
3088 lu.cfg.SetDiskID(device, node)
3089 new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3090 instance.name, False, info)
3093 if device.physical_id is None:
3094 device.physical_id = new_id
3098 def _GenerateUniqueNames(lu, exts):
3099 """Generate a suitable LV name.
3101 This will generate a logical volume name for the given instance.
3106 new_id = lu.cfg.GenerateUniqueID()
3107 results.append("%s%s" % (new_id, val))
3111 def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
3113 """Generate a drbd8 device complete with its children.
3116 port = lu.cfg.AllocatePort()
3117 vgname = lu.cfg.GetVGName()
3118 shared_secret = lu.cfg.GenerateDRBDSecret()
3119 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3120 logical_id=(vgname, names[0]))
3121 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3122 logical_id=(vgname, names[1]))
3123 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3124 logical_id=(primary, secondary, port,
3127 children=[dev_data, dev_meta],
3132 def _GenerateDiskTemplate(lu, template_name,
3133 instance_name, primary_node,
3134 secondary_nodes, disk_sz, swap_sz,
3135 file_storage_dir, file_driver):
3136 """Generate the entire disk layout for a given template type.
3139 #TODO: compute space requirements
3141 vgname = lu.cfg.GetVGName()
3142 if template_name == constants.DT_DISKLESS:
3144 elif template_name == constants.DT_PLAIN:
3145 if len(secondary_nodes) != 0:
3146 raise errors.ProgrammerError("Wrong template configuration")
3148 names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
3149 sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
3150 logical_id=(vgname, names[0]),
3152 sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
3153 logical_id=(vgname, names[1]),
3155 disks = [sda_dev, sdb_dev]
3156 elif template_name == constants.DT_DRBD8:
3157 if len(secondary_nodes) != 1:
3158 raise errors.ProgrammerError("Wrong template configuration")
3159 remote_node = secondary_nodes[0]
3160 (minor_pa, minor_pb,
3161 minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
3162 [primary_node, primary_node, remote_node, remote_node], instance_name)
3164 names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
3165 ".sdb_data", ".sdb_meta"])
3166 drbd_sda_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
3167 disk_sz, names[0:2], "sda",
3169 drbd_sdb_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
3170 swap_sz, names[2:4], "sdb",
3172 disks = [drbd_sda_dev, drbd_sdb_dev]
3173 elif template_name == constants.DT_FILE:
3174 if len(secondary_nodes) != 0:
3175 raise errors.ProgrammerError("Wrong template configuration")
3177 file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
3178 iv_name="sda", logical_id=(file_driver,
3179 "%s/sda" % file_storage_dir))
3180 file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
3181 iv_name="sdb", logical_id=(file_driver,
3182 "%s/sdb" % file_storage_dir))
3183 disks = [file_sda_dev, file_sdb_dev]
3185 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3189 def _GetInstanceInfoText(instance):
3190 """Compute that text that should be added to the disk's metadata.
3193 return "originstname+%s" % instance.name
3196 def _CreateDisks(lu, instance):
3197 """Create all disks for an instance.
3199 This abstracts away some work from AddInstance.
3202 instance: the instance object
3205 True or False showing the success of the creation process
3208 info = _GetInstanceInfoText(instance)
3210 if instance.disk_template == constants.DT_FILE:
3211 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3212 result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
3216 logging.error("Could not connect to node '%s'", instance.primary_node)
3220 logging.error("Failed to create directory '%s'", file_storage_dir)
3223 for device in instance.disks:
3224 logging.info("Creating volume %s for instance %s",
3225 device.iv_name, instance.name)
3227 for secondary_node in instance.secondary_nodes:
3228 if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
3229 device, False, info):
3230 logging.error("Failed to create volume %s (%s) on secondary node %s!",
3231 device.iv_name, device, secondary_node)
3234 if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
3235 instance, device, info):
3236 logging.error("Failed to create volume %s on primary!", device.iv_name)
3242 def _RemoveDisks(lu, instance):
3243 """Remove all disks for an instance.
3245 This abstracts away some work from `AddInstance()` and
3246 `RemoveInstance()`. Note that in case some of the devices couldn't
3247 be removed, the removal will continue with the other ones (compare
3248 with `_CreateDisks()`).
3251 instance: the instance object
3254 True or False showing the success of the removal process
3257 logging.info("Removing block devices for instance %s", instance.name)
3260 for device in instance.disks:
3261 for node, disk in device.ComputeNodeTree(instance.primary_node):
3262 lu.cfg.SetDiskID(disk, node)
3263 if not lu.rpc.call_blockdev_remove(node, disk):
3264 lu.proc.LogWarning("Could not remove block device %s on node %s,"
3265 " continuing anyway", device.iv_name, node)
3268 if instance.disk_template == constants.DT_FILE:
3269 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3270 if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
3272 logging.error("Could not remove directory '%s'", file_storage_dir)
3278 def _ComputeDiskSize(disk_template, disk_size, swap_size):
3279 """Compute disk size requirements in the volume group
3281 This is currently hard-coded for the two-drive layout.
3284 # Required free disk space as a function of disk and swap space
3286 constants.DT_DISKLESS: None,
3287 constants.DT_PLAIN: disk_size + swap_size,
3288 # 256 MB are added for drbd metadata, 128 MB for each drbd device
3289 constants.DT_DRBD8: disk_size + swap_size + 256,
3290 constants.DT_FILE: None,
3293 if disk_template not in req_size_dict:
3294 raise errors.ProgrammerError("Disk template '%s' size requirement"
3295 " is unknown" % disk_template)
3297 return req_size_dict[disk_template]
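# Worked example with hypothetical sizes: for disk_size=10240 and
# swap_size=4096 the DRBD8 template needs 10240 + 4096 + 256 = 14592 MB of
# free volume group space, the plain LVM template needs 14336 MB, and the
# diskless and file templates need no volume group space at all (None).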
3300 def _CheckHVParams(lu, nodenames, hvname, hvparams):
3301 """Hypervisor parameter validation.
3303 This function abstracts the hypervisor parameter validation to be
3304 used in both instance create and instance modify.
3306 @type lu: L{LogicalUnit}
3307 @param lu: the logical unit for which we check
3308 @type nodenames: list
3309 @param nodenames: the list of nodes on which we should check
3310 @type hvname: string
3311 @param hvname: the name of the hypervisor we should use
3312 @type hvparams: dict
3313 @param hvparams: the parameters which we need to check
3314 @raise errors.OpPrereqError: if the parameters are not valid
3317 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
3320 for node in nodenames:
3321 info = hvinfo.get(node, None)
3322 if not info or not isinstance(info, (tuple, list)):
3323 raise errors.OpPrereqError("Cannot get current information"
3324 " from node '%s' (%s)" % (node, info))
3326 raise errors.OpPrereqError("Hypervisor parameter validation failed:"
3330 class LUCreateInstance(LogicalUnit):
3331 """Create an instance.
3334 HPATH = "instance-add"
3335 HTYPE = constants.HTYPE_INSTANCE
3336 _OP_REQP = ["instance_name", "disk_size",
3337 "disk_template", "swap_size", "mode", "start",
3338 "wait_for_sync", "ip_check", "mac",
3339 "hvparams", "beparams"]
3342 def _ExpandNode(self, node):
3343 """Expands and checks one node name.
3346 node_full = self.cfg.ExpandNodeName(node)
3347 if node_full is None:
3348 raise errors.OpPrereqError("Unknown node %s" % node)
3351 def ExpandNames(self):
3352 """ExpandNames for CreateInstance.
3354 Figure out the right locks for instance creation.
3357 self.needed_locks = {}
3359 # set optional parameters to None if they don't exist
3360 for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
3361 if not hasattr(self.op, attr):
3362 setattr(self.op, attr, None)
3364 # cheap checks, mostly valid constants given
3366 # verify creation mode
3367 if self.op.mode not in (constants.INSTANCE_CREATE,
3368 constants.INSTANCE_IMPORT):
3369 raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3372 # disk template and mirror node verification
3373 if self.op.disk_template not in constants.DISK_TEMPLATES:
3374 raise errors.OpPrereqError("Invalid disk template name")
3376 if self.op.hypervisor is None:
3377 self.op.hypervisor = self.cfg.GetHypervisorType()
3379 cluster = self.cfg.GetClusterInfo()
3380 enabled_hvs = cluster.enabled_hypervisors
3381 if self.op.hypervisor not in enabled_hvs:
3382 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
3383 " cluster (%s)" % (self.op.hypervisor,
3384 ",".join(enabled_hvs)))
3386 # check hypervisor parameter syntax (locally)
3388 filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
3390 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
3391 hv_type.CheckParameterSyntax(filled_hvp)
3393 # fill and remember the beparams dict
3394 self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
3397 #### instance parameters check
3399 # instance name verification
3400 hostname1 = utils.HostInfo(self.op.instance_name)
3401 self.op.instance_name = instance_name = hostname1.name
3403 # this is just a preventive check, but someone might still add this
3404 # instance in the meantime, and creation will fail at lock-add time
3405 if instance_name in self.cfg.GetInstanceList():
3406 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3409 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
3411 # ip validity checks
3412 ip = getattr(self.op, "ip", None)
3413 if ip is None or ip.lower() == "none":
3415 elif ip.lower() == constants.VALUE_AUTO:
3416 inst_ip = hostname1.ip
3418 if not utils.IsValidIP(ip):
3419 raise errors.OpPrereqError("given IP address '%s' doesn't look"
3420 " like a valid IP" % ip)
3422 self.inst_ip = self.op.ip = inst_ip
3423 # used in CheckPrereq for ip ping check
3424 self.check_ip = hostname1.ip
3426 # MAC address verification
3427 if self.op.mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3428 if not utils.IsValidMac(self.op.mac.lower()):
3429 raise errors.OpPrereqError("invalid MAC address specified: %s" %
3432 # file storage checks
3433 if (self.op.file_driver and
3434 not self.op.file_driver in constants.FILE_DRIVER):
3435 raise errors.OpPrereqError("Invalid file driver name '%s'" %
3436 self.op.file_driver)
3438 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3439 raise errors.OpPrereqError("File storage directory path not absolute")
3441 ### Node/iallocator related checks
3442 if [self.op.iallocator, self.op.pnode].count(None) != 1:
3443 raise errors.OpPrereqError("One and only one of iallocator and primary"
3444 " node must be given")
3446 if self.op.iallocator:
3447 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3449 self.op.pnode = self._ExpandNode(self.op.pnode)
3450 nodelist = [self.op.pnode]
3451 if self.op.snode is not None:
3452 self.op.snode = self._ExpandNode(self.op.snode)
3453 nodelist.append(self.op.snode)
3454 self.needed_locks[locking.LEVEL_NODE] = nodelist
3456 # in case of import, lock the source node too
3457 if self.op.mode == constants.INSTANCE_IMPORT:
3458 src_node = getattr(self.op, "src_node", None)
3459 src_path = getattr(self.op, "src_path", None)
3461 if src_node is None or src_path is None:
3462 raise errors.OpPrereqError("Importing an instance requires source"
3463 " node and path options")
3465 if not os.path.isabs(src_path):
3466 raise errors.OpPrereqError("The source path must be absolute")
3468 self.op.src_node = src_node = self._ExpandNode(src_node)
3469 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
3470 self.needed_locks[locking.LEVEL_NODE].append(src_node)
3472 else: # INSTANCE_CREATE
3473 if getattr(self.op, "os_type", None) is None:
3474 raise errors.OpPrereqError("No guest OS specified")
3476 def _RunAllocator(self):
3477 """Run the allocator based on input opcode.
3480 disks = [{"size": self.op.disk_size, "mode": "w"},
3481 {"size": self.op.swap_size, "mode": "w"}]
3482 nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3483 "bridge": self.op.bridge}]
3484 ial = IAllocator(self,
3485 mode=constants.IALLOCATOR_MODE_ALLOC,
3486 name=self.op.instance_name,
3487 disk_template=self.op.disk_template,
3490 vcpus=self.be_full[constants.BE_VCPUS],
3491 mem_size=self.be_full[constants.BE_MEMORY],
3496 ial.Run(self.op.iallocator)
3499 raise errors.OpPrereqError("Can't compute nodes using"
3500 " iallocator '%s': %s" % (self.op.iallocator,
3502 if len(ial.nodes) != ial.required_nodes:
3503 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3504 " of nodes (%s), required %s" %
3505 (self.op.iallocator, len(ial.nodes),
3506 ial.required_nodes))
3507 self.op.pnode = ial.nodes[0]
3508 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
3509 self.op.instance_name, self.op.iallocator,
3510 ", ".join(ial.nodes))
3511 if ial.required_nodes == 2:
3512 self.op.snode = ial.nodes[1]
3514 def BuildHooksEnv(self):
3517 This runs on master, primary and secondary nodes of the instance.
3521 "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3522 "INSTANCE_DISK_SIZE": self.op.disk_size,
3523 "INSTANCE_SWAP_SIZE": self.op.swap_size,
3524 "INSTANCE_ADD_MODE": self.op.mode,
3526 if self.op.mode == constants.INSTANCE_IMPORT:
3527 env["INSTANCE_SRC_NODE"] = self.op.src_node
3528 env["INSTANCE_SRC_PATH"] = self.op.src_path
3529 env["INSTANCE_SRC_IMAGES"] = self.src_images
3531 env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3532 primary_node=self.op.pnode,
3533 secondary_nodes=self.secondaries,
3534 status=self.instance_status,
3535 os_type=self.op.os_type,
3536 memory=self.be_full[constants.BE_MEMORY],
3537 vcpus=self.be_full[constants.BE_VCPUS],
3538 nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3541 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
3546 def CheckPrereq(self):
3547 """Check prerequisites.
3550 if (not self.cfg.GetVGName() and
3551 self.op.disk_template not in constants.DTS_NOT_LVM):
3552 raise errors.OpPrereqError("Cluster does not support lvm-based"
3556 if self.op.mode == constants.INSTANCE_IMPORT:
3557 src_node = self.op.src_node
3558 src_path = self.op.src_path
3560 export_info = self.rpc.call_export_info(src_node, src_path)
3563 raise errors.OpPrereqError("No export found in dir %s" % src_path)
3565 if not export_info.has_section(constants.INISECT_EXP):
3566 raise errors.ProgrammerError("Corrupted export config")
3568 ei_version = export_info.get(constants.INISECT_EXP, 'version')
3569 if (int(ei_version) != constants.EXPORT_VERSION):
3570 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3571 (ei_version, constants.EXPORT_VERSION))
3573 # Check that the new instance doesn't have fewer disks than the export
3574 # TODO: substitute "2" with the actual number of disks requested
3576 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
3577 if instance_disks < export_disks:
3578 raise errors.OpPrereqError("Not enough disks to import."
3579 " (instance: %d, export: %d)" %
3582 self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3584 for idx in range(export_disks):
3585 option = 'disk%d_dump' % idx
3586 if export_info.has_option(constants.INISECT_INS, option):
3587 # FIXME: are the old os-es, disk sizes, etc. useful?
3588 export_name = export_info.get(constants.INISECT_INS, option)
3589 image = os.path.join(src_path, export_name)
3590 disk_images.append(image)
3592 disk_images.append(False)
3594 self.src_images = disk_images
3596 if self.op.mac == constants.VALUE_AUTO:
3597 old_name = export_info.get(constants.INISECT_INS, 'name')
3598 if self.op.instance_name == old_name:
3599 # FIXME: adjust every nic, when we'll be able to create instances
3600 # with more than one
3601 if int(export_info.get(constants.INISECT_INS, 'nic_count')) >= 1:
3602 self.op.mac = export_info.get(constants.INISECT_INS, 'nic_0_mac')
3604 # ip ping checks (we use the same ip that was resolved in ExpandNames)
3606 if self.op.start and not self.op.ip_check:
3607 raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3608 " adding an instance in start mode")
3610 if self.op.ip_check:
3611 if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
3612 raise errors.OpPrereqError("IP %s of instance %s already in use" %
3613 (self.check_ip, self.op.instance_name))
3615 # bridge verification
3616 bridge = getattr(self.op, "bridge", None)
3618 self.op.bridge = self.cfg.GetDefBridge()
3620 self.op.bridge = bridge
3624 if self.op.iallocator is not None:
3625 self._RunAllocator()
3627 #### node related checks
3629 # check primary node
3630 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
3631 assert self.pnode is not None, \
3632 "Cannot retrieve locked node %s" % self.op.pnode
3633 self.secondaries = []
3635 # mirror node verification
3636 if self.op.disk_template in constants.DTS_NET_MIRROR:
3637 if self.op.snode is None:
3638 raise errors.OpPrereqError("The networked disk templates need"
3640 if self.op.snode == pnode.name:
3641 raise errors.OpPrereqError("The secondary node cannot be"
3642 " the primary node.")
3643 self.secondaries.append(self.op.snode)
3645 nodenames = [pnode.name] + self.secondaries
3647 req_size = _ComputeDiskSize(self.op.disk_template,
3648 self.op.disk_size, self.op.swap_size)
3650 # Check lv size requirements
3651 if req_size is not None:
3652 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3654 for node in nodenames:
3655 info = nodeinfo.get(node, None)
3657 raise errors.OpPrereqError("Cannot get current information"
3658 " from node '%s'" % node)
3659 vg_free = info.get('vg_free', None)
3660 if not isinstance(vg_free, int):
3661 raise errors.OpPrereqError("Can't compute free disk space on"
3663 if req_size > info['vg_free']:
3664 raise errors.OpPrereqError("Not enough disk space on target node %s."
3665 " %d MB available, %d MB required" %
3666 (node, info['vg_free'], req_size))
3668 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
3671 os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
3673 raise errors.OpPrereqError("OS '%s' not in supported os list for"
3674 " primary node" % self.op.os_type)
3676 # bridge check on primary node
3677 if not self.rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3678 raise errors.OpPrereqError("target bridge '%s' does not exist on"
3679 " destination node '%s'" %
3680 (self.op.bridge, pnode.name))
3682 # memory check on primary node
3684 _CheckNodeFreeMemory(self, self.pnode.name,
3685 "creating instance %s" % self.op.instance_name,
3686 self.be_full[constants.BE_MEMORY],
3690 self.instance_status = 'up'
3692 self.instance_status = 'down'
3694 def Exec(self, feedback_fn):
3695 """Create and add the instance to the cluster.
3698 instance = self.op.instance_name
3699 pnode_name = self.pnode.name
3701 if self.op.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3702 mac_address = self.cfg.GenerateMAC()
3704 mac_address = self.op.mac
3706 nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3707 if self.inst_ip is not None:
3708 nic.ip = self.inst_ip
3710 ht_kind = self.op.hypervisor
3711 if ht_kind in constants.HTS_REQ_PORT:
3712 network_port = self.cfg.AllocatePort()
3716 ##if self.op.vnc_bind_address is None:
3717 ## self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3719 # this is needed because os.path.join does not accept None arguments
3720 if self.op.file_storage_dir is None:
3721 string_file_storage_dir = ""
3723 string_file_storage_dir = self.op.file_storage_dir
3725 # build the full file storage dir path
3726 file_storage_dir = os.path.normpath(os.path.join(
3727 self.cfg.GetFileStorageDir(),
3728 string_file_storage_dir, instance))
3731 disks = _GenerateDiskTemplate(self,
3732 self.op.disk_template,
3733 instance, pnode_name,
3734 self.secondaries, self.op.disk_size,
3737 self.op.file_driver)
3739 iobj = objects.Instance(name=instance, os=self.op.os_type,
3740 primary_node=pnode_name,
3741 nics=[nic], disks=disks,
3742 disk_template=self.op.disk_template,
3743 status=self.instance_status,
3744 network_port=network_port,
3745 beparams=self.op.beparams,
3746 hvparams=self.op.hvparams,
3747 hypervisor=self.op.hypervisor,
3750 feedback_fn("* creating instance disks...")
3751 if not _CreateDisks(self, iobj):
3752 _RemoveDisks(self, iobj)
3753 self.cfg.ReleaseDRBDMinors(instance)
3754 raise errors.OpExecError("Device creation failed, reverting...")
3756 feedback_fn("adding instance %s to cluster config" % instance)
3758 self.cfg.AddInstance(iobj)
3759 # Declare that we don't want to remove the instance lock anymore, as we've
3760 # added the instance to the config
3761 del self.remove_locks[locking.LEVEL_INSTANCE]
3762 # Remove the temp. assignments for the instance's drbds
3763 self.cfg.ReleaseDRBDMinors(instance)
3765 if self.op.wait_for_sync:
3766 disk_abort = not _WaitForSync(self, iobj)
3767 elif iobj.disk_template in constants.DTS_NET_MIRROR:
3768 # make sure the disks are not degraded (still sync-ing is ok)
3770 feedback_fn("* checking mirrors status")
3771 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
3776 _RemoveDisks(self, iobj)
3777 self.cfg.RemoveInstance(iobj.name)
3778 # Make sure the instance lock gets removed
3779 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
3780 raise errors.OpExecError("There are some degraded disks for"
3783 feedback_fn("creating os for instance %s on node %s" %
3784 (instance, pnode_name))
3786 if iobj.disk_template != constants.DT_DISKLESS:
3787 if self.op.mode == constants.INSTANCE_CREATE:
3788 feedback_fn("* running the instance OS create scripts...")
3789 if not self.rpc.call_instance_os_add(pnode_name, iobj):
3790 raise errors.OpExecError("could not add os for instance %s"
3792 (instance, pnode_name))
3794 elif self.op.mode == constants.INSTANCE_IMPORT:
3795 feedback_fn("* running the instance OS import scripts...")
3796 src_node = self.op.src_node
3797 src_images = self.src_images
3798 cluster_name = self.cfg.GetClusterName()
3799 import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
3800 src_node, src_images,
3802 for idx, result in enumerate(import_result):
3804 self.LogWarning("Could not image %s for on instance %s, disk %d,"
3805 " on node %s" % (src_images[idx], instance, idx,
3808 # also checked in the prereq part
3809 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3813 logging.info("Starting instance %s on node %s", instance, pnode_name)
3814 feedback_fn("* starting instance...")
3815 if not self.rpc.call_instance_start(pnode_name, iobj, None):
3816 raise errors.OpExecError("Could not start instance")
3819 class LUConnectConsole(NoHooksLU):
3820 """Connect to an instance's console.
3822 This is somewhat special in that it returns the command line that
3823 you need to run on the master node in order to connect to the
3827 _OP_REQP = ["instance_name"]
3830 def ExpandNames(self):
3831 self._ExpandAndLockInstance()
3833 def CheckPrereq(self):
3834 """Check prerequisites.
3836 This checks that the instance is in the cluster.
3839 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3840 assert self.instance is not None, \
3841 "Cannot retrieve locked instance %s" % self.op.instance_name
3843 def Exec(self, feedback_fn):
3844 """Connect to the console of an instance
3847 instance = self.instance
3848 node = instance.primary_node
3850 node_insts = self.rpc.call_instance_list([node],
3851 [instance.hypervisor])[node]
3852 if node_insts is False:
3853 raise errors.OpExecError("Can't connect to node %s." % node)
3855 if instance.name not in node_insts:
3856 raise errors.OpExecError("Instance %s is not running." % instance.name)
3858 logging.debug("Connecting to console of %s on %s", instance.name, node)
3860 hyper = hypervisor.GetHypervisor(instance.hypervisor)
3861 console_cmd = hyper.GetShellCommandForConsole(instance)
3864 return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
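# Illustrative result (hypothetical names): the returned value is the full
# command line the client must exec on the master node, roughly of the form
#   ssh -t root@node1.example.com '<console command for instance1>'
# where the exact arguments depend on ssh.SshRunner.BuildCmd and on the
# hypervisor's GetShellCommandForConsole implementation.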
3867 class LUReplaceDisks(LogicalUnit):
3868 """Replace the disks of an instance.
3871 HPATH = "mirrors-replace"
3872 HTYPE = constants.HTYPE_INSTANCE
3873 _OP_REQP = ["instance_name", "mode", "disks"]
3876 def ExpandNames(self):
3877 self._ExpandAndLockInstance()
3879 if not hasattr(self.op, "remote_node"):
3880 self.op.remote_node = None
3882 ia_name = getattr(self.op, "iallocator", None)
3883 if ia_name is not None:
3884 if self.op.remote_node is not None:
3885 raise errors.OpPrereqError("Give either the iallocator or the new"
3886 " secondary, not both")
3887 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3888 elif self.op.remote_node is not None:
3889 remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3890 if remote_node is None:
3891 raise errors.OpPrereqError("Node '%s' not known" %
3892 self.op.remote_node)
3893 self.op.remote_node = remote_node
3894 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
3895 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3897 self.needed_locks[locking.LEVEL_NODE] = []
3898 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3900 def DeclareLocks(self, level):
3901 # If we're not already locking all nodes in the set we have to declare the
3902 # instance's primary/secondary nodes.
3903 if (level == locking.LEVEL_NODE and
3904 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
3905 self._LockInstancesNodes()
3907 def _RunAllocator(self):
3908 """Compute a new secondary node using an IAllocator.
3911 ial = IAllocator(self,
3912 mode=constants.IALLOCATOR_MODE_RELOC,
3913 name=self.op.instance_name,
3914 relocate_from=[self.sec_node])
3916 ial.Run(self.op.iallocator)
3919 raise errors.OpPrereqError("Can't compute nodes using"
3920 " iallocator '%s': %s" % (self.op.iallocator,
3922 if len(ial.nodes) != ial.required_nodes:
3923 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3924 " of nodes (%s), required %s" %
3925 (self.op.iallocator, len(ial.nodes), ial.required_nodes))
3926 self.op.remote_node = ial.nodes[0]
3927 self.LogInfo("Selected new secondary for the instance: %s",
3928 self.op.remote_node)
3930 def BuildHooksEnv(self):
3933 This runs on the master, the primary and all the secondaries.
3937 "MODE": self.op.mode,
3938 "NEW_SECONDARY": self.op.remote_node,
3939 "OLD_SECONDARY": self.instance.secondary_nodes[0],
3941 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3943 self.cfg.GetMasterNode(),
3944 self.instance.primary_node,
3946 if self.op.remote_node is not None:
3947 nl.append(self.op.remote_node)
3950 def CheckPrereq(self):
3951 """Check prerequisites.
3953 This checks that the instance is in the cluster.
3956 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3957 assert instance is not None, \
3958 "Cannot retrieve locked instance %s" % self.op.instance_name
3959 self.instance = instance
3961 if instance.disk_template not in constants.DTS_NET_MIRROR:
3962 raise errors.OpPrereqError("Instance's disk layout is not"
3963 " network mirrored.")
3965 if len(instance.secondary_nodes) != 1:
3966 raise errors.OpPrereqError("The instance has a strange layout,"
3967 " expected one secondary but found %d" %
3968 len(instance.secondary_nodes))
3970 self.sec_node = instance.secondary_nodes[0]
3972 ia_name = getattr(self.op, "iallocator", None)
3973 if ia_name is not None:
3974 self._RunAllocator()
3976 remote_node = self.op.remote_node
3977 if remote_node is not None:
3978 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3979 assert self.remote_node_info is not None, \
3980 "Cannot retrieve locked node %s" % remote_node
3982 self.remote_node_info = None
3983 if remote_node == instance.primary_node:
3984 raise errors.OpPrereqError("The specified node is the primary node of"
3986 elif remote_node == self.sec_node:
3987 if self.op.mode == constants.REPLACE_DISK_SEC:
3988 # this is for DRBD8, where we can't execute the same mode of
3989 # replacement as for drbd7 (no different port allocated)
3990 raise errors.OpPrereqError("Same secondary given, cannot execute"
3992 if instance.disk_template == constants.DT_DRBD8:
3993 if (self.op.mode == constants.REPLACE_DISK_ALL and
3994 remote_node is not None):
3995 # switch to replace secondary mode
3996 self.op.mode = constants.REPLACE_DISK_SEC
3998 if self.op.mode == constants.REPLACE_DISK_ALL:
3999 raise errors.OpPrereqError("Template 'drbd' only allows primary or"
4000 " secondary disk replacement, not"
4002 elif self.op.mode == constants.REPLACE_DISK_PRI:
4003 if remote_node is not None:
4004 raise errors.OpPrereqError("Template 'drbd' does not allow changing"
4005 " the secondary while doing a primary"
4006 " node disk replacement")
4007 self.tgt_node = instance.primary_node
4008 self.oth_node = instance.secondary_nodes[0]
4009 elif self.op.mode == constants.REPLACE_DISK_SEC:
4010 self.new_node = remote_node # this can be None, in which case
4011 # we don't change the secondary
4012 self.tgt_node = instance.secondary_nodes[0]
4013 self.oth_node = instance.primary_node
4015 raise errors.ProgrammerError("Unhandled disk replace mode")
4017 for name in self.op.disks:
4018 if instance.FindDisk(name) is None:
4019 raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
4020 (name, instance.name))
4022 def _ExecD8DiskOnly(self, feedback_fn):
4023 """Replace a disk on the primary or secondary for dbrd8.
4025 The algorithm for replace is quite complicated:
4026 - for each disk to be replaced:
4027 - create new LVs on the target node with unique names
4028 - detach old LVs from the drbd device
4029 - rename old LVs to name_replaced.<time_t>
4030 - rename new LVs to old LVs
4031 - attach the new LVs (with the old names now) to the drbd device
4032 - wait for sync across all devices
4033 - for each modified disk:
4034 - remove old LVs (which have the name name_replaced.<time_t>)
4036 Failures are not very well handled.
4040 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4041 instance = self.instance
4043 vgname = self.cfg.GetVGName()
4046 tgt_node = self.tgt_node
4047 oth_node = self.oth_node
4049 # Step: check device activation
4050 self.proc.LogStep(1, steps_total, "check device existence")
4051 info("checking volume groups")
4052 my_vg = cfg.GetVGName()
4053 results = self.rpc.call_vg_list([oth_node, tgt_node])
4055 raise errors.OpExecError("Can't list volume groups on the nodes")
4056 for node in oth_node, tgt_node:
4057 res = results.get(node, False)
4058 if not res or my_vg not in res:
4059 raise errors.OpExecError("Volume group '%s' not found on %s" %
4061 for dev in instance.disks:
4062 if dev.iv_name not in self.op.disks:
4064 for node in tgt_node, oth_node:
4065 info("checking %s on %s" % (dev.iv_name, node))
4066 cfg.SetDiskID(dev, node)
4067 if not self.rpc.call_blockdev_find(node, dev):
4068 raise errors.OpExecError("Can't find device %s on node %s" %
4069 (dev.iv_name, node))
4071 # Step: check other node consistency
4072 self.proc.LogStep(2, steps_total, "check peer consistency")
4073 for dev in instance.disks:
4074 if dev.iv_name not in self.op.disks:
4076 info("checking %s consistency on %s" % (dev.iv_name, oth_node))
4077 if not _CheckDiskConsistency(self, dev, oth_node,
4078 oth_node==instance.primary_node):
4079 raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
4080 " to replace disks on this node (%s)" %
4081 (oth_node, tgt_node))
4083 # Step: create new storage
4084 self.proc.LogStep(3, steps_total, "allocate new storage")
4085 for dev in instance.disks:
4086 if dev.iv_name not in self.op.disks:
4089 cfg.SetDiskID(dev, tgt_node)
4090 lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
4091 names = _GenerateUniqueNames(self, lv_names)
4092 lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4093 logical_id=(vgname, names[0]))
4094 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4095 logical_id=(vgname, names[1]))
4096 new_lvs = [lv_data, lv_meta]
4097 old_lvs = dev.children
4098 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
4099 info("creating new local storage on %s for %s" %
4100 (tgt_node, dev.iv_name))
4101 # since we *always* want to create this LV, we use the
4102 # _Create...OnPrimary (which forces the creation), even if we
4103 # are talking about the secondary node
4104 for new_lv in new_lvs:
4105 if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
4106 _GetInstanceInfoText(instance)):
4107 raise errors.OpExecError("Failed to create new LV named '%s' on"
4109 (new_lv.logical_id[1], tgt_node))
4111 # Step: for each lv, detach+rename*2+attach
4112 self.proc.LogStep(4, steps_total, "change drbd configuration")
4113 for dev, old_lvs, new_lvs in iv_names.itervalues():
4114 info("detaching %s drbd from local storage" % dev.iv_name)
4115 if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
4116 raise errors.OpExecError("Can't detach drbd from local storage on node"
4117 " %s for device %s" % (tgt_node, dev.iv_name))
4119 #cfg.Update(instance)
4121 # ok, we created the new LVs, so now we know we have the needed
4122 # storage; as such, we proceed on the target node to rename
4123 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4124 # using the assumption that logical_id == physical_id (which in
4125 # turn is the unique_id on that node)
4127 # FIXME(iustin): use a better name for the replaced LVs
4128 temp_suffix = int(time.time())
4129 ren_fn = lambda d, suff: (d.physical_id[0],
4130 d.physical_id[1] + "_replaced-%s" % suff)
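# Worked example (illustrative, names assumed): for an old LV with
# physical_id ("xenvg", "some_lv_name"), ren_fn yields
# ("xenvg", "some_lv_name_replaced-<time_t>"), i.e. the same volume group
# with the timestamp suffix computed above appended to the LV name.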
4131 # build the rename list based on what LVs exist on the node
4133 for to_ren in old_lvs:
4134 find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4135 if find_res is not None: # device exists
4136 rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4138 info("renaming the old LVs on the target node")
4139 if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4140 raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4141 # now we rename the new LVs to the old LVs
4142 info("renaming the new LVs on the target node")
4143 rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4144 if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4145 raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4147 for old, new in zip(old_lvs, new_lvs):
4148 new.logical_id = old.logical_id
4149 cfg.SetDiskID(new, tgt_node)
4151 for disk in old_lvs:
4152 disk.logical_id = ren_fn(disk, temp_suffix)
4153 cfg.SetDiskID(disk, tgt_node)
4155 # now that the new lvs have the old name, we can add them to the device
4156 info("adding new mirror component on %s" % tgt_node)
4157 if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
4158 for new_lv in new_lvs:
4159 if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
4160 warning("Can't rollback device %s", hint="manually clean up unused"
4162 raise errors.OpExecError("Can't add local storage to drbd")
4164 dev.children = new_lvs
4165 cfg.Update(instance)
4167 # Step: wait for sync
4169 # this can fail as the old devices are degraded and _WaitForSync
4170 # does a combined result over all disks, so we don't check its return value
4172 self.proc.LogStep(5, steps_total, "sync devices")
4173 _WaitForSync(self, instance, unlock=True)
4175 # so check manually all the devices
4176 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4177 cfg.SetDiskID(dev, instance.primary_node)
4178 is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
4180 raise errors.OpExecError("DRBD device %s is degraded!" % name)
4182 # Step: remove old storage
4183 self.proc.LogStep(6, steps_total, "removing old storage")
4184 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4185 info("remove logical volumes for %s" % name)
4187 cfg.SetDiskID(lv, tgt_node)
4188 if not self.rpc.call_blockdev_remove(tgt_node, lv):
4189 warning("Can't remove old LV", hint="manually remove unused LVs")
4192 def _ExecD8Secondary(self, feedback_fn):
4193 """Replace the secondary node for drbd8.
4195 The algorithm for replace is quite complicated:
4196 - for all disks of the instance:
4197 - create new LVs on the new node with the same names
4198 - shutdown the drbd device on the old secondary
4199 - disconnect the drbd network on the primary
4200 - create the drbd device on the new secondary
4201 - network attach the drbd on the primary, using an artifice:
4202 the drbd code for Attach() will connect to the network if it
4203 finds a device which is connected to the good local disks but not network enabled
4205 - wait for sync across all devices
4206 - remove all disks from the old secondary
4208 Failures are not very well handled.
4212 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4213 instance = self.instance
4215 vgname = self.cfg.GetVGName()
4218 old_node = self.tgt_node
4219 new_node = self.new_node
4220 pri_node = instance.primary_node
4222 # Step: check device activation
4223 self.proc.LogStep(1, steps_total, "check device existence")
4224 info("checking volume groups")
4225 my_vg = cfg.GetVGName()
4226 results = self.rpc.call_vg_list([pri_node, new_node])
4228 raise errors.OpExecError("Can't list volume groups on the nodes")
4229 for node in pri_node, new_node:
4230 res = results.get(node, False)
4231 if not res or my_vg not in res:
4232 raise errors.OpExecError("Volume group '%s' not found on %s" %
4234 for dev in instance.disks:
4235 if dev.iv_name not in self.op.disks:
4237 info("checking %s on %s" % (dev.iv_name, pri_node))
4238 cfg.SetDiskID(dev, pri_node)
4239 if not self.rpc.call_blockdev_find(pri_node, dev):
4240 raise errors.OpExecError("Can't find device %s on node %s" %
4241 (dev.iv_name, pri_node))
4243 # Step: check other node consistency
4244 self.proc.LogStep(2, steps_total, "check peer consistency")
4245 for dev in instance.disks:
4246 if dev.iv_name not in self.op.disks:
4248 info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4249 if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4250 raise errors.OpExecError("Primary node (%s) has degraded storage,"
4251 " unsafe to replace the secondary" %
4254 # Step: create new storage
4255 self.proc.LogStep(3, steps_total, "allocate new storage")
4256 for dev in instance.disks:
4258 info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4259 # since we *always* want to create this LV, we use the
4260 # _Create...OnPrimary (which forces the creation), even if we
4261 # are talking about the secondary node
4262 for new_lv in dev.children:
4263 if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4264 _GetInstanceInfoText(instance)):
4265 raise errors.OpExecError("Failed to create new LV named '%s' on"
4267 (new_lv.logical_id[1], new_node))
4270 # Step 4: drbd minors and drbd setup changes
4271 # after this, we must manually remove the drbd minors on both the
4272 # error and the success paths
4273 minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4275 logging.debug("Allocated minors %s", minors)
4276 self.proc.LogStep(4, steps_total, "changing drbd configuration")
4277 for dev, new_minor in zip(instance.disks, minors):
4279 info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4280 # create new devices on new_node
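# Layout note (inferred from the indexing below, not authoritative): a DRBD8
# logical_id is a tuple along the lines of
#   (node_a, node_b, port, minor_a, minor_b, ...)
# so new_logical_id keeps the existing port but substitutes new_node and the
# freshly allocated new_minor for the secondary side.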
4281 if pri_node == dev.logical_id[0]:
4282 new_logical_id = (pri_node, new_node,
4283 dev.logical_id[2], dev.logical_id[3], new_minor,
4286 new_logical_id = (new_node, pri_node,
4287 dev.logical_id[2], new_minor, dev.logical_id[4],
4289 iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4290 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4292 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4293 logical_id=new_logical_id,
4294 children=dev.children)
4295 if not _CreateBlockDevOnSecondary(self, new_node, instance,
4297 _GetInstanceInfoText(instance)):
4298 self.cfg.ReleaseDRBDMinors(instance.name)
4299 raise errors.OpExecError("Failed to create new DRBD on"
4300 " node '%s'" % new_node)
4302 for dev in instance.disks:
4303 # we have new devices, shutdown the drbd on the old secondary
4304 info("shutting down drbd for %s on old node" % dev.iv_name)
4305 cfg.SetDiskID(dev, old_node)
4306 if not self.rpc.call_blockdev_shutdown(old_node, dev):
4307 warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4308 hint="Please cleanup this device manually as soon as possible")
4310 info("detaching primary drbds from the network (=> standalone)")
4312 for dev in instance.disks:
4313 cfg.SetDiskID(dev, pri_node)
4314 # set the network part of the physical (unique in bdev terms) id
4315 # to None, meaning detach from network
4316 dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4317 # and 'find' the device, which will 'fix' it to match the
4319 if self.rpc.call_blockdev_find(pri_node, dev):
4322 warning("Failed to detach drbd %s from network, unusual case" %
4326 # no detaches succeeded (very unlikely)
4327 self.cfg.ReleaseDRBDMinors(instance.name)
4328 raise errors.OpExecError("Can't detach at least one DRBD from old node")
4330 # if we managed to detach at least one, we update all the disks of
4331 # the instance to point to the new secondary
4332 info("updating instance configuration")
4333 for dev, _, new_logical_id in iv_names.itervalues():
4334 dev.logical_id = new_logical_id
4335 cfg.SetDiskID(dev, pri_node)
4336 cfg.Update(instance)
4337 # we can now remove the temp minors, as the new values have been
4338 # written to the config file (and are therefore stable)
4339 self.cfg.ReleaseDRBDMinors(instance.name)
4341 # and now perform the drbd attach
4342 info("attaching primary drbds to new secondary (standalone => connected)")
4344 for dev in instance.disks:
4345 info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4346 # since the attach is smart, it's enough to 'find' the device,
4347 # it will automatically activate the network, if the physical_id
4349 cfg.SetDiskID(dev, pri_node)
4350 logging.debug("Disk to attach: %s", dev)
4351 if not self.rpc.call_blockdev_find(pri_node, dev):
4352 warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4353 "please do a gnt-instance info to see the status of disks")
4355 # this can fail as the old devices are degraded and _WaitForSync
4356 # does a combined result over all disks, so we don't check its return value
4358 self.proc.LogStep(5, steps_total, "sync devices")
4359 _WaitForSync(self, instance, unlock=True)
4361 # so check manually all the devices
4362 for name, (dev, old_lvs, _) in iv_names.iteritems():
4363 cfg.SetDiskID(dev, pri_node)
4364 is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4366 raise errors.OpExecError("DRBD device %s is degraded!" % name)
4368 self.proc.LogStep(6, steps_total, "removing old storage")
4369 for name, (dev, old_lvs, _) in iv_names.iteritems():
4370 info("remove logical volumes for %s" % name)
4372 cfg.SetDiskID(lv, old_node)
4373 if not self.rpc.call_blockdev_remove(old_node, lv):
4374 warning("Can't remove LV on old secondary",
4375 hint="Cleanup stale volumes by hand")
4377 def Exec(self, feedback_fn):
4378 """Execute disk replacement.
4380 This dispatches the disk replacement to the appropriate handler.
4383 instance = self.instance
4385 # Activate the instance disks if we're replacing them on a down instance
4386 if instance.status == "down":
4387 _StartInstanceDisks(self, instance, True)
4389 if instance.disk_template == constants.DT_DRBD8:
4390 if self.op.remote_node is None:
4391 fn = self._ExecD8DiskOnly
4393 fn = self._ExecD8Secondary
4395 raise errors.ProgrammerError("Unhandled disk replacement case")
4397 ret = fn(feedback_fn)
4399 # Deactivate the instance disks if we're replacing them on a down instance
4400 if instance.status == "down":
4401 _SafeShutdownInstanceDisks(self, instance)
4406 class LUGrowDisk(LogicalUnit):
4407 """Grow a disk of an instance.
4411 HTYPE = constants.HTYPE_INSTANCE
4412 _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
4415 def ExpandNames(self):
4416 self._ExpandAndLockInstance()
4417 self.needed_locks[locking.LEVEL_NODE] = []
4418 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4420 def DeclareLocks(self, level):
4421 if level == locking.LEVEL_NODE:
4422 self._LockInstancesNodes()
4424 def BuildHooksEnv(self):
4427 This runs on the master, the primary and all the secondaries.
4431 "DISK": self.op.disk,
4432 "AMOUNT": self.op.amount,
4434 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4436 self.cfg.GetMasterNode(),
4437 self.instance.primary_node,
4441 def CheckPrereq(self):
4442 """Check prerequisites.
4444 This checks that the instance is in the cluster.
4447 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4448 assert instance is not None, \
4449 "Cannot retrieve locked instance %s" % self.op.instance_name
4451 self.instance = instance
4453 if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4454 raise errors.OpPrereqError("Instance's disk layout does not support"
4457 if instance.FindDisk(self.op.disk) is None:
4458 raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
4459 (self.op.disk, instance.name))
4461 nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4462 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4463 instance.hypervisor)
4464 for node in nodenames:
4465 info = nodeinfo.get(node, None)
4467 raise errors.OpPrereqError("Cannot get current information"
4468 " from node '%s'" % node)
4469 vg_free = info.get('vg_free', None)
4470 if not isinstance(vg_free, int):
4471 raise errors.OpPrereqError("Can't compute free disk space on"
4473 if self.op.amount > info['vg_free']:
4474 raise errors.OpPrereqError("Not enough disk space on target node %s:"
4475 " %d MiB available, %d MiB required" %
4476 (node, info['vg_free'], self.op.amount))
4478 def Exec(self, feedback_fn):
4479 """Execute disk grow.
4482 instance = self.instance
4483 disk = instance.FindDisk(self.op.disk)
4484 for node in (instance.secondary_nodes + (instance.primary_node,)):
4485 self.cfg.SetDiskID(disk, node)
4486 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
4487 if (not result or not isinstance(result, (list, tuple)) or
4489 raise errors.OpExecError("Grow request failed on node %s" % node)
4491 raise errors.OpExecError("Grow request failed on node %s: %s" %
4493 disk.RecordGrow(self.op.amount)
4494 self.cfg.Update(instance)
4495 if self.op.wait_for_sync:
4496 disk_abort = not _WaitForSync(self, instance)
4498 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
4499 " status.\nPlease check the instance.")
4502 class LUQueryInstanceData(NoHooksLU):
4503 """Query runtime instance data.
4506 _OP_REQP = ["instances", "static"]
4509 def ExpandNames(self):
4510 self.needed_locks = {}
4511 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
4513 if not isinstance(self.op.instances, list):
4514 raise errors.OpPrereqError("Invalid argument type 'instances'")
4516 if self.op.instances:
4517 self.wanted_names = []
4518 for name in self.op.instances:
4519 full_name = self.cfg.ExpandInstanceName(name)
4520 if full_name is None:
4521 raise errors.OpPrereqError("Instance '%s' not known" %
4522 name)
4523 self.wanted_names.append(full_name)
4524 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
4526 self.wanted_names = None
4527 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4529 self.needed_locks[locking.LEVEL_NODE] = []
4530 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4532 def DeclareLocks(self, level):
4533 if level == locking.LEVEL_NODE:
4534 self._LockInstancesNodes()
4536 def CheckPrereq(self):
4537 """Check prerequisites.
4539 This only checks the optional instance list against the existing names.
4542 if self.wanted_names is None:
4543 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4545 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4546 in self.wanted_names]
4549 def _ComputeDiskStatus(self, instance, snode, dev):
4550 """Compute block device status.
4553 static = self.op.static
4555 self.cfg.SetDiskID(dev, instance.primary_node)
4556 dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
4560 if dev.dev_type in constants.LDS_DRBD:
4561 # we change the snode then (otherwise we use the one passed in)
4562 if dev.logical_id[0] == instance.primary_node:
4563 snode = dev.logical_id[1]
4565 snode = dev.logical_id[0]
4567 if snode and not static:
4568 self.cfg.SetDiskID(dev, snode)
4569 dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
4574 dev_children = [self._ComputeDiskStatus(instance, snode, child)
4575 for child in dev.children]
4580 "iv_name": dev.iv_name,
4581 "dev_type": dev.dev_type,
4582 "logical_id": dev.logical_id,
4583 "physical_id": dev.physical_id,
4584 "pstatus": dev_pstatus,
4585 "sstatus": dev_sstatus,
4586 "children": dev_children,
4591 def Exec(self, feedback_fn):
4592 """Gather and return data"""
4595 cluster = self.cfg.GetClusterInfo()
4597 for instance in self.wanted_instances:
4598 if not self.op.static:
4599 remote_info = self.rpc.call_instance_info(instance.primary_node,
4601 instance.hypervisor)
4602 if remote_info and "state" in remote_info:
4605 remote_state = "down"
4608 if instance.status == "down":
4609 config_state = "down"
4613 disks = [self._ComputeDiskStatus(instance, None, device)
4614 for device in instance.disks]
4617 "name": instance.name,
4618 "config_state": config_state,
4619 "run_state": remote_state,
4620 "pnode": instance.primary_node,
4621 "snodes": instance.secondary_nodes,
4623 "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4625 "hypervisor": instance.hypervisor,
4626 "network_port": instance.network_port,
4627 "hv_instance": instance.hvparams,
4628 "hv_actual": cluster.FillHV(instance),
4629 "be_instance": instance.beparams,
4630 "be_actual": cluster.FillBE(instance),
4633 result[instance.name] = idict
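# Rough shape of the per-instance entry built above (values are
# placeholders, keys as assembled in idict):
#
#   {"name": ..., "config_state": "up"/"down", "run_state": ...,
#    "pnode": ..., "snodes": [...], "disks": [...],
#    "nics": [(mac, ip, bridge), ...], "hypervisor": ..., ...}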
4638 class LUSetInstanceParams(LogicalUnit):
4639 """Modifies an instances's parameters.
4642 HPATH = "instance-modify"
4643 HTYPE = constants.HTYPE_INSTANCE
4644 _OP_REQP = ["instance_name", "hvparams"]
4647 def ExpandNames(self):
4648 self._ExpandAndLockInstance()
4649 self.needed_locks[locking.LEVEL_NODE] = []
4650 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4653 def DeclareLocks(self, level):
4654 if level == locking.LEVEL_NODE:
4655 self._LockInstancesNodes()
4657 def BuildHooksEnv(self):
4660 This runs on the master, primary and secondaries.
4664 if constants.BE_MEMORY in self.be_new:
4665 args['memory'] = self.be_new[constants.BE_MEMORY]
4666 if constants.BE_VCPUS in self.be_new:
4667 args['vcpus'] = self.be_new[constants.BE_VCPUS]
4668 if self.do_ip or self.do_bridge or self.mac:
4672 ip = self.instance.nics[0].ip
4674 bridge = self.bridge
4676 bridge = self.instance.nics[0].bridge
4680 mac = self.instance.nics[0].mac
4681 args['nics'] = [(ip, bridge, mac)]
4682 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4683 nl = [self.cfg.GetMasterNode(),
4684 self.instance.primary_node] + list(self.instance.secondary_nodes)
4687 def CheckPrereq(self):
4688 """Check prerequisites.
4690 This only checks the instance list against the existing names.
4693 # FIXME: all the parameters could be checked before, in ExpandNames, or in
4694 # a separate CheckArguments function, if we implement one, so the operation
4695 # can be aborted without waiting for any lock, should it have an error...
4696 self.ip = getattr(self.op, "ip", None)
4697 self.mac = getattr(self.op, "mac", None)
4698 self.bridge = getattr(self.op, "bridge", None)
4699 self.kernel_path = getattr(self.op, "kernel_path", None)
4700 self.initrd_path = getattr(self.op, "initrd_path", None)
4701 self.force = getattr(self.op, "force", None)
4702 all_parms = [self.ip, self.bridge, self.mac]
4703 if (all_parms.count(None) == len(all_parms) and
4704 not self.op.hvparams and
4705 not self.op.beparams):
4706 raise errors.OpPrereqError("No changes submitted")
4707 for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4708 val = self.op.beparams.get(item, None)
4712 except ValueError, err:
4713 raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4714 self.op.beparams[item] = val
4715 if self.ip is not None:
4717 if self.ip.lower() == "none":
4720 if not utils.IsValidIP(self.ip):
4721 raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4724 self.do_bridge = (self.bridge is not None)
4725 if self.mac is not None:
4726 if self.cfg.IsMacInUse(self.mac):
4727 raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4729 if not utils.IsValidMac(self.mac):
4730 raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4732 # checking the new params on the primary/secondary nodes
4734 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4735 assert self.instance is not None, \
4736 "Cannot retrieve locked instance %s" % self.op.instance_name
4737 pnode = self.instance.primary_node
4739 nodelist.extend(instance.secondary_nodes)
4741 # hvparams processing
4742 if self.op.hvparams:
4743 i_hvdict = copy.deepcopy(instance.hvparams)
4744 for key, val in self.op.hvparams.iteritems():
4752 cluster = self.cfg.GetClusterInfo()
4753 hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4756 hypervisor.GetHypervisor(
4757 instance.hypervisor).CheckParameterSyntax(hv_new)
4758 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4759 self.hv_new = hv_new # the new actual values
4760 self.hv_inst = i_hvdict # the new dict (without defaults)
4762 self.hv_new = self.hv_inst = {}
4764 # beparams processing
4765 if self.op.beparams:
4766 i_bedict = copy.deepcopy(instance.beparams)
4767 for key, val in self.op.beparams.iteritems():
4775 cluster = self.cfg.GetClusterInfo()
4776 be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4778 self.be_new = be_new # the new actual values
4779 self.be_inst = i_bedict # the new dict (without defaults)
4781 self.be_new = self.be_inst = {}
4785 if constants.BE_MEMORY in self.op.beparams and not self.force:
4786 mem_check_list = [pnode]
4787 if be_new[constants.BE_AUTO_BALANCE]:
4788 # either we changed auto_balance to yes or it was from before
4789 mem_check_list.extend(instance.secondary_nodes)
4790 instance_info = self.rpc.call_instance_info(pnode, instance.name,
4791 instance.hypervisor)
4792 nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4793 instance.hypervisor)
4795 if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4796 # Assume the primary node is unreachable and go ahead
4797 self.warn.append("Can't get info from primary node %s" % pnode)
4800 current_mem = instance_info['memory']
4802 # Assume instance not running
4803 # (there is a slight race condition here, but it's not very probable,
4804 # and we have no other way to check)
4806 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4807 nodeinfo[pnode]['memory_free'])
4809 raise errors.OpPrereqError("This change will prevent the instance"
4810 " from starting, due to %d MB of memory"
4811 " missing on its primary node" % miss_mem)
4813 if be_new[constants.BE_AUTO_BALANCE]:
4814 for node in instance.secondary_nodes:
4815 if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4816 self.warn.append("Can't get info from secondary node %s" % node)
4817 elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
4818 self.warn.append("Not enough memory to failover instance to"
4819 " secondary node %s" % node)
4823 def Exec(self, feedback_fn):
4824 """Modifies an instance.
4826 All parameters take effect only at the next restart of the instance.
4828 # Process here the warnings from CheckPrereq, as we don't have a
4829 # feedback_fn there.
4830 for warn in self.warn:
4831 feedback_fn("WARNING: %s" % warn)
4834 instance = self.instance
4836 instance.nics[0].ip = self.ip
4837 result.append(("ip", self.ip))
4839 instance.nics[0].bridge = self.bridge
4840 result.append(("bridge", self.bridge))
4842 instance.nics[0].mac = self.mac
4843 result.append(("mac", self.mac))
4844 if self.op.hvparams:
4845 instance.hvparams = self.hv_new
4846 for key, val in self.op.hvparams.iteritems():
4847 result.append(("hv/%s" % key, val))
4848 if self.op.beparams:
4849 instance.beparams = self.be_inst
4850 for key, val in self.op.beparams.iteritems():
4851 result.append(("be/%s" % key, val))
4853 self.cfg.Update(instance)
4858 class LUQueryExports(NoHooksLU):
4859 """Query the exports list
4862 _OP_REQP = ['nodes']
4865 def ExpandNames(self):
4866 self.needed_locks = {}
4867 self.share_locks[locking.LEVEL_NODE] = 1
4868 if not self.op.nodes:
4869 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4871 self.needed_locks[locking.LEVEL_NODE] = \
4872 _GetWantedNodes(self, self.op.nodes)
4874 def CheckPrereq(self):
4875 """Check prerequisites.
4878 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
4880 def Exec(self, feedback_fn):
4881 """Compute the list of all the exported system images.
4884 a dictionary with the structure node->(export-list)
4885 where export-list is a list of the instances exported on
4889 return self.rpc.call_export_list(self.nodes)
4892 class LUExportInstance(LogicalUnit):
4893 """Export an instance to an image in the cluster.
4896 HPATH = "instance-export"
4897 HTYPE = constants.HTYPE_INSTANCE
4898 _OP_REQP = ["instance_name", "target_node", "shutdown"]
4901 def ExpandNames(self):
4902 self._ExpandAndLockInstance()
4903 # FIXME: lock only instance primary and destination node
4905 # Sad but true, for now we have to lock all nodes, as we don't know where
4906 # the previous export might be, and in this LU we search for it and
4907 # remove it from its current node. In the future we could fix this by:
4908 # - making a tasklet to search (share-lock all), then create the new one,
4909 # then one to remove, after
4910 # - removing the removal operation altogether
4911 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4913 def DeclareLocks(self, level):
4914 """Last minute lock declaration."""
4915 # All nodes are locked anyway, so nothing to do here.
4917 def BuildHooksEnv(self):
4920 This will run on the master, primary node and target node.
4924 "EXPORT_NODE": self.op.target_node,
4925 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4927 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4928 nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
4929 self.op.target_node]
4932 def CheckPrereq(self):
4933 """Check prerequisites.
4935 This checks that the instance and node names are valid.
4938 instance_name = self.op.instance_name
4939 self.instance = self.cfg.GetInstanceInfo(instance_name)
4940 assert self.instance is not None, \
4941 "Cannot retrieve locked instance %s" % self.op.instance_name
4943 self.dst_node = self.cfg.GetNodeInfo(
4944 self.cfg.ExpandNodeName(self.op.target_node))
4946 assert self.dst_node is not None, \
4947 "Cannot retrieve locked node %s" % self.op.target_node
4949 # instance disk type verification
4950 for disk in self.instance.disks:
4951 if disk.dev_type == constants.LD_FILE:
4952 raise errors.OpPrereqError("Export not supported for instances with"
4953 " file-based disks")
4955 def Exec(self, feedback_fn):
4956 """Export an instance to an image in the cluster.
4959 instance = self.instance
4960 dst_node = self.dst_node
4961 src_node = instance.primary_node
4962 if self.op.shutdown:
4963 # shutdown the instance, but not the disks
4964 if not self.rpc.call_instance_shutdown(src_node, instance):
4965 raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4966 (instance.name, src_node))
4968 vgname = self.cfg.GetVGName()
4973 for disk in instance.disks:
4974 # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4975 new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
4977 if not new_dev_name:
4978 self.LogWarning("Could not snapshot block device %s on node %s",
4979 disk.logical_id[1], src_node)
4980 snap_disks.append(False)
4982 new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4983 logical_id=(vgname, new_dev_name),
4984 physical_id=(vgname, new_dev_name),
4985 iv_name=disk.iv_name)
4986 snap_disks.append(new_dev)
4989 if self.op.shutdown and instance.status == "up":
4990 if not self.rpc.call_instance_start(src_node, instance, None):
4991 _ShutdownInstanceDisks(self, instance)
4992 raise errors.OpExecError("Could not start instance")
4994 # TODO: check for size
4996 cluster_name = self.cfg.GetClusterName()
4997 for idx, dev in enumerate(snap_disks):
4999 if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
5000 instance, cluster_name, idx):
5001 self.LogWarning("Could not export block device %s from node %s to"
5002 " node %s", dev.logical_id[1], src_node,
5004 if not self.rpc.call_blockdev_remove(src_node, dev):
5005 self.LogWarning("Could not remove snapshot block device %s from node"
5006 " %s", dev.logical_id[1], src_node)
5008 if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
5009 self.LogWarning("Could not finalize export for instance %s on node %s",
5010 instance.name, dst_node.name)
5012 nodelist = self.cfg.GetNodeList()
5013 nodelist.remove(dst_node.name)
5015 # on one-node clusters nodelist will be empty after the removal;
5016 # if we proceeded, the backup would be removed because OpQueryExports
5017 # substitutes an empty list with the full cluster node list.
5019 exportlist = self.rpc.call_export_list(nodelist)
5020 for node in exportlist:
5021 if instance.name in exportlist[node]:
5022 if not self.rpc.call_export_remove(node, instance.name):
5023 self.LogWarning("Could not remove older export for instance %s"
5024 " on node %s", instance.name, node)
5027 class LURemoveExport(NoHooksLU):
5028 """Remove exports related to the named instance.
5031 _OP_REQP = ["instance_name"]
5034 def ExpandNames(self):
5035 self.needed_locks = {}
5036 # We need all nodes to be locked in order for RemoveExport to work, but we
5037 # don't need to lock the instance itself, as nothing will happen to it (and
5038 # we can also remove exports for a removed instance)
5039 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5041 def CheckPrereq(self):
5042 """Check prerequisites.
5046 def Exec(self, feedback_fn):
5047 """Remove any export.
5050 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
5051 # If the instance was not found we'll try with the name that was passed in.
5052 # This will only work if it was an FQDN, though.
5054 if not instance_name:
5056 instance_name = self.op.instance_name
5058 exportlist = self.rpc.call_export_list(self.acquired_locks[
5059 locking.LEVEL_NODE])
5061 for node in exportlist:
5062 if instance_name in exportlist[node]:
5064 if not self.rpc.call_export_remove(node, instance_name):
5065 logging.error("Could not remove export for instance %s"
5066 " on node %s", instance_name, node)
5068 if fqdn_warn and not found:
5069 feedback_fn("Export not found. If trying to remove an export belonging"
5070 " to a deleted instance please use its Fully Qualified"
5074 class TagsLU(NoHooksLU):
5077 This is an abstract class which is the parent of all the other tag-related LUs.
5081 def ExpandNames(self):
5082 self.needed_locks = {}
5083 if self.op.kind == constants.TAG_NODE:
5084 name = self.cfg.ExpandNodeName(self.op.name)
5086 raise errors.OpPrereqError("Invalid node name (%s)" %
5089 self.needed_locks[locking.LEVEL_NODE] = name
5090 elif self.op.kind == constants.TAG_INSTANCE:
5091 name = self.cfg.ExpandInstanceName(self.op.name)
5093 raise errors.OpPrereqError("Invalid instance name (%s)" %
5096 self.needed_locks[locking.LEVEL_INSTANCE] = name
5098 def CheckPrereq(self):
5099 """Check prerequisites.
5102 if self.op.kind == constants.TAG_CLUSTER:
5103 self.target = self.cfg.GetClusterInfo()
5104 elif self.op.kind == constants.TAG_NODE:
5105 self.target = self.cfg.GetNodeInfo(self.op.name)
5106 elif self.op.kind == constants.TAG_INSTANCE:
5107 self.target = self.cfg.GetInstanceInfo(self.op.name)
5109 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
5113 class LUGetTags(TagsLU):
5114 """Returns the tags of a given object.
5117 _OP_REQP = ["kind", "name"]
5120 def Exec(self, feedback_fn):
5121 """Returns the tag list.
5124 return list(self.target.GetTags())
5127 class LUSearchTags(NoHooksLU):
5128 """Searches the tags for a given pattern.
5131 _OP_REQP = ["pattern"]
5134 def ExpandNames(self):
5135 self.needed_locks = {}
5137 def CheckPrereq(self):
5138 """Check prerequisites.
5140 This checks the pattern passed for validity by compiling it.
5144 self.re = re.compile(self.op.pattern)
5145 except re.error, err:
5146 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5147 (self.op.pattern, err))
5149 def Exec(self, feedback_fn):
5150 """Returns the tag list.
5154 tgts = [("/cluster", cfg.GetClusterInfo())]
5155 ilist = cfg.GetAllInstancesInfo().values()
5156 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5157 nlist = cfg.GetAllNodesInfo().values()
5158 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5160 for path, target in tgts:
5161 for tag in target.GetTags():
5162 if self.re.search(tag):
5163 results.append((path, tag))
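# Example (illustrative): a tag "staging" set on instance
# "inst1.example.com" would be reported as the pair
# ("/instances/inst1.example.com", "staging").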
5167 class LUAddTags(TagsLU):
5168 """Sets a tag on a given object.
5171 _OP_REQP = ["kind", "name", "tags"]
5174 def CheckPrereq(self):
5175 """Check prerequisites.
5177 This checks the type and length of the tag name and value.
5180 TagsLU.CheckPrereq(self)
5181 for tag in self.op.tags:
5182 objects.TaggableObject.ValidateTag(tag)
5184 def Exec(self, feedback_fn):
5189 for tag in self.op.tags:
5190 self.target.AddTag(tag)
5191 except errors.TagError, err:
5192 raise errors.OpExecError("Error while setting tag: %s" % str(err))
5194 self.cfg.Update(self.target)
5195 except errors.ConfigurationError:
5196 raise errors.OpRetryError("There has been a modification to the"
5197 " config file and the operation has been"
5198 " aborted. Please retry.")
5201 class LUDelTags(TagsLU):
5202 """Delete a list of tags from a given object.
5205 _OP_REQP = ["kind", "name", "tags"]
5208 def CheckPrereq(self):
5209 """Check prerequisites.
5211 This checks that we have the given tag.
5214 TagsLU.CheckPrereq(self)
5215 for tag in self.op.tags:
5216 objects.TaggableObject.ValidateTag(tag)
5217 del_tags = frozenset(self.op.tags)
5218 cur_tags = self.target.GetTags()
5219 if not del_tags <= cur_tags:
5220 diff_tags = del_tags - cur_tags
5221 diff_names = ["'%s'" % tag for tag in diff_tags]
5223 raise errors.OpPrereqError("Tag(s) %s not found" %
5224 (",".join(diff_names)))
5226 def Exec(self, feedback_fn):
5227 """Remove the tag from the object.
5230 for tag in self.op.tags:
5231 self.target.RemoveTag(tag)
5233 self.cfg.Update(self.target)
5234 except errors.ConfigurationError:
5235 raise errors.OpRetryError("There has been a modification to the"
5236 " config file and the operation has been"
5237 " aborted. Please retry.")
5240 class LUTestDelay(NoHooksLU):
5241 """Sleep for a specified amount of time.
5243 This LU sleeps on the master and/or nodes for a specified amount of time.
5247 _OP_REQP = ["duration", "on_master", "on_nodes"]
5250 def ExpandNames(self):
5251 """Expand names and set required locks.
5253 This expands the node list, if any.
5256 self.needed_locks = {}
5257 if self.op.on_nodes:
5258 # _GetWantedNodes can be used here, but is not always appropriate to use
5259 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
5261 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
5262 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
5264 def CheckPrereq(self):
5265 """Check prerequisites.
5269 def Exec(self, feedback_fn):
5270 """Do the actual sleep.
5273 if self.op.on_master:
5274 if not utils.TestDelay(self.op.duration):
5275 raise errors.OpExecError("Error during master delay test")
5276 if self.op.on_nodes:
5277 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
5279 raise errors.OpExecError("Complete failure from rpc call")
5280 for node, node_result in result.items():
5282 raise errors.OpExecError("Failure during rpc call to node %s,"
5283 " result: %s" % (node, node_result))
5286 class IAllocator(object):
5287 """IAllocator framework.
5289 An IAllocator instance has four sets of attributes:
5290 - cfg that is needed to query the cluster
5291 - input data (all members of the _KEYS class attribute are required)
5292 - four buffer attributes (in|out_data|text), that represent the
5293 input (to the external script) in text and data structure format,
5294 and the output from it, again in two formats
5295 - the result variables from the script (success, info, nodes) for easy usage
5300 "mem_size", "disks", "disk_template",
5301 "os", "tags", "nics", "vcpus",
5307 def __init__(self, lu, mode, name, **kwargs):
5309 # init buffer variables
5310 self.in_text = self.out_text = self.in_data = self.out_data = None
5311 # init all input fields so that pylint is happy
5314 self.mem_size = self.disks = self.disk_template = None
5315 self.os = self.tags = self.nics = self.vcpus = None
5316 self.relocate_from = None
5318 self.required_nodes = None
5319 # init result fields
5320 self.success = self.info = self.nodes = None
5321 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5322 keyset = self._ALLO_KEYS
5323 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5324 keyset = self._RELO_KEYS
5326 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5327 " IAllocator" % self.mode)
5329 if key not in keyset:
5330 raise errors.ProgrammerError("Invalid input parameter '%s' to"
5331 " IAllocator" % key)
5332 setattr(self, key, kwargs[key])
5334 if key not in kwargs:
5335 raise errors.ProgrammerError("Missing input parameter '%s' to"
5336 " IAllocator" % key)
5337 self._BuildInputData()
5339 def _ComputeClusterData(self):
5340 """Compute the generic allocator input data.
5342 This is the data that is independent of the actual operation.
5346 cluster_info = cfg.GetClusterInfo()
5350 "cluster_name": cfg.GetClusterName(),
5351 "cluster_tags": list(cluster_info.GetTags()),
5352 "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5353 # we don't have job IDs
5357 cluster = self.cfg.GetClusterInfo()
5358 for iname in cfg.GetInstanceList():
5359 i_obj = cfg.GetInstanceInfo(iname)
5360 i_list.append((i_obj, cluster.FillBE(i_obj)))
5364 node_list = cfg.GetNodeList()
5365 # FIXME: here we have only one hypervisor's information, but
5366 # instances can belong to different hypervisors
5367 node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5368 cfg.GetHypervisorType())
5369 for nname in node_list:
5370 ninfo = cfg.GetNodeInfo(nname)
5371 if nname not in node_data or not isinstance(node_data[nname], dict):
5372 raise errors.OpExecError("Can't get data for node %s" % nname)
5373 remote_info = node_data[nname]
5374 for attr in ['memory_total', 'memory_free', 'memory_dom0',
5375 'vg_size', 'vg_free', 'cpu_total']:
5376 if attr not in remote_info:
5377 raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5380 remote_info[attr] = int(remote_info[attr])
5381 except ValueError, err:
5382 raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5383 " %s" % (nname, attr, str(err)))
5384 # compute memory used by primary instances
5385 i_p_mem = i_p_up_mem = 0
5386 for iinfo, beinfo in i_list:
5387 if iinfo.primary_node == nname:
5388 i_p_mem += beinfo[constants.BE_MEMORY]
5389 if iinfo.status == "up":
5390 i_p_up_mem += beinfo[constants.BE_MEMORY]
5392 # compute memory used by instances
5394 "tags": list(ninfo.GetTags()),
5395 "total_memory": remote_info['memory_total'],
5396 "reserved_memory": remote_info['memory_dom0'],
5397 "free_memory": remote_info['memory_free'],
5398 "i_pri_memory": i_p_mem,
5399 "i_pri_up_memory": i_p_up_mem,
5400 "total_disk": remote_info['vg_size'],
5401 "free_disk": remote_info['vg_free'],
5402 "primary_ip": ninfo.primary_ip,
5403 "secondary_ip": ninfo.secondary_ip,
5404 "total_cpus": remote_info['cpu_total'],
5406 node_results[nname] = pnr
5407 data["nodes"] = node_results
5411 for iinfo, beinfo in i_list:
5412 nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5413 for n in iinfo.nics]
5415 "tags": list(iinfo.GetTags()),
5416 "should_run": iinfo.status == "up",
5417 "vcpus": beinfo[constants.BE_VCPUS],
5418 "memory": beinfo[constants.BE_MEMORY],
5420 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5422 "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5423 "disk_template": iinfo.disk_template,
5424 "hypervisor": iinfo.hypervisor,
5426 instance_data[iinfo.name] = pir
5428 data["instances"] = instance_data
5432 def _AddNewInstance(self):
5433 """Add new instance data to allocator structure.
5435 This in combination with _ComputeClusterData will create the
5436 correct structure needed as input for the allocator.
5438 The checks for the completeness of the opcode must have already been done.
5443 if len(self.disks) != 2:
5444 raise errors.OpExecError("Only two-disk configurations supported")
5446 disk_space = _ComputeDiskSize(self.disk_template,
5447 self.disks[0]["size"], self.disks[1]["size"])
5449 if self.disk_template in constants.DTS_NET_MIRROR:
5450 self.required_nodes = 2
5452 self.required_nodes = 1
5456 "disk_template": self.disk_template,
5459 "vcpus": self.vcpus,
5460 "memory": self.mem_size,
5461 "disks": self.disks,
5462 "disk_space_total": disk_space,
5464 "required_nodes": self.required_nodes,
5466 data["request"] = request
5468 def _AddRelocateInstance(self):
5469 """Add relocate instance data to allocator structure.
5471 This in combination with _ComputeClusterData will create the
5472 correct structure needed as input for the allocator.
5474 The checks for the completeness of the opcode must have already been done.
5478 instance = self.lu.cfg.GetInstanceInfo(self.name)
5479 if instance is None:
5480 raise errors.ProgrammerError("Unknown instance '%s' passed to"
5481 " IAllocator" % self.name)
5483 if instance.disk_template not in constants.DTS_NET_MIRROR:
5484 raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5486 if len(instance.secondary_nodes) != 1:
5487 raise errors.OpPrereqError("Instance does not have exactly one secondary node")
5489 self.required_nodes = 1
5491 disk_space = _ComputeDiskSize(instance.disk_template,
5492 instance.disks[0].size,
5493 instance.disks[1].size)
5498 "disk_space_total": disk_space,
5499 "required_nodes": self.required_nodes,
5500 "relocate_from": self.relocate_from,
5502 self.in_data["request"] = request
5504 def _BuildInputData(self):
5505 """Build input data structures.
5508 self._ComputeClusterData()
5510 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5511 self._AddNewInstance()
5513 self._AddRelocateInstance()
5515 self.in_text = serializer.Dump(self.in_data)
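# Rough shape of the serialized input (sketch limited to the keys built in
# _ComputeClusterData and the _Add*Instance methods above):
#
#   {"cluster_name": ..., "cluster_tags": [...],
#    "nodes": {name: {"total_memory": ..., "free_disk": ..., ...}},
#    "instances": {name: {"memory": ..., "disks": [...], ...}},
#    "request": {"required_nodes": ..., "disk_space_total": ..., ...}}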
5517 def Run(self, name, validate=True, call_fn=None):
5518 """Run an instance allocator and return the results.
5522 call_fn = self.lu.rpc.call_iallocator_runner
5525 result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
5527 if not isinstance(result, (list, tuple)) or len(result) != 4:
5528 raise errors.OpExecError("Invalid result from master iallocator runner")
5530 rcode, stdout, stderr, fail = result
5532 if rcode == constants.IARUN_NOTFOUND:
5533 raise errors.OpExecError("Can't find allocator '%s'" % name)
5534 elif rcode == constants.IARUN_FAILURE:
5535 raise errors.OpExecError("Instance allocator call failed: %s,"
5536 " output: %s" % (fail, stdout+stderr))
5537 self.out_text = stdout
5539 self._ValidateResult()
5541 def _ValidateResult(self):
5542 """Process the allocator results.
5544 This will process the allocator results and, if successful, save them in
5545 self.out_data and the other result attributes.
5549 rdict = serializer.Load(self.out_text)
5550 except Exception, err:
5551 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5553 if not isinstance(rdict, dict):
5554 raise errors.OpExecError("Can't parse iallocator results: not a dict")
5556 for key in "success", "info", "nodes":
5557 if key not in rdict:
5558 raise errors.OpExecError("Can't parse iallocator results:"
5559 " missing key '%s'" % key)
5560 setattr(self, key, rdict[key])
5562 if not isinstance(rdict["nodes"], list):
5563 raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5565 self.out_data = rdict
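# Example of a minimal well-formed allocator reply (illustrative, matching
# only the checks above):
#
#   {"success": true, "info": "allocation successful", "nodes": ["node3"]}
#
# i.e. it must contain at least the three keys checked for, with "nodes"
# being a list.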
5568 class LUTestAllocator(NoHooksLU):
5569 """Run allocator tests.
5571 This LU runs the allocator tests
5574 _OP_REQP = ["direction", "mode", "name"]
5576 def CheckPrereq(self):
5577 """Check prerequisites.
5579 This checks the opcode parameters depending on the direction and mode of the test.
5582 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5583 for attr in ["name", "mem_size", "disks", "disk_template",
5584 "os", "tags", "nics", "vcpus"]:
5585 if not hasattr(self.op, attr):
5586 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
5588 iname = self.cfg.ExpandInstanceName(self.op.name)
5589 if iname is not None:
5590 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
5592 if not isinstance(self.op.nics, list):
5593 raise errors.OpPrereqError("Invalid parameter 'nics'")
5594 for row in self.op.nics:
5595 if (not isinstance(row, dict) or
5598 "bridge" not in row):
5599 raise errors.OpPrereqError("Invalid contents of the"
5600 " 'nics' parameter")
5601 if not isinstance(self.op.disks, list):
5602 raise errors.OpPrereqError("Invalid parameter 'disks'")
5603 if len(self.op.disks) != 2:
5604 raise errors.OpPrereqError("Only two-disk configurations supported")
5605 for row in self.op.disks:
5606 if (not isinstance(row, dict) or
5607 "size" not in row or
5608 not isinstance(row["size"], int) or
5609 "mode" not in row or
5610 row["mode"] not in ['r', 'w']):
5611 raise errors.OpPrereqError("Invalid contents of the"
5612 " 'disks' parameter")
5613 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
5614 if not hasattr(self.op, "name"):
5615 raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
5616 fname = self.cfg.ExpandInstanceName(self.op.name)
5618 raise errors.OpPrereqError("Instance '%s' not found for relocation" %
5620 self.op.name = fname
5621 self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
5623 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
5626 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
5627 if not hasattr(self.op, "allocator") or self.op.allocator is None:
5628 raise errors.OpPrereqError("Missing allocator name")
5629 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
5630 raise errors.OpPrereqError("Wrong allocator test '%s'" %
5633 def Exec(self, feedback_fn):
5634 """Run the allocator test.
5637 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5638 ial = IAllocator(self,
5641 mem_size=self.op.mem_size,
5642 disks=self.op.disks,
5643 disk_template=self.op.disk_template,
5647 vcpus=self.op.vcpus,
5650 ial = IAllocator(self,
5653 relocate_from=list(self.relocate_from),
5656 if self.op.direction == constants.IALLOCATOR_DIR_IN:
5657 result = ial.in_text
5659 ial.Run(self.op.allocator, validate=False)
5660 result = ial.out_text