# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201
import time
import re
import logging

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.
  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.cfg = context.cfg
    self.context = context
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
        raise errors.OpPrereqError("Required parameter '%s' missing" %
    """Returns the SshRunner object

    """
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.
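
    A minimal illustrative check (a sketch only; the 'force' parameter here
    is hypothetical, not something this base class defines)::

      force = getattr(self.op, "force", False)
      if not isinstance(force, bool):
        raise errors.OpPrereqError("'force' must be a boolean")
      self.op.force = force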

    """

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None
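
    If an LU only reads node data it can additionally mark the node level as
    shared (an illustrative sketch, not a requirement)::

      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
      }
      self.share_locks[locking.LEVEL_NODE] = 1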

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
      self.needed_locks = {} # Exclusive LUs don't need locks.
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS
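
    An illustrative override (a sketch of the common pattern), recomputing
    node locks only once the instance locks have been acquired::

      def DeclareLocks(self, level):
        if level == locking.LEVEL_NODE:
          self._LockInstancesNodes()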

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes to return, an empty list should be used (and
    not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.
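
    A minimal illustrative return value (a sketch; key names vary per LU)::

      env = {"NODE_NAME": self.op.node_name}
      nl = [self.cfg.GetMasterNode()]
      return env, nl, nl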

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None

    @return: the new Exec result, based on the previous result
        and hook results
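
    The default behaviour is therefore equivalent to this sketch::

      return lu_result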

    """

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name
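
  # Illustrative usage (a sketch): an instance LU would typically call this
  # helper from its ExpandNames and defer node locks to DeclareLocks, e.g.:
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE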

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param nodes: list of node names or None for all nodes

  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
                                 " non-empty list of nodes whose name is to be expanded.")

    node = lu.cfg.ExpandNodeName(name)
      raise errors.OpPrereqError("No such node name '%s'" % name)

  return utils.NiceSort(wanted)
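
# Illustrative usage (a sketch): query-style LUs expand the user-supplied
# node list from within ExpandNames, e.g.:
#
#   self.needed_locks[locking.LEVEL_NODE] = _GetWantedNodes(self, self.op.nodes)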


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances

  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  delta = f.NonMatching(selected)
    raise errors.OpPrereqError("Unknown output fields selected: %s"


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @param status: the desired status of the instance
  @param memory: the memory size of the instance
  @param vcpus: the count of VCPUs the instance has
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has

  @return: the hook environment for this instance

  """
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,

    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac

  env["INSTANCE_NIC_COUNT"] = nic_count
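
  # Illustrative result (a sketch): a single-NIC instance yields, besides the
  # INSTANCE_* keys above, INSTANCE_NIC_COUNT=1 plus INSTANCE_NIC0_IP,
  # INSTANCE_NIC0_BRIDGE and INSTANCE_NIC0_HWADDR; the hooks runner later
  # prefixes every key with "GANETI_".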


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @param override: dictionary with key/values that will override
      our values

  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
    args.update(override)
  return _BuildInstanceHookEnv(**args)
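
# Illustrative usage (a sketch): instance LUs usually build their hook
# environment directly from the instance object; the optional override dict
# uses the argument names of _BuildInstanceHookEnv, e.g.:
#
#   env = _BuildInstanceHookEnvByObject(self, self.instance,
#                                       override={"status": "up"})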


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
    raise errors.OpPrereqError("One or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files):
    """Run multiple tests against a node.

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have

    """
    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn(" - ERROR: unable to verify node %s." % (node,))

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not remote_version:
      feedback_fn(" - ERROR: connection to %s failed" % (node))

    if local_version != remote_version:
      feedback_fn(" - ERROR: sw version mismatch: master %s, node(%s) %s" %
                  (local_version, node, remote_version))

    # checks vg existence and size > 20G

    vglist = node_result.get(constants.NV_VGLIST, None)
      feedback_fn(" - ERROR: unable to check volume groups on node %s." %
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
        feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node))

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      feedback_fn(" - ERROR: node hasn't returned file checksum data")
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            feedback_fn(" - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            feedback_fn(" - ERROR: file '%s' has wrong checksum" % file_name)
            # not candidate and this is not a must-have file
            feedback_fn(" - ERROR: non master-candidate has old/wrong file"
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn(" - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    if constants.NV_NODELIST not in node_result:
      feedback_fn(" - ERROR: node hasn't returned node ssh connectivity data")
      if node_result[constants.NV_NODELIST]:
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn(" - ERROR: ssh communication with node '%s': %s" %
                      (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      feedback_fn(" - ERROR: node hasn't returned node tcp connectivity data")
      if node_result[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
          feedback_fn(" - ERROR: tcp communication with node '%s': %s" %
                      (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn(" - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    node_current = instanceconfig.primary_node

    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn(" - ERROR: volume %s missing on node %s" %

    if instanceconfig.status != 'down':
      if ((node_current not in node_instance or
           instance not in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn(" - ERROR: instance %s not running on node %s" %
                    (instance, node_current))

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn(" - ERROR: instance %s should not run on node %s" %

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn(" - ERROR: volume %s on node %s should not exist" %

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn(" - ERROR: instance %s on node %s should not exist" %
                      (runninginstance, node))

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn(" - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    Cluster-Verify hooks just run in the post phase and their failure causes
    the output to be logged in the verify output and the verification to fail.

    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn(" - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes

    # FIXME: verify OS list
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_LVLIST: vg_name,
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VGLIST: None,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    for node_i in nodeinfo:
      nresult = all_nvinfo[node].data

        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)

      if node == master_node:
      elif node_i.master_candidate:
        ntype = "master candidate"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn(" - ERROR: connection to %s failed" % (node,))

      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files)

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if isinstance(lvdata, basestring):
        feedback_fn(" - ERROR: LVM problem on node %s: %s" %
                    (node, lvdata.encode('string_escape')))
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn(" - ERROR: connection to %s failed (lvlist)" % (node,))
        node_volume[node] = lvdata

      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn(" - ERROR: connection to %s failed (instancelist)" %

      node_instance[node] = idata

      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn(" - ERROR: connection to %s failed (hvinfo)" % (node,))

          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          "sinst-by-pnode": {},
        feedback_fn(" - ERROR: invalid value returned from node %s" % (node,))

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn, n_offline)

      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      elif pnode not in n_offline:
        feedback_fn(" - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn(" - WARNING: multiple secondaries for instance %s"

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        elif snode not in n_offline:
          feedback_fn(" - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

        if snode in n_offline:
          inst_nodes_offline.append(snode)

      if inst_nodes_offline:
        # warn that the instance lives on offline nodes, and set bad=True
        feedback_fn(" - ERROR: instance lives on offline node(s) %s" %
                    ", ".join(inst_nodes_offline))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)

    feedback_fn("* Other Notes")
      feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

      feedback_fn(" - NOTICE: %d offline node(s) found." % len(n_offline))

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn(" - ERROR: general communication failure")
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
              # no need to warn or set fail return value
            feedback_fn(" Communication failure in hooks execution")
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn(" Node %s:" % node_name)
                show_node_header = False
              feedback_fn(" ERROR: Script %s failed, output:" % script)
              output = indent_re.sub(' ', output)
              feedback_fn("%s" % output)


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    for inst in instances:
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

      lvs = node_lvs[node]
          self.LogWarning("Connection to node %s failed: %s" %
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
        res_nodes.append(node)

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
        node_list.remove(master)
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check

  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
  return disk.dev_type == constants.LD_LV
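
# Illustrative usage (a sketch): callers walk every disk of every instance
# and refuse to disable LVM storage if anything is still LVM-backed, e.g.:
#
#   if any(_RecursiveCheckIfLVMBased(disk) for disk in inst.disks):
#     raise errors.OpPrereqError("Cannot disable lvm storage while"
#                                " lvm-based instances exist")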


class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER

  def CheckParameters(self):
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              constants.MIN_VG_SIZE)
          raise errors.OpPrereqError("Error on node '%s': %s" %

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.CheckBEParams(self.op.beparams)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
    rstats = rstats.data
    for i in range(len(rstats)):
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    time.sleep(min(60, max_time))

    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    if rstats.failed or not rstats.data:
      logging.warning("Node %s: disk degraded, not found or node down", node)
      result = result and (not rstats.data[idx])

    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]

  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}

    """
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)

    for os_name, os_data in pol.iteritems():
      for field in self.op.output_fields:
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
          raise errors.ParameterError(field)


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    logging.info("Stopping the node daemon and removing configs from node %s",

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  _FIELDS_DYNAMIC = utils.FieldSet(
    "mtotal", "mnode", "mfree",

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

      self.wanted = _GetWantedNodes(self, self.op.names)
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
          live_data[name] = {}
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    for node in nodelist:
      for field in self.op.output_fields:
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    for node in nodenames:
      if node not in volumes or volumes[node].failed or not volumes[node].data:

      node_vols = volumes[node].data[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        for field in self.op.output_fields:
          elif field == "phys":
          elif field == "name":
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
              if node not in lv_by_node[inst]:
              if vol['name'] in lv_by_node[inst][node]:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)
1948 class LUAddNode(LogicalUnit):
1949 """Logical unit for adding node to the cluster.
1953 HTYPE = constants.HTYPE_NODE
1954 _OP_REQP = ["node_name"]
1956 def BuildHooksEnv(self):
1959 This will run on all nodes before, and on all nodes + the new node after.
1963 "OP_TARGET": self.op.node_name,
1964 "NODE_NAME": self.op.node_name,
1965 "NODE_PIP": self.op.primary_ip,
1966 "NODE_SIP": self.op.secondary_ip,
1968 nodes_0 = self.cfg.GetNodeList()
1969 nodes_1 = nodes_0 + [self.op.node_name, ]
1970 return env, nodes_0, nodes_1
1972 def CheckPrereq(self):
1973 """Check prerequisites.
1976 - the new node is not already in the config
1978 - its parameters (single/dual homed) matches the cluster
1980 Any errors are signalled by raising errors.OpPrereqError.
1983 node_name = self.op.node_name
1986 dns_data = utils.HostInfo(node_name)
1988 node = dns_data.name
1989 primary_ip = self.op.primary_ip = dns_data.ip
1990 secondary_ip = getattr(self.op, "secondary_ip", None)
1991 if secondary_ip is None:
1992 secondary_ip = primary_ip
1993 if not utils.IsValidIP(secondary_ip):
1994 raise errors.OpPrereqError("Invalid secondary IP given")
1995 self.op.secondary_ip = secondary_ip
1997 node_list = cfg.GetNodeList()
1998 if not self.op.readd and node in node_list:
1999 raise errors.OpPrereqError("Node %s is already in the configuration" %
2001 elif self.op.readd and node not in node_list:
2002 raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2004 for existing_node_name in node_list:
2005 existing_node = cfg.GetNodeInfo(existing_node_name)
2007 if self.op.readd and node == existing_node_name:
2008 if (existing_node.primary_ip != primary_ip or
2009 existing_node.secondary_ip != secondary_ip):
2010 raise errors.OpPrereqError("Readded node doesn't have the same IP"
2011 " address configuration as before")
2014 if (existing_node.primary_ip == primary_ip or
2015 existing_node.secondary_ip == primary_ip or
2016 existing_node.primary_ip == secondary_ip or
2017 existing_node.secondary_ip == secondary_ip):
2018 raise errors.OpPrereqError("New node ip address(es) conflict with"
2019 " existing node %s" % existing_node.name)
2021 # check that the type of the node (single versus dual homed) is the
2022 # same as for the master
2023 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2024 master_singlehomed = myself.secondary_ip == myself.primary_ip
2025 newbie_singlehomed = secondary_ip == primary_ip
2026 if master_singlehomed != newbie_singlehomed:
2027 if master_singlehomed:
2028 raise errors.OpPrereqError("The master has no private ip but the"
2029 " new node has one")
2031 raise errors.OpPrereqError("The master has a private ip but the"
2032 " new node doesn't have one")
2034 # check reachability
2035 if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2036 raise errors.OpPrereqError("Node not reachable by ping")
2038 if not newbie_singlehomed:
2039 # check reachability from my secondary ip to newbie's secondary ip
2040 if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2041 source=myself.secondary_ip):
2042 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2043 " based ping to noded port")
2045 cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2046 mc_now, _ = self.cfg.GetMasterCandidateStats()
2047 master_candidate = mc_now < cp_size
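    # Illustrative note: the new node is auto-promoted to master candidate
    # only while the current number of candidates (mc_now) is below the
    # configured candidate_pool_size; e.g. with a pool size of 10 and 3
    # existing candidates it joins as a candidate, while with a full pool it
    # joins as a plain node.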
2049 self.new_node = objects.Node(name=node,
2050 primary_ip=primary_ip,
2051 secondary_ip=secondary_ip,
2052 master_candidate=master_candidate,
2055 def Exec(self, feedback_fn):
2056 """Adds the new node to the cluster.
2059 new_node = self.new_node
2060 node = new_node.name
2062 # check connectivity
2063 result = self.rpc.call_version([node])[node]
2066 if constants.PROTOCOL_VERSION == result.data:
2067 logging.info("Communication to node %s fine, sw version %s match",
2070 raise errors.OpExecError("Version mismatch master version %s,"
2071 " node version %s" %
2072 (constants.PROTOCOL_VERSION, result.data))
2074 raise errors.OpExecError("Cannot get version from the new node")
2077 logging.info("Copy ssh key to node %s", node)
2078 priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2080 keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2081 constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2087 keyarray.append(f.read())
2091 result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2093 keyarray[3], keyarray[4], keyarray[5])
2095 if result.failed or not result.data:
2096 raise errors.OpExecError("Cannot transfer ssh keys to the new node")
2098 # Add node to our /etc/hosts, and add key to known_hosts
2099 utils.AddHostToEtcHosts(new_node.name)
2101 if new_node.secondary_ip != new_node.primary_ip:
2102 result = self.rpc.call_node_has_ip_address(new_node.name,
2103 new_node.secondary_ip)
2104 if result.failed or not result.data:
2105 raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2106 " you gave (%s). Please fix and re-run this"
2107 " command." % new_node.secondary_ip)
2109 node_verify_list = [self.cfg.GetMasterNode()]
2110 node_verify_param = {
2112 # TODO: do a node-net-test as well?
2115 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2116 self.cfg.GetClusterName())
2117 for verifier in node_verify_list:
2118 if result[verifier].failed or not result[verifier].data:
2119 raise errors.OpExecError("Cannot communicate with %s's node daemon"
2120 " for remote verification" % verifier)
2121 if result[verifier].data['nodelist']:
2122 for failed in result[verifier].data['nodelist']:
2123 feedback_fn("ssh/hostname verification failed %s -> %s" %
2124 (verifier, result[verifier].data['nodelist'][failed]))
2125 raise errors.OpExecError("ssh/hostname verification failed.")
2127 # Distribute updated /etc/hosts and known_hosts to all nodes,
2128 # including the node just added
2129 myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
2130 dist_nodes = self.cfg.GetNodeList()
2131 if not self.op.readd:
2132 dist_nodes.append(node)
2133 if myself.name in dist_nodes:
2134 dist_nodes.remove(myself.name)
2136 logging.debug("Copying hosts and known_hosts to all nodes")
2137 for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
2138 result = self.rpc.call_upload_file(dist_nodes, fname)
2139 for to_node, to_result in result.iteritems():
2140 if to_result.failed or not to_result.data:
2141 logging.error("Copy of file %s to node %s failed", fname, to_node)
2144 if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
2145 to_copy.append(constants.VNC_PASSWORD_FILE)
2146 for fname in to_copy:
2147 result = self.rpc.call_upload_file([node], fname)
2148 if result[node].failed or not result[node].data:
2149 logging.error("Could not copy file %s to node %s", fname, node)
2152 self.context.ReaddNode(new_node)
2154 self.context.AddNode(new_node)
2157 class LUSetNodeParams(LogicalUnit):
2158 """Modifies the parameters of a node.
2161 HPATH = "node-modify"
2162 HTYPE = constants.HTYPE_NODE
2163 _OP_REQP = ["node_name"]
2166 def CheckArguments(self):
2167 node_name = self.cfg.ExpandNodeName(self.op.node_name)
2168 if node_name is None:
2169 raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2170 self.op.node_name = node_name
2171 _CheckBooleanOpField(self.op, 'master_candidate')
2172 _CheckBooleanOpField(self.op, 'offline')
2173 if self.op.master_candidate is None and self.op.offline is None:
2174 raise errors.OpPrereqError("Please pass at least one modification")
2175 if self.op.offline == True and self.op.master_candidate == True:
2176 raise errors.OpPrereqError("Can't set the node into offline and"
2177 " master_candidate at the same time")
2179 def ExpandNames(self):
2180 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2182 def BuildHooksEnv(self):
2185 This runs on the master node.
2189 "OP_TARGET": self.op.node_name,
2190 "MASTER_CANDIDATE": str(self.op.master_candidate),
2191 "OFFLINE": str(self.op.offline),
2193 nl = [self.cfg.GetMasterNode(),
2197 def CheckPrereq(self):
2198 """Check prerequisites.
2200 This checks that the given node exists and that the requested flag changes are allowed.
2203 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2205 if ((self.op.master_candidate == False or self.op.offline == True)
2206 and node.master_candidate):
2207 # we will demote the node from master_candidate
2208 if self.op.node_name == self.cfg.GetMasterNode():
2209 raise errors.OpPrereqError("The master node has to be a"
2210 " master candidate and online")
2211 cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2212 num_candidates, _ = self.cfg.GetMasterCandidateStats()
2213 if num_candidates <= cp_size:
2214 msg = ("Not enough master candidates (desired"
2215 " %d, new value will be %d)" % (cp_size, num_candidates-1))
2217 self.LogWarning(msg)
2219 raise errors.OpPrereqError(msg)
2221 if (self.op.master_candidate == True and node.offline and
2222 not self.op.offline == False):
2223 raise errors.OpPrereqError("Can't set an offline node to"
2224 " master_candidate")
2228 def Exec(self, feedback_fn):
2236 if self.op.offline is not None:
2237 node.offline = self.op.offline
2238 result.append(("offline", str(self.op.offline)))
2239 if self.op.offline == True and node.master_candidate:
2240 node.master_candidate = False
2241 result.append(("master_candidate", "auto-demotion due to offline"))
2243 if self.op.master_candidate is not None:
2244 node.master_candidate = self.op.master_candidate
2245 result.append(("master_candidate", str(self.op.master_candidate)))
2246 if self.op.master_candidate == False:
2247 rrc = self.rpc.call_node_demote_from_mc(node.name)
2248 if (rrc.failed or not isinstance(rrc.data, (tuple, list))
2249 or len(rrc.data) != 2):
2250 self.LogWarning("Node rpc error: %s" % rrc.error)
2251 elif not rrc.data[0]:
2252 self.LogWarning("Node failed to demote itself: %s" % rrc.data[1])
2254 # this will trigger configuration file update, if needed
2255 self.cfg.Update(node)
2256 # this will trigger job queue propagation or cleanup
2257 if self.op.node_name != self.cfg.GetMasterNode():
2258 self.context.ReaddNode(node)
2263 class LUQueryClusterInfo(NoHooksLU):
2264 """Query cluster configuration.
2270 def ExpandNames(self):
2271 self.needed_locks = {}
2273 def CheckPrereq(self):
2274 """No prerequsites needed for this LU.
2279 def Exec(self, feedback_fn):
2280 """Return cluster config.
2283 cluster = self.cfg.GetClusterInfo()
2285 "software_version": constants.RELEASE_VERSION,
2286 "protocol_version": constants.PROTOCOL_VERSION,
2287 "config_version": constants.CONFIG_VERSION,
2288 "os_api_version": constants.OS_API_VERSION,
2289 "export_version": constants.EXPORT_VERSION,
2290 "architecture": (platform.architecture()[0], platform.machine()),
2291 "name": cluster.cluster_name,
2292 "master": cluster.master_node,
2293 "default_hypervisor": cluster.default_hypervisor,
2294 "enabled_hypervisors": cluster.enabled_hypervisors,
2295 "hvparams": cluster.hvparams,
2296 "beparams": cluster.beparams,
2297 "candidate_pool_size": cluster.candidate_pool_size,
2303 class LUQueryConfigValues(NoHooksLU):
2304 """Return configuration values.
2309 _FIELDS_DYNAMIC = utils.FieldSet()
2310 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2312 def ExpandNames(self):
2313 self.needed_locks = {}
2315 _CheckOutputFields(static=self._FIELDS_STATIC,
2316 dynamic=self._FIELDS_DYNAMIC,
2317 selected=self.op.output_fields)
2319 def CheckPrereq(self):
2320 """No prerequisites.
2325 def Exec(self, feedback_fn):
2326 """Dump a representation of the cluster config to the standard output.
2330 for field in self.op.output_fields:
2331 if field == "cluster_name":
2332 entry = self.cfg.GetClusterName()
2333 elif field == "master_node":
2334 entry = self.cfg.GetMasterNode()
2335 elif field == "drain_flag":
2336 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2338 raise errors.ParameterError(field)
2339 values.append(entry)
2343 class LUActivateInstanceDisks(NoHooksLU):
2344 """Bring up an instance's disks.
2347 _OP_REQP = ["instance_name"]
2350 def ExpandNames(self):
2351 self._ExpandAndLockInstance()
2352 self.needed_locks[locking.LEVEL_NODE] = []
2353 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2355 def DeclareLocks(self, level):
2356 if level == locking.LEVEL_NODE:
2357 self._LockInstancesNodes()
2359 def CheckPrereq(self):
2360 """Check prerequisites.
2362 This checks that the instance is in the cluster.
2365 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2366 assert self.instance is not None, \
2367 "Cannot retrieve locked instance %s" % self.op.instance_name
2368 _CheckNodeOnline(self, self.instance.primary_node)
2370 def Exec(self, feedback_fn):
2371 """Activate the disks.
2374 disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2376 raise errors.OpExecError("Cannot activate block devices")
2381 def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2382 """Prepare the block devices for an instance.
2384 This sets up the block devices on all nodes.
2386 @type lu: L{LogicalUnit}
2387 @param lu: the logical unit on whose behalf we execute
2388 @type instance: L{objects.Instance}
2389 @param instance: the instance for whose disks we assemble
2390 @type ignore_secondaries: boolean
2391 @param ignore_secondaries: if true, errors on secondary nodes
2392 won't result in an error return from the function
2393 @return: a tuple of (disks_ok, device_info); device_info is a list of
2394 (host, instance_visible_name, node_visible_name) tuples
2395 with the mapping from node devices to instance devices
2400 iname = instance.name
2401 # With the two passes mechanism we try to reduce the window of
2402 # opportunity for the race condition of switching DRBD to primary
2403 # before handshaking occurred, but we do not eliminate it
2405 # The proper fix would be to wait (with some limits) until the
2406 # connection has been made and drbd transitions from WFConnection
2407 # into any other network-connected state (Connected, SyncTarget,
2410 # 1st pass, assemble on all nodes in secondary mode
2411 for inst_disk in instance.disks:
2412 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2413 lu.cfg.SetDiskID(node_disk, node)
2414 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2415 if result.failed or not result:
2416 lu.proc.LogWarning("Could not prepare block device %s on node %s"
2417 " (is_primary=False, pass=1)",
2418 inst_disk.iv_name, node)
2419 if not ignore_secondaries:
2422 # FIXME: race condition on drbd migration to primary
2424 # 2nd pass, do only the primary node
2425 for inst_disk in instance.disks:
2426 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2427 if node != instance.primary_node:
2429 lu.cfg.SetDiskID(node_disk, node)
2430 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2431 if result.failed or not result:
2432 lu.proc.LogWarning("Could not prepare block device %s on node %s"
2433 " (is_primary=True, pass=2)",
2434 inst_disk.iv_name, node)
2436 device_info.append((instance.primary_node, inst_disk.iv_name, result.data))
2438 # leave the disks configured for the primary node
2439 # this is a workaround that would be fixed better by
2440 # improving the logical/physical id handling
2441 for disk in instance.disks:
2442 lu.cfg.SetDiskID(disk, instance.primary_node)
2444 return disks_ok, device_info
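# Usage sketch (illustrative, not taken verbatim from this module): callers
# receive (disks_ok, device_info) and can report the node-to-instance device
# mapping, e.g.:
#
#   disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
#   for host, iv_name, dev_path in disks_info:
#     feedback_fn("  %s on %s is %s" % (iv_name, host, dev_path))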
2447 def _StartInstanceDisks(lu, instance, force):
2448 """Start the disks of an instance.
2451 disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2452 ignore_secondaries=force)
2454 _ShutdownInstanceDisks(lu, instance)
2455 if force is not None and not force:
2456 lu.proc.LogWarning("", hint="If the message above refers to a secondary node,"
2458 " you can retry the operation using '--force'.")
2459 raise errors.OpExecError("Disk consistency error")
2462 class LUDeactivateInstanceDisks(NoHooksLU):
2463 """Shutdown an instance's disks.
2466 _OP_REQP = ["instance_name"]
2469 def ExpandNames(self):
2470 self._ExpandAndLockInstance()
2471 self.needed_locks[locking.LEVEL_NODE] = []
2472 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2474 def DeclareLocks(self, level):
2475 if level == locking.LEVEL_NODE:
2476 self._LockInstancesNodes()
2478 def CheckPrereq(self):
2479 """Check prerequisites.
2481 This checks that the instance is in the cluster.
2484 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2485 assert self.instance is not None, \
2486 "Cannot retrieve locked instance %s" % self.op.instance_name
2488 def Exec(self, feedback_fn):
2489 """Deactivate the disks
2492 instance = self.instance
2493 _SafeShutdownInstanceDisks(self, instance)
2496 def _SafeShutdownInstanceDisks(lu, instance):
2497 """Shutdown block devices of an instance.
2499 This function checks if an instance is running, before calling
2500 _ShutdownInstanceDisks.
2503 ins_l = lu.rpc.call_instance_list([instance.primary_node],
2504 [instance.hypervisor])
2505 ins_l = ins_l[instance.primary_node]
2506 if ins_l.failed or not isinstance(ins_l.data, list):
2507 raise errors.OpExecError("Can't contact node '%s'" %
2508 instance.primary_node)
2510 if instance.name in ins_l.data:
2511 raise errors.OpExecError("Instance is running, can't shutdown"
2514 _ShutdownInstanceDisks(lu, instance)
2517 def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2518 """Shutdown block devices of an instance.
2520 This does the shutdown on all nodes of the instance.
2522 If ignore_primary is false, errors on the primary node are ignored.
2527 for disk in instance.disks:
2528 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2529 lu.cfg.SetDiskID(top_disk, node)
2530 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2531 if result.failed or not result.data:
2532 logging.error("Could not shutdown block device %s on node %s",
2534 if not ignore_primary or node != instance.primary_node:
2539 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2540 """Checks if a node has enough free memory.
2542 This function checks if a given node has the needed amount of free
2543 memory. In case the node has less memory or we cannot get the
2544 information from the node, this function raises an OpPrereqError
2547 @type lu: C{LogicalUnit}
2548 @param lu: a logical unit from which we get configuration data
2550 @param node: the node to check
2551 @type reason: C{str}
2552 @param reason: string to use in the error message
2553 @type requested: C{int}
2554 @param requested: the amount of memory in MiB to check for
2555 @type hypervisor_name: C{str}
2556 @param hypervisor_name: the hypervisor to ask for memory stats
2557 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2558 we cannot check the node
2561 nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2562 nodeinfo[node].Raise()
2563 free_mem = nodeinfo[node].data.get('memory_free')
2564 if not isinstance(free_mem, int):
2565 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2566 " was '%s'" % (node, free_mem))
2567 if requested > free_mem:
2568 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2569 " needed %s MiB, available %s MiB" %
2570 (node, reason, requested, free_mem))
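# Usage sketch (mirroring the callers below): the startup, failover and
# migration LUs run this check against the node that will host the instance,
# e.g.:
#
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)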
2573 class LUStartupInstance(LogicalUnit):
2574 """Starts an instance.
2577 HPATH = "instance-start"
2578 HTYPE = constants.HTYPE_INSTANCE
2579 _OP_REQP = ["instance_name", "force"]
2582 def ExpandNames(self):
2583 self._ExpandAndLockInstance()
2585 def BuildHooksEnv(self):
2588 This runs on master, primary and secondary nodes of the instance.
2592 "FORCE": self.op.force,
2594 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2595 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2596 list(self.instance.secondary_nodes))
2599 def CheckPrereq(self):
2600 """Check prerequisites.
2602 This checks that the instance is in the cluster.
2605 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2606 assert self.instance is not None, \
2607 "Cannot retrieve locked instance %s" % self.op.instance_name
2609 _CheckNodeOnline(self, instance.primary_node)
2611 bep = self.cfg.GetClusterInfo().FillBE(instance)
2612 # check bridges existence
2613 _CheckInstanceBridgesExist(self, instance)
2615 _CheckNodeFreeMemory(self, instance.primary_node,
2616 "starting instance %s" % instance.name,
2617 bep[constants.BE_MEMORY], instance.hypervisor)
2619 def Exec(self, feedback_fn):
2620 """Start the instance.
2623 instance = self.instance
2624 force = self.op.force
2625 extra_args = getattr(self.op, "extra_args", "")
2627 self.cfg.MarkInstanceUp(instance.name)
2629 node_current = instance.primary_node
2631 _StartInstanceDisks(self, instance, force)
2633 result = self.rpc.call_instance_start(node_current, instance, extra_args)
2634 if result.failed or not result.data:
2635 _ShutdownInstanceDisks(self, instance)
2636 raise errors.OpExecError("Could not start instance")
2639 class LURebootInstance(LogicalUnit):
2640 """Reboot an instance.
2643 HPATH = "instance-reboot"
2644 HTYPE = constants.HTYPE_INSTANCE
2645 _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2648 def ExpandNames(self):
2649 if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2650 constants.INSTANCE_REBOOT_HARD,
2651 constants.INSTANCE_REBOOT_FULL]:
2652 raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2653 (constants.INSTANCE_REBOOT_SOFT,
2654 constants.INSTANCE_REBOOT_HARD,
2655 constants.INSTANCE_REBOOT_FULL))
2656 self._ExpandAndLockInstance()
2658 def BuildHooksEnv(self):
2661 This runs on master, primary and secondary nodes of the instance.
2665 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2667 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2668 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2669 list(self.instance.secondary_nodes))
2672 def CheckPrereq(self):
2673 """Check prerequisites.
2675 This checks that the instance is in the cluster.
2678 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2679 assert self.instance is not None, \
2680 "Cannot retrieve locked instance %s" % self.op.instance_name
2682 _CheckNodeOnline(self, instance.primary_node)
2684 # check bridges existence
2685 _CheckInstanceBridgesExist(self, instance)
2687 def Exec(self, feedback_fn):
2688 """Reboot the instance.
2691 instance = self.instance
2692 ignore_secondaries = self.op.ignore_secondaries
2693 reboot_type = self.op.reboot_type
2694 extra_args = getattr(self.op, "extra_args", "")
2696 node_current = instance.primary_node
2698 if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2699 constants.INSTANCE_REBOOT_HARD]:
2700 result = self.rpc.call_instance_reboot(node_current, instance,
2701 reboot_type, extra_args)
2702 if result.failed or not result.data:
2703 raise errors.OpExecError("Could not reboot instance")
2705 if not self.rpc.call_instance_shutdown(node_current, instance):
2706 raise errors.OpExecError("Could not shutdown instance for full reboot")
2707 _ShutdownInstanceDisks(self, instance)
2708 _StartInstanceDisks(self, instance, ignore_secondaries)
2709 result = self.rpc.call_instance_start(node_current, instance, extra_args)
2710 if result.failed or not result.data:
2711 _ShutdownInstanceDisks(self, instance)
2712 raise errors.OpExecError("Could not start instance for full reboot")
2714 self.cfg.MarkInstanceUp(instance.name)
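    # Note: soft and hard reboots above are delegated to the hypervisor via
    # call_instance_reboot; the "full" type is emulated by shutting the
    # instance down, cycling its disks and starting it again on the primary
    # node.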
2717 class LUShutdownInstance(LogicalUnit):
2718 """Shutdown an instance.
2721 HPATH = "instance-stop"
2722 HTYPE = constants.HTYPE_INSTANCE
2723 _OP_REQP = ["instance_name"]
2726 def ExpandNames(self):
2727 self._ExpandAndLockInstance()
2729 def BuildHooksEnv(self):
2732 This runs on master, primary and secondary nodes of the instance.
2735 env = _BuildInstanceHookEnvByObject(self, self.instance)
2736 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2737 list(self.instance.secondary_nodes))
2740 def CheckPrereq(self):
2741 """Check prerequisites.
2743 This checks that the instance is in the cluster.
2746 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2747 assert self.instance is not None, \
2748 "Cannot retrieve locked instance %s" % self.op.instance_name
2749 _CheckNodeOnline(self, self.instance.primary_node)
2751 def Exec(self, feedback_fn):
2752 """Shutdown the instance.
2755 instance = self.instance
2756 node_current = instance.primary_node
2757 self.cfg.MarkInstanceDown(instance.name)
2758 result = self.rpc.call_instance_shutdown(node_current, instance)
2759 if result.failed or not result.data:
2760 self.proc.LogWarning("Could not shutdown instance")
2762 _ShutdownInstanceDisks(self, instance)
2765 class LUReinstallInstance(LogicalUnit):
2766 """Reinstall an instance.
2769 HPATH = "instance-reinstall"
2770 HTYPE = constants.HTYPE_INSTANCE
2771 _OP_REQP = ["instance_name"]
2774 def ExpandNames(self):
2775 self._ExpandAndLockInstance()
2777 def BuildHooksEnv(self):
2780 This runs on master, primary and secondary nodes of the instance.
2783 env = _BuildInstanceHookEnvByObject(self, self.instance)
2784 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2785 list(self.instance.secondary_nodes))
2788 def CheckPrereq(self):
2789 """Check prerequisites.
2791 This checks that the instance is in the cluster and is not running.
2794 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2795 assert instance is not None, \
2796 "Cannot retrieve locked instance %s" % self.op.instance_name
2797 _CheckNodeOnline(self, instance.primary_node)
2799 if instance.disk_template == constants.DT_DISKLESS:
2800 raise errors.OpPrereqError("Instance '%s' has no disks" %
2801 self.op.instance_name)
2802 if instance.status != "down":
2803 raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2804 self.op.instance_name)
2805 remote_info = self.rpc.call_instance_info(instance.primary_node,
2807 instance.hypervisor)
2808 if remote_info.failed or remote_info.data:
2809 raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2810 (self.op.instance_name,
2811 instance.primary_node))
2813 self.op.os_type = getattr(self.op, "os_type", None)
2814 if self.op.os_type is not None:
2816 pnode = self.cfg.GetNodeInfo(
2817 self.cfg.ExpandNodeName(instance.primary_node))
2819 raise errors.OpPrereqError("Primary node '%s' is unknown" %
2821 result = self.rpc.call_os_get(pnode.name, self.op.os_type)
2823 if not isinstance(result.data, objects.OS):
2824 raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2825 " primary node" % self.op.os_type)
2827 self.instance = instance
2829 def Exec(self, feedback_fn):
2830 """Reinstall the instance.
2833 inst = self.instance
2835 if self.op.os_type is not None:
2836 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2837 inst.os = self.op.os_type
2838 self.cfg.Update(inst)
2840 _StartInstanceDisks(self, inst, None)
2842 feedback_fn("Running the instance OS create scripts...")
2843 result = self.rpc.call_instance_os_add(inst.primary_node, inst)
2846 raise errors.OpExecError("Could not install OS for instance %s"
2848 " on node %s" % (inst.name, inst.primary_node))
2850 _ShutdownInstanceDisks(self, inst)
2853 class LURenameInstance(LogicalUnit):
2854 """Rename an instance.
2857 HPATH = "instance-rename"
2858 HTYPE = constants.HTYPE_INSTANCE
2859 _OP_REQP = ["instance_name", "new_name"]
2861 def BuildHooksEnv(self):
2864 This runs on master, primary and secondary nodes of the instance.
2867 env = _BuildInstanceHookEnvByObject(self, self.instance)
2868 env["INSTANCE_NEW_NAME"] = self.op.new_name
2869 nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2870 list(self.instance.secondary_nodes))
2873 def CheckPrereq(self):
2874 """Check prerequisites.
2876 This checks that the instance is in the cluster and is not running.
2879 instance = self.cfg.GetInstanceInfo(
2880 self.cfg.ExpandInstanceName(self.op.instance_name))
2881 if instance is None:
2882 raise errors.OpPrereqError("Instance '%s' not known" %
2883 self.op.instance_name)
2884 _CheckNodeOnline(self, instance.primary_node)
2886 if instance.status != "down":
2887 raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2888 self.op.instance_name)
2889 remote_info = self.rpc.call_instance_info(instance.primary_node,
2891 instance.hypervisor)
2893 if remote_info.data:
2894 raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2895 (self.op.instance_name,
2896 instance.primary_node))
2897 self.instance = instance
2899 # new name verification
2900 name_info = utils.HostInfo(self.op.new_name)
2902 self.op.new_name = new_name = name_info.name
2903 instance_list = self.cfg.GetInstanceList()
2904 if new_name in instance_list:
2905 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2908 if not getattr(self.op, "ignore_ip", False):
2909 if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2910 raise errors.OpPrereqError("IP %s of instance %s already in use" %
2911 (name_info.ip, new_name))
2914 def Exec(self, feedback_fn):
2915 """Reinstall the instance.
2918 inst = self.instance
2919 old_name = inst.name
2921 if inst.disk_template == constants.DT_FILE:
2922 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2924 self.cfg.RenameInstance(inst.name, self.op.new_name)
2925 # Change the instance lock. This is definitely safe while we hold the BGL
2926 self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
2927 self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2929 # re-read the instance from the configuration after rename
2930 inst = self.cfg.GetInstanceInfo(self.op.new_name)
2932 if inst.disk_template == constants.DT_FILE:
2933 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2934 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
2935 old_file_storage_dir,
2936 new_file_storage_dir)
2939 raise errors.OpExecError("Could not connect to node '%s' to rename"
2940 " directory '%s' to '%s' (but the instance"
2941 " has been renamed in Ganeti)" % (
2942 inst.primary_node, old_file_storage_dir,
2943 new_file_storage_dir))
2945 if not result.data[0]:
2946 raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2947 " (but the instance has been renamed in"
2948 " Ganeti)" % (old_file_storage_dir,
2949 new_file_storage_dir))
2951 _StartInstanceDisks(self, inst, None)
2953 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
2955 if result.failed or not result.data:
2956 msg = ("Could not run OS rename script for instance %s on node %s"
2957 " (but the instance has been renamed in Ganeti)" %
2958 (inst.name, inst.primary_node))
2959 self.proc.LogWarning(msg)
2961 _ShutdownInstanceDisks(self, inst)
2964 class LURemoveInstance(LogicalUnit):
2965 """Remove an instance.
2968 HPATH = "instance-remove"
2969 HTYPE = constants.HTYPE_INSTANCE
2970 _OP_REQP = ["instance_name", "ignore_failures"]
2973 def ExpandNames(self):
2974 self._ExpandAndLockInstance()
2975 self.needed_locks[locking.LEVEL_NODE] = []
2976 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2978 def DeclareLocks(self, level):
2979 if level == locking.LEVEL_NODE:
2980 self._LockInstancesNodes()
2982 def BuildHooksEnv(self):
2985 This runs on master, primary and secondary nodes of the instance.
2988 env = _BuildInstanceHookEnvByObject(self, self.instance)
2989 nl = [self.cfg.GetMasterNode()]
2992 def CheckPrereq(self):
2993 """Check prerequisites.
2995 This checks that the instance is in the cluster.
2998 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2999 assert self.instance is not None, \
3000 "Cannot retrieve locked instance %s" % self.op.instance_name
3002 def Exec(self, feedback_fn):
3003 """Remove the instance.
3006 instance = self.instance
3007 logging.info("Shutting down instance %s on node %s",
3008 instance.name, instance.primary_node)
3010 result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
3011 if result.failed or not result.data:
3012 if self.op.ignore_failures:
3013 feedback_fn("Warning: can't shutdown instance")
3015 raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3016 (instance.name, instance.primary_node))
3018 logging.info("Removing block devices for instance %s", instance.name)
3020 if not _RemoveDisks(self, instance):
3021 if self.op.ignore_failures:
3022 feedback_fn("Warning: can't remove instance's disks")
3024 raise errors.OpExecError("Can't remove instance's disks")
3026 logging.info("Removing instance %s out of cluster config", instance.name)
3028 self.cfg.RemoveInstance(instance.name)
3029 self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3032 class LUQueryInstances(NoHooksLU):
3033 """Logical unit for querying instances.
3036 _OP_REQP = ["output_fields", "names"]
3038 _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3039 "admin_state", "admin_ram",
3040 "disk_template", "ip", "mac", "bridge",
3041 "sda_size", "sdb_size", "vcpus", "tags",
3042 "network_port", "beparams",
3043 "(disk).(size)/([0-9]+)",
3045 "(nic).(mac|ip|bridge)/([0-9]+)",
3046 "(nic).(macs|ips|bridges)",
3047 "(disk|nic).(count)",
3048 "serial_no", "hypervisor", "hvparams",] +
3050 for name in constants.HVS_PARAMETERS] +
3052 for name in constants.BES_PARAMETERS])
3053 _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
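  # Note on the parenthesised entries in _FIELDS_STATIC: they are regex-style
  # patterns, so a query can ask for indexed fields such as "disk.size/0"
  # (size of the first disk) or "nic.mac/1" (MAC of the second NIC); these
  # forms are resolved via the st_groups matching in Exec below.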
3056 def ExpandNames(self):
3057 _CheckOutputFields(static=self._FIELDS_STATIC,
3058 dynamic=self._FIELDS_DYNAMIC,
3059 selected=self.op.output_fields)
3061 self.needed_locks = {}
3062 self.share_locks[locking.LEVEL_INSTANCE] = 1
3063 self.share_locks[locking.LEVEL_NODE] = 1
3066 self.wanted = _GetWantedInstances(self, self.op.names)
3068 self.wanted = locking.ALL_SET
3070 self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3072 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3073 self.needed_locks[locking.LEVEL_NODE] = []
3074 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3076 def DeclareLocks(self, level):
3077 if level == locking.LEVEL_NODE and self.do_locking:
3078 self._LockInstancesNodes()
3080 def CheckPrereq(self):
3081 """Check prerequisites.
3086 def Exec(self, feedback_fn):
3087 """Computes the list of nodes and their attributes.
3090 all_info = self.cfg.GetAllInstancesInfo()
3092 instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3093 elif self.wanted != locking.ALL_SET:
3094 instance_names = self.wanted
3095 missing = set(instance_names).difference(all_info.keys())
3097 raise errors.OpExecError(
3098 "Some instances were removed before retrieving their data: %s"
3101 instance_names = all_info.keys()
3103 instance_names = utils.NiceSort(instance_names)
3104 instance_list = [all_info[iname] for iname in instance_names]
3106 # begin data gathering
3108 nodes = frozenset([inst.primary_node for inst in instance_list])
3109 hv_list = list(set([inst.hypervisor for inst in instance_list]))
3115 node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3117 result = node_data[name]
3119 # offline nodes will be in both lists
3120 off_nodes.append(name)
3122 bad_nodes.append(name)
3125 live_data.update(result.data)
3126 # else no instance is alive
3128 live_data = dict([(name, {}) for name in instance_names])
3130 # end data gathering
3135 for instance in instance_list:
3137 i_hv = self.cfg.GetClusterInfo().FillHV(instance)
3138 i_be = self.cfg.GetClusterInfo().FillBE(instance)
3139 for field in self.op.output_fields:
3140 st_match = self._FIELDS_STATIC.Matches(field)
3145 elif field == "pnode":
3146 val = instance.primary_node
3147 elif field == "snodes":
3148 val = list(instance.secondary_nodes)
3149 elif field == "admin_state":
3150 val = (instance.status != "down")
3151 elif field == "oper_state":
3152 if instance.primary_node in bad_nodes:
3155 val = bool(live_data.get(instance.name))
3156 elif field == "status":
3157 if instance.primary_node in off_nodes:
3158 val = "ERROR_nodeoffline"
3159 elif instance.primary_node in bad_nodes:
3160 val = "ERROR_nodedown"
3162 running = bool(live_data.get(instance.name))
3164 if instance.status != "down":
3169 if instance.status != "down":
3173 elif field == "oper_ram":
3174 if instance.primary_node in bad_nodes:
3176 elif instance.name in live_data:
3177 val = live_data[instance.name].get("memory", "?")
3180 elif field == "disk_template":
3181 val = instance.disk_template
3183 val = instance.nics[0].ip
3184 elif field == "bridge":
3185 val = instance.nics[0].bridge
3186 elif field == "mac":
3187 val = instance.nics[0].mac
3188 elif field == "sda_size" or field == "sdb_size":
3189 idx = ord(field[2]) - ord('a')
3191 val = instance.FindDisk(idx).size
3192 except errors.OpPrereqError:
3194 elif field == "tags":
3195 val = list(instance.GetTags())
3196 elif field == "serial_no":
3197 val = instance.serial_no
3198 elif field == "network_port":
3199 val = instance.network_port
3200 elif field == "hypervisor":
3201 val = instance.hypervisor
3202 elif field == "hvparams":
3204 elif (field.startswith(HVPREFIX) and
3205 field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3206 val = i_hv.get(field[len(HVPREFIX):], None)
3207 elif field == "beparams":
3209 elif (field.startswith(BEPREFIX) and
3210 field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3211 val = i_be.get(field[len(BEPREFIX):], None)
3212 elif st_match and st_match.groups():
3213 # matches a variable list
3214 st_groups = st_match.groups()
3215 if st_groups and st_groups[0] == "disk":
3216 if st_groups[1] == "count":
3217 val = len(instance.disks)
3218 elif st_groups[1] == "sizes":
3219 val = [disk.size for disk in instance.disks]
3220 elif st_groups[1] == "size":
3222 val = instance.FindDisk(st_groups[2]).size
3223 except errors.OpPrereqError:
3226 assert False, "Unhandled disk parameter"
3227 elif st_groups[0] == "nic":
3228 if st_groups[1] == "count":
3229 val = len(instance.nics)
3230 elif st_groups[1] == "macs":
3231 val = [nic.mac for nic in instance.nics]
3232 elif st_groups[1] == "ips":
3233 val = [nic.ip for nic in instance.nics]
3234 elif st_groups[1] == "bridges":
3235 val = [nic.bridge for nic in instance.nics]
3238 nic_idx = int(st_groups[2])
3239 if nic_idx >= len(instance.nics):
3242 if st_groups[1] == "mac":
3243 val = instance.nics[nic_idx].mac
3244 elif st_groups[1] == "ip":
3245 val = instance.nics[nic_idx].ip
3246 elif st_groups[1] == "bridge":
3247 val = instance.nics[nic_idx].bridge
3249 assert False, "Unhandled NIC parameter"
3251 assert False, "Unhandled variable parameter"
3253 raise errors.ParameterError(field)
3260 class LUFailoverInstance(LogicalUnit):
3261 """Failover an instance.
3264 HPATH = "instance-failover"
3265 HTYPE = constants.HTYPE_INSTANCE
3266 _OP_REQP = ["instance_name", "ignore_consistency"]
3269 def ExpandNames(self):
3270 self._ExpandAndLockInstance()
3271 self.needed_locks[locking.LEVEL_NODE] = []
3272 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3274 def DeclareLocks(self, level):
3275 if level == locking.LEVEL_NODE:
3276 self._LockInstancesNodes()
3278 def BuildHooksEnv(self):
3281 This runs on master, primary and secondary nodes of the instance.
3285 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3287 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3288 nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3291 def CheckPrereq(self):
3292 """Check prerequisites.
3294 This checks that the instance is in the cluster.
3297 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3298 assert self.instance is not None, \
3299 "Cannot retrieve locked instance %s" % self.op.instance_name
3301 bep = self.cfg.GetClusterInfo().FillBE(instance)
3302 if instance.disk_template not in constants.DTS_NET_MIRROR:
3303 raise errors.OpPrereqError("Instance's disk layout is not"
3304 " network mirrored, cannot failover.")
3306 secondary_nodes = instance.secondary_nodes
3307 if not secondary_nodes:
3308 raise errors.ProgrammerError("no secondary node but using "
3309 "a mirrored disk template")
3311 target_node = secondary_nodes[0]
3312 _CheckNodeOnline(self, target_node)
3313 # check memory requirements on the secondary node
3314 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3315 instance.name, bep[constants.BE_MEMORY],
3316 instance.hypervisor)
3318 # check bridge existence
3319 brlist = [nic.bridge for nic in instance.nics]
3320 result = self.rpc.call_bridges_exist(target_node, brlist)
3323 raise errors.OpPrereqError("One or more target bridges %s do not"
3324 " exist on destination node '%s'" %
3325 (brlist, target_node))
3327 def Exec(self, feedback_fn):
3328 """Failover an instance.
3330 The failover is done by shutting it down on its present node and
3331 starting it on the secondary.
3334 instance = self.instance
3336 source_node = instance.primary_node
3337 target_node = instance.secondary_nodes[0]
3339 feedback_fn("* checking disk consistency between source and target")
3340 for dev in instance.disks:
3341 # for drbd, these are drbd over lvm
3342 if not _CheckDiskConsistency(self, dev, target_node, False):
3343 if instance.status == "up" and not self.op.ignore_consistency:
3344 raise errors.OpExecError("Disk %s is degraded on target node,"
3345 " aborting failover." % dev.iv_name)
3347 feedback_fn("* shutting down instance on source node")
3348 logging.info("Shutting down instance %s on node %s",
3349 instance.name, source_node)
3351 result = self.rpc.call_instance_shutdown(source_node, instance)
3352 if result.failed or not result.data:
3353 if self.op.ignore_consistency:
3354 self.proc.LogWarning("Could not shutdown instance %s on node %s."
3356 " anyway. Please make sure node %s is down",
3357 instance.name, source_node, source_node)
3359 raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3360 (instance.name, source_node))
3362 feedback_fn("* deactivating the instance's disks on source node")
3363 if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3364 raise errors.OpExecError("Can't shut down the instance's disks.")
3366 instance.primary_node = target_node
3367 # distribute new instance config to the other nodes
3368 self.cfg.Update(instance)
3370 # Only start the instance if it's marked as up
3371 if instance.status == "up":
3372 feedback_fn("* activating the instance's disks on target node")
3373 logging.info("Starting instance %s on node %s",
3374 instance.name, target_node)
3376 disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3377 ignore_secondaries=True)
3379 _ShutdownInstanceDisks(self, instance)
3380 raise errors.OpExecError("Can't activate the instance's disks")
3382 feedback_fn("* starting the instance on the target node")
3383 result = self.rpc.call_instance_start(target_node, instance, None)
3384 if result.failed or not result.data:
3385 _ShutdownInstanceDisks(self, instance)
3386 raise errors.OpExecError("Could not start instance %s on node %s." %
3387 (instance.name, target_node))
3390 class LUMigrateInstance(LogicalUnit):
3391 """Migrate an instance.
3393 This is migration without shutting down the instance, as opposed to
3394 failover, which requires shutting it down.
3397 HPATH = "instance-migrate"
3398 HTYPE = constants.HTYPE_INSTANCE
3399 _OP_REQP = ["instance_name", "live", "cleanup"]
3403 def ExpandNames(self):
3404 self._ExpandAndLockInstance()
3405 self.needed_locks[locking.LEVEL_NODE] = []
3406 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3408 def DeclareLocks(self, level):
3409 if level == locking.LEVEL_NODE:
3410 self._LockInstancesNodes()
3412 def BuildHooksEnv(self):
3415 This runs on master, primary and secondary nodes of the instance.
3418 env = _BuildInstanceHookEnvByObject(self, self.instance)
3419 nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3422 def CheckPrereq(self):
3423 """Check prerequisites.
3425 This checks that the instance is in the cluster.
3428 instance = self.cfg.GetInstanceInfo(
3429 self.cfg.ExpandInstanceName(self.op.instance_name))
3430 if instance is None:
3431 raise errors.OpPrereqError("Instance '%s' not known" %
3432 self.op.instance_name)
3434 if instance.disk_template != constants.DT_DRBD8:
3435 raise errors.OpPrereqError("Instance's disk layout is not"
3436 " drbd8, cannot migrate.")
3438 secondary_nodes = instance.secondary_nodes
3439 if not secondary_nodes:
3440 raise errors.ProgrammerError("no secondary node but using "
3441 "drbd8 disk template")
3443 i_be = self.cfg.GetClusterInfo().FillBE(instance)
3445 target_node = secondary_nodes[0]
3446 # check memory requirements on the secondary node
3447 _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3448 instance.name, i_be[constants.BE_MEMORY],
3449 instance.hypervisor)
3451 # check bridge existence
3452 brlist = [nic.bridge for nic in instance.nics]
3453 result = self.rpc.call_bridges_exist(target_node, brlist)
3454 if result.failed or not result.data:
3455 raise errors.OpPrereqError("One or more target bridges %s do not"
3456 " exist on destination node '%s'" %
3457 (brlist, target_node))
3459 if not self.op.cleanup:
3460 result = self.rpc.call_instance_migratable(instance.primary_node,
3462 msg = result.RemoteFailMsg()
3464 raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
3467 self.instance = instance
3469 def _WaitUntilSync(self):
3470 """Poll with custom rpc for disk sync.
3472 This uses our own step-based rpc call.
3475 self.feedback_fn("* wait until resync is done")
3479 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3481 self.instance.disks)
3483 for node, nres in result.items():
3484 msg = nres.RemoteFailMsg()
3486 raise errors.OpExecError("Cannot resync disks on node %s: %s" %
3488 node_done, node_percent = nres.data[1]
3489 all_done = all_done and node_done
3490 if node_percent is not None:
3491 min_percent = min(min_percent, node_percent)
3493 if min_percent < 100:
3494 self.feedback_fn(" - progress: %.1f%%" % min_percent)
3497 def _EnsureSecondary(self, node):
3498 """Demote a node to secondary.
3501 self.feedback_fn("* switching node %s to secondary mode" % node)
3503 for dev in self.instance.disks:
3504 self.cfg.SetDiskID(dev, node)
3506 result = self.rpc.call_blockdev_close(node, self.instance.name,
3507 self.instance.disks)
3508 msg = result.RemoteFailMsg()
3510 raise errors.OpExecError("Cannot change disk to secondary on node %s,"
3511 " error %s" % (node, msg))
3513 def _GoStandalone(self):
3514 """Disconnect from the network.
3517 self.feedback_fn("* changing into standalone mode")
3518 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3519 self.instance.disks)
3520 for node, nres in result.items():
3521 msg = nres.RemoteFailMsg()
3523 raise errors.OpExecError("Cannot disconnect disks node %s,"
3524 " error %s" % (node, msg))
3526 def _GoReconnect(self, multimaster):
3527 """Reconnect to the network.
3533 msg = "single-master"
3534 self.feedback_fn("* changing disks into %s mode" % msg)
3535 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3536 self.instance.disks,
3537 self.instance.name, multimaster)
3538 for node, nres in result.items():
3539 msg = nres.RemoteFailMsg()
3541 raise errors.OpExecError("Cannot change disks config on node %s,"
3542 " error: %s" % (node, msg))
3544 def _ExecCleanup(self):
3545 """Try to cleanup after a failed migration.
3547 The cleanup is done by:
3548 - check that the instance is running only on one node
3549 (and update the config if needed)
3550 - change disks on its secondary node to secondary
3551 - wait until disks are fully synchronized
3552 - disconnect from the network
3553 - change disks into single-master mode
3554 - wait again until disks are fully synchronized
3557 instance = self.instance
3558 target_node = self.target_node
3559 source_node = self.source_node
3561 # check running on only one node
3562 self.feedback_fn("* checking where the instance actually runs"
3563 " (if this hangs, the hypervisor might be in"
3565 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
3566 for node, result in ins_l.items():
3568 if not isinstance(result.data, list):
3569 raise errors.OpExecError("Can't contact node '%s'" % node)
3571 runningon_source = instance.name in ins_l[source_node].data
3572 runningon_target = instance.name in ins_l[target_node].data
3574 if runningon_source and runningon_target:
3575 raise errors.OpExecError("Instance seems to be running on two nodes,"
3576 " or the hypervisor is confused. You will have"
3577 " to ensure manually that it runs only on one"
3578 " and restart this operation.")
3580 if not (runningon_source or runningon_target):
3581 raise errors.OpExecError("Instance does not seem to be running at all."
3582 " In this case, it's safer to repair by"
3583 " running 'gnt-instance stop' to ensure disk"
3584 " shutdown, and then restarting it.")
3586 if runningon_target:
3587 # the migration has actually succeeded, we need to update the config
3588 self.feedback_fn("* instance running on secondary node (%s),"
3589 " updating config" % target_node)
3590 instance.primary_node = target_node
3591 self.cfg.Update(instance)
3592 demoted_node = source_node
3594 self.feedback_fn("* instance confirmed to be running on its"
3595 " primary node (%s)" % source_node)
3596 demoted_node = target_node
3598 self._EnsureSecondary(demoted_node)
3600 self._WaitUntilSync()
3601 except errors.OpExecError:
3602 # we ignore errors here, since if the device is standalone, it
3603 # won't be able to sync
3605 self._GoStandalone()
3606 self._GoReconnect(False)
3607 self._WaitUntilSync()
3609 self.feedback_fn("* done")
3611 def _ExecMigration(self):
3612 """Migrate an instance.
3614 The migrate is done by:
3615 - change the disks into dual-master mode
3616 - wait until disks are fully synchronized again
3617 - migrate the instance
3618 - change disks on the new secondary node (the old primary) to secondary
3619 - wait until disks are fully synchronized
3620 - change disks into single-master mode
3623 instance = self.instance
3624 target_node = self.target_node
3625 source_node = self.source_node
3627 self.feedback_fn("* checking disk consistency between source and target")
3628 for dev in instance.disks:
3629 if not _CheckDiskConsistency(self, dev, target_node, False):
3630 raise errors.OpExecError("Disk %s is degraded or not fully"
3631 " synchronized on target node,"
3632 " aborting migrate." % dev.iv_name)
3634 self._EnsureSecondary(target_node)
3635 self._GoStandalone()
3636 self._GoReconnect(True)
3637 self._WaitUntilSync()
3639 self.feedback_fn("* migrating instance to %s" % target_node)
3641 result = self.rpc.call_instance_migrate(source_node, instance,
3642 self.nodes_ip[target_node],
3644 msg = result.RemoteFailMsg()
3646 logging.error("Instance migration failed, trying to revert"
3647 " disk status: %s", msg)
3649 self._EnsureSecondary(target_node)
3650 self._GoStandalone()
3651 self._GoReconnect(False)
3652 self._WaitUntilSync()
3653 except errors.OpExecError, err:
3654 self.LogWarning("Migration failed and I can't reconnect the"
3655 " drives: error '%s'\n"
3656 "Please look and recover the instance status" %
3659 raise errors.OpExecError("Could not migrate instance %s: %s" %
3660 (instance.name, msg))
3663 instance.primary_node = target_node
3664 # distribute new instance config to the other nodes
3665 self.cfg.Update(instance)
3667 self._EnsureSecondary(source_node)
3668 self._WaitUntilSync()
3669 self._GoStandalone()
3670 self._GoReconnect(False)
3671 self._WaitUntilSync()
3673 self.feedback_fn("* done")
3675 def Exec(self, feedback_fn):
3676 """Perform the migration.
3679 self.feedback_fn = feedback_fn
3681 self.source_node = self.instance.primary_node
3682 self.target_node = self.instance.secondary_nodes[0]
3683 self.all_nodes = [self.source_node, self.target_node]
3685 self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
3686 self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
3689 return self._ExecCleanup()
3691 return self._ExecMigration()
3694 def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3695 """Create a tree of block devices on the primary node.
3697 This always creates all devices.
3701 for child in device.children:
3702 _CreateBlockDevOnPrimary(lu, node, instance, child, info)
3704 lu.cfg.SetDiskID(device, node)
3705 new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3706 instance.name, True, info)
3707 if new_id.failed or not new_id.data:
3708 raise errors.OpExecError("Can't create block device %s on primary"
3709 " node %s" % (device, node))
3710 if device.physical_id is None:
3711 device.physical_id = new_id
3714 def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3715 """Create a tree of block devices on a secondary node.
3717 If this device type has to be created on secondaries, create it and all its children.
3720 If not, just recurse to children keeping the same 'force' value.
3723 if device.CreateOnSecondary():
3727 for child in device.children:
3728 _CreateBlockDevOnSecondary(lu, node, instance, child, force, info)
3733 lu.cfg.SetDiskID(device, node)
3734 new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3735 instance.name, False, info)
3736 if new_id.failed or not new_id.data:
3737 raise errors.OpExecError("Can't create block device %s on secondary"
3738 " node %s" % (device, node))
3739 if device.physical_id is None:
3740 device.physical_id = new_id
3743 def _GenerateUniqueNames(lu, exts):
3744 """Generate a suitable LV name.
3746 This will generate a logical volume name for the given instance.
3751 new_id = lu.cfg.GenerateUniqueID()
3752 results.append("%s%s" % (new_id, val))
3756 def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
3758 """Generate a drbd8 device complete with its children.
3761 port = lu.cfg.AllocatePort()
3762 vgname = lu.cfg.GetVGName()
3763 shared_secret = lu.cfg.GenerateDRBDSecret()
3764 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3765 logical_id=(vgname, names[0]))
3766 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3767 logical_id=(vgname, names[1]))
3768 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3769 logical_id=(primary, secondary, port,
3772 children=[dev_data, dev_meta],
3777 def _GenerateDiskTemplate(lu, template_name,
3778 instance_name, primary_node,
3779 secondary_nodes, disk_info,
3780 file_storage_dir, file_driver,
3782 """Generate the entire disk layout for a given template type.
3785 #TODO: compute space requirements
3787 vgname = lu.cfg.GetVGName()
3788 disk_count = len(disk_info)
3790 if template_name == constants.DT_DISKLESS:
3792 elif template_name == constants.DT_PLAIN:
3793 if len(secondary_nodes) != 0:
3794 raise errors.ProgrammerError("Wrong template configuration")
3796 names = _GenerateUniqueNames(lu, [".disk%d" % i
3797 for i in range(disk_count)])
3798 for idx, disk in enumerate(disk_info):
3799 disk_index = idx + base_index
3800 disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
3801 logical_id=(vgname, names[idx]),
3802 iv_name="disk/%d" % disk_index)
3803 disks.append(disk_dev)
3804 elif template_name == constants.DT_DRBD8:
3805 if len(secondary_nodes) != 1:
3806 raise errors.ProgrammerError("Wrong template configuration")
3807 remote_node = secondary_nodes[0]
3808 minors = lu.cfg.AllocateDRBDMinor(
3809 [primary_node, remote_node] * len(disk_info), instance_name)
3812 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
3813 for i in range(disk_count)]):
3814 names.append(lv_prefix + "_data")
3815 names.append(lv_prefix + "_meta")
3816 for idx, disk in enumerate(disk_info):
3817 disk_index = idx + base_index
3818 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
3819 disk["size"], names[idx*2:idx*2+2],
3820 "disk/%d" % disk_index,
3821 minors[idx*2], minors[idx*2+1])
3822 disks.append(disk_dev)
3823 elif template_name == constants.DT_FILE:
3824 if len(secondary_nodes) != 0:
3825 raise errors.ProgrammerError("Wrong template configuration")
3827 for idx, disk in enumerate(disk_info):
3828 disk_index = idx + base_index
3829 disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
3830 iv_name="disk/%d" % disk_index,
3831 logical_id=(file_driver,
3832 "%s/disk%d" % (file_storage_dir,
3834 disks.append(disk_dev)
3836 raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
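# Naming sketch (derived from the helpers above): for a two-disk drbd
# instance the generated logical volume names look like
#   "<uuid0>.disk0_data", "<uuid0>.disk0_meta",
#   "<uuid1>.disk1_data", "<uuid1>.disk1_meta"
# i.e. one unique prefix per disk, with a data and a meta LV backing each
# DRBD8 device.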
3840 def _GetInstanceInfoText(instance):
3841 """Compute that text that should be added to the disk's metadata.
3844 return "originstname+%s" % instance.name
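# Example: for an instance named "instance1.example.com" this yields the
# metadata text "originstname+instance1.example.com".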
3847 def _CreateDisks(lu, instance):
3848 """Create all disks for an instance.
3850 This abstracts away some work from AddInstance.
3852 @type lu: L{LogicalUnit}
3853 @param lu: the logical unit on whose behalf we execute
3854 @type instance: L{objects.Instance}
3855 @param instance: the instance whose disks we should create
3857 @return: the success of the creation
3860 info = _GetInstanceInfoText(instance)
3862 if instance.disk_template == constants.DT_FILE:
3863 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3864 result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
3867 if result.failed or not result.data:
3868 raise errors.OpExecError("Could not connect to node '%s'" %
3869 instance.primary_node)
3871 if not result.data[0]:
3872 raise errors.OpExecError("Failed to create directory '%s'" %
3875 # Note: this needs to be kept in sync with adding of disks in
3876 # LUSetInstanceParams
3877 for device in instance.disks:
3878 logging.info("Creating volume %s for instance %s",
3879 device.iv_name, instance.name)
3881 for secondary_node in instance.secondary_nodes:
3882 _CreateBlockDevOnSecondary(lu, secondary_node, instance,
3883 device, False, info)
3885 _CreateBlockDevOnPrimary(lu, instance.primary_node,
3886 instance, device, info)
3889 def _RemoveDisks(lu, instance):
3890 """Remove all disks for an instance.
3892 This abstracts away some work from `AddInstance()` and
3893 `RemoveInstance()`. Note that in case some of the devices couldn't
3894 be removed, the removal will continue with the other ones (compare
3895 with `_CreateDisks()`).
3897 @type lu: L{LogicalUnit}
3898 @param lu: the logical unit on whose behalf we execute
3899 @type instance: L{objects.Instance}
3900 @param instance: the instance whose disks we should remove
3902 @return: the success of the removal
3905 logging.info("Removing block devices for instance %s", instance.name)
3908 for device in instance.disks:
3909 for node, disk in device.ComputeNodeTree(instance.primary_node):
3910 lu.cfg.SetDiskID(disk, node)
3911 result = lu.rpc.call_blockdev_remove(node, disk)
3912 if result.failed or not result.data:
3913 lu.proc.LogWarning("Could not remove block device %s on node %s,"
3914 " continuing anyway", device.iv_name, node)
3917 if instance.disk_template == constants.DT_FILE:
3918 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3919 result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
3921 if result.failed or not result.data:
3922 logging.error("Could not remove directory '%s'", file_storage_dir)
3928 def _ComputeDiskSize(disk_template, disks):
3929 """Compute disk size requirements in the volume group
3932 # Required free disk space as a function of the requested disks
3934 constants.DT_DISKLESS: None,
3935 constants.DT_PLAIN: sum(d["size"] for d in disks),
3936 # 128 MB are added for drbd metadata for each disk
3937 constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
3938 constants.DT_FILE: None,
3941 if disk_template not in req_size_dict:
3942 raise errors.ProgrammerError("Disk template '%s' size requirement"
3943 " is unknown" % disk_template)
3945 return req_size_dict[disk_template]
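  # Worked example (hypothetical sizes, following the dict above): a two-disk
  # DRBD8 instance with disks of 1024 and 2048 MB needs
  #   (1024 + 128) + (2048 + 128) = 3328 MB
  # of free space in the volume group, while the file-based and diskless
  # templates return None because they consume no LVM space.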
3948 def _CheckHVParams(lu, nodenames, hvname, hvparams):
3949 """Hypervisor parameter validation.
3951   This function abstracts the hypervisor parameter validation to be
3952 used in both instance create and instance modify.
3954 @type lu: L{LogicalUnit}
3955 @param lu: the logical unit for which we check
3956 @type nodenames: list
3957 @param nodenames: the list of nodes on which we should check
3958 @type hvname: string
3959 @param hvname: the name of the hypervisor we should use
3960 @type hvparams: dict
3961 @param hvparams: the parameters which we need to check
3962 @raise errors.OpPrereqError: if the parameters are not valid
3965 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
3968 for node in nodenames:
3971 if not info.data or not isinstance(info.data, (tuple, list)):
3972 raise errors.OpPrereqError("Cannot get current information"
3973 " from node '%s' (%s)" % (node, info.data))
3974 if not info.data[0]:
3975 raise errors.OpPrereqError("Hypervisor parameter validation failed:"
3976 " %s" % info.data[1])
3979 class LUCreateInstance(LogicalUnit):
3980 """Create an instance.
3983 HPATH = "instance-add"
3984 HTYPE = constants.HTYPE_INSTANCE
3985 _OP_REQP = ["instance_name", "disks", "disk_template",
3987 "wait_for_sync", "ip_check", "nics",
3988 "hvparams", "beparams"]
3991 def _ExpandNode(self, node):
3992 """Expands and checks one node name.
3995 node_full = self.cfg.ExpandNodeName(node)
3996 if node_full is None:
3997 raise errors.OpPrereqError("Unknown node %s" % node)
4000 def ExpandNames(self):
4001 """ExpandNames for CreateInstance.
4003 Figure out the right locks for instance creation.
4006 self.needed_locks = {}
4008 # set optional parameters to none if they don't exist
4009 for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4010 if not hasattr(self.op, attr):
4011 setattr(self.op, attr, None)
4013 # cheap checks, mostly valid constants given
4015 # verify creation mode
4016 if self.op.mode not in (constants.INSTANCE_CREATE,
4017 constants.INSTANCE_IMPORT):
4018 raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4021 # disk template and mirror node verification
4022 if self.op.disk_template not in constants.DISK_TEMPLATES:
4023 raise errors.OpPrereqError("Invalid disk template name")
4025 if self.op.hypervisor is None:
4026 self.op.hypervisor = self.cfg.GetHypervisorType()
4028 cluster = self.cfg.GetClusterInfo()
4029 enabled_hvs = cluster.enabled_hypervisors
4030 if self.op.hypervisor not in enabled_hvs:
4031 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4032 " cluster (%s)" % (self.op.hypervisor,
4033 ",".join(enabled_hvs)))
4035 # check hypervisor parameter syntax (locally)
4037 filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
4039 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4040 hv_type.CheckParameterSyntax(filled_hvp)
4042 # fill and remember the beparams dict
4043 utils.CheckBEParams(self.op.beparams)
4044 self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4047 #### instance parameters check
4049 # instance name verification
4050 hostname1 = utils.HostInfo(self.op.instance_name)
4051 self.op.instance_name = instance_name = hostname1.name
4053 # this is just a preventive check, but someone might still add this
4054 # instance in the meantime, and creation will fail at lock-add time
4055 if instance_name in self.cfg.GetInstanceList():
4056 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4059 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4063 for nic in self.op.nics:
4064 # ip validity checks
4065 ip = nic.get("ip", None)
4066 if ip is None or ip.lower() == "none":
4068 elif ip.lower() == constants.VALUE_AUTO:
4069 nic_ip = hostname1.ip
4071 if not utils.IsValidIP(ip):
4072 raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4073 " like a valid IP" % ip)
4076 # MAC address verification
4077 mac = nic.get("mac", constants.VALUE_AUTO)
4078 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4079 if not utils.IsValidMac(mac.lower()):
4080 raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4082 # bridge verification
4083 bridge = nic.get("bridge", self.cfg.GetDefBridge())
4084 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
4086 # disk checks/pre-build
4088 for disk in self.op.disks:
4089 mode = disk.get("mode", constants.DISK_RDWR)
4090 if mode not in constants.DISK_ACCESS_SET:
4091 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4093 size = disk.get("size", None)
4095 raise errors.OpPrereqError("Missing disk size")
4099 raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4100 self.disks.append({"size": size, "mode": mode})
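      # Example of the normalized disk specification built above (hypothetical
      # values): an opcode entry of {"size": 1024} without an explicit mode
      # ends up in self.disks as {"size": 1024, "mode": constants.DISK_RDWR},
      # so every entry carries both a size and an access mode from here on.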
4102 # used in CheckPrereq for ip ping check
4103 self.check_ip = hostname1.ip
4105 # file storage checks
4106 if (self.op.file_driver and
4107 not self.op.file_driver in constants.FILE_DRIVER):
4108 raise errors.OpPrereqError("Invalid file driver name '%s'" %
4109 self.op.file_driver)
4111 if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4112 raise errors.OpPrereqError("File storage directory path not absolute")
4114 ### Node/iallocator related checks
4115 if [self.op.iallocator, self.op.pnode].count(None) != 1:
4116 raise errors.OpPrereqError("One and only one of iallocator and primary"
4117 " node must be given")
4119 if self.op.iallocator:
4120 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4122 self.op.pnode = self._ExpandNode(self.op.pnode)
4123 nodelist = [self.op.pnode]
4124 if self.op.snode is not None:
4125 self.op.snode = self._ExpandNode(self.op.snode)
4126 nodelist.append(self.op.snode)
4127 self.needed_locks[locking.LEVEL_NODE] = nodelist
4129 # in case of import lock the source node too
4130 if self.op.mode == constants.INSTANCE_IMPORT:
4131 src_node = getattr(self.op, "src_node", None)
4132 src_path = getattr(self.op, "src_path", None)
4134 if src_path is None:
4135 self.op.src_path = src_path = self.op.instance_name
4137 if src_node is None:
4138 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4139 self.op.src_node = None
4140 if os.path.isabs(src_path):
4141 raise errors.OpPrereqError("Importing an instance from an absolute"
4142 " path requires a source node option.")
4144 self.op.src_node = src_node = self._ExpandNode(src_node)
4145 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4146 self.needed_locks[locking.LEVEL_NODE].append(src_node)
4147 if not os.path.isabs(src_path):
4148 self.op.src_path = src_path = \
4149 os.path.join(constants.EXPORT_DIR, src_path)
4151 else: # INSTANCE_CREATE
4152 if getattr(self.op, "os_type", None) is None:
4153 raise errors.OpPrereqError("No guest OS specified")
4155 def _RunAllocator(self):
4156 """Run the allocator based on input opcode.
4159 nics = [n.ToDict() for n in self.nics]
4160 ial = IAllocator(self,
4161 mode=constants.IALLOCATOR_MODE_ALLOC,
4162 name=self.op.instance_name,
4163 disk_template=self.op.disk_template,
4166 vcpus=self.be_full[constants.BE_VCPUS],
4167 mem_size=self.be_full[constants.BE_MEMORY],
4170 hypervisor=self.op.hypervisor,
4173 ial.Run(self.op.iallocator)
4176 raise errors.OpPrereqError("Can't compute nodes using"
4177 " iallocator '%s': %s" % (self.op.iallocator,
4179 if len(ial.nodes) != ial.required_nodes:
4180 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4181 " of nodes (%s), required %s" %
4182 (self.op.iallocator, len(ial.nodes),
4183 ial.required_nodes))
4184 self.op.pnode = ial.nodes[0]
4185 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4186 self.op.instance_name, self.op.iallocator,
4187 ", ".join(ial.nodes))
4188 if ial.required_nodes == 2:
4189 self.op.snode = ial.nodes[1]
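    # Illustrative allocator result (hypothetical node names): for a DRBD8
    # instance the allocator must return exactly two nodes, e.g.
    #   ial.nodes == ["node1.example.com", "node2.example.com"]
    # in which case pnode becomes "node1.example.com" and snode becomes
    # "node2.example.com"; single-node disk templates only use ial.nodes[0].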
4191 def BuildHooksEnv(self):
4194 This runs on master, primary and secondary nodes of the instance.
4198 "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
4199 "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
4200 "INSTANCE_ADD_MODE": self.op.mode,
4202 if self.op.mode == constants.INSTANCE_IMPORT:
4203 env["INSTANCE_SRC_NODE"] = self.op.src_node
4204 env["INSTANCE_SRC_PATH"] = self.op.src_path
4205 env["INSTANCE_SRC_IMAGES"] = self.src_images
4207 env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
4208 primary_node=self.op.pnode,
4209 secondary_nodes=self.secondaries,
4210 status=self.instance_status,
4211 os_type=self.op.os_type,
4212 memory=self.be_full[constants.BE_MEMORY],
4213 vcpus=self.be_full[constants.BE_VCPUS],
4214 nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
4217 nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4222 def CheckPrereq(self):
4223 """Check prerequisites.
4226 if (not self.cfg.GetVGName() and
4227 self.op.disk_template not in constants.DTS_NOT_LVM):
4228 raise errors.OpPrereqError("Cluster does not support lvm-based"
4232 if self.op.mode == constants.INSTANCE_IMPORT:
4233 src_node = self.op.src_node
4234 src_path = self.op.src_path
4236 if src_node is None:
4237 exp_list = self.rpc.call_export_list(
4238 self.acquired_locks[locking.LEVEL_NODE])
4240 for node in exp_list:
4241 if not exp_list[node].failed and src_path in exp_list[node].data:
4243 self.op.src_node = src_node = node
4244 self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4248 raise errors.OpPrereqError("No export found for relative path %s" %
4251 _CheckNodeOnline(self, src_node)
4252 result = self.rpc.call_export_info(src_node, src_path)
4255 raise errors.OpPrereqError("No export found in dir %s" % src_path)
4257 export_info = result.data
4258 if not export_info.has_section(constants.INISECT_EXP):
4259 raise errors.ProgrammerError("Corrupted export config")
4261 ei_version = export_info.get(constants.INISECT_EXP, 'version')
4262 if (int(ei_version) != constants.EXPORT_VERSION):
4263 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4264 (ei_version, constants.EXPORT_VERSION))
4266 # Check that the new instance doesn't have less disks than the export
4267 instance_disks = len(self.disks)
4268 export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4269 if instance_disks < export_disks:
4270 raise errors.OpPrereqError("Not enough disks to import."
4271 " (instance: %d, export: %d)" %
4272 (instance_disks, export_disks))
4274 self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4276 for idx in range(export_disks):
4277 option = 'disk%d_dump' % idx
4278 if export_info.has_option(constants.INISECT_INS, option):
4279 # FIXME: are the old os-es, disk sizes, etc. useful?
4280 export_name = export_info.get(constants.INISECT_INS, option)
4281 image = os.path.join(src_path, export_name)
4282 disk_images.append(image)
4284 disk_images.append(False)
4286 self.src_images = disk_images
4288 old_name = export_info.get(constants.INISECT_INS, 'name')
4289 # FIXME: int() here could throw a ValueError on broken exports
4290 exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4291 if self.op.instance_name == old_name:
4292 for idx, nic in enumerate(self.nics):
4293 if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
4294 nic_mac_ini = 'nic%d_mac' % idx
4295 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4297 # ip ping checks (we use the same ip that was resolved in ExpandNames)
4298 if self.op.start and not self.op.ip_check:
4299 raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4300 " adding an instance in start mode")
4302 if self.op.ip_check:
4303 if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4304 raise errors.OpPrereqError("IP %s of instance %s already in use" %
4305 (self.check_ip, self.op.instance_name))
4309 if self.op.iallocator is not None:
4310 self._RunAllocator()
4312 #### node related checks
4314 # check primary node
4315 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4316 assert self.pnode is not None, \
4317 "Cannot retrieve locked node %s" % self.op.pnode
4319 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4322 self.secondaries = []
4324 # mirror node verification
4325 if self.op.disk_template in constants.DTS_NET_MIRROR:
4326 if self.op.snode is None:
4327 raise errors.OpPrereqError("The networked disk templates need"
4329 if self.op.snode == pnode.name:
4330 raise errors.OpPrereqError("The secondary node cannot be"
4331 " the primary node.")
4332 self.secondaries.append(self.op.snode)
4333 _CheckNodeOnline(self, self.op.snode)
4335 nodenames = [pnode.name] + self.secondaries
4337 req_size = _ComputeDiskSize(self.op.disk_template,
4340 # Check lv size requirements
4341 if req_size is not None:
4342 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4344 for node in nodenames:
4345 info = nodeinfo[node]
4349 raise errors.OpPrereqError("Cannot get current information"
4350 " from node '%s'" % node)
4351 vg_free = info.get('vg_free', None)
4352 if not isinstance(vg_free, int):
4353 raise errors.OpPrereqError("Can't compute free disk space on"
4355 if req_size > info['vg_free']:
4356 raise errors.OpPrereqError("Not enough disk space on target node %s."
4357 " %d MB available, %d MB required" %
4358 (node, info['vg_free'], req_size))
4360 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4363 result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4365 if not isinstance(result.data, objects.OS):
4366 raise errors.OpPrereqError("OS '%s' not in supported os list for"
4367 " primary node" % self.op.os_type)
4369 # bridge check on primary node
4370 bridges = [n.bridge for n in self.nics]
4371 result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
4374 raise errors.OpPrereqError("One of the target bridges '%s' does not"
4375 " exist on destination node '%s'" %
4376 (",".join(bridges), pnode.name))
4378 # memory check on primary node
4380 _CheckNodeFreeMemory(self, self.pnode.name,
4381 "creating instance %s" % self.op.instance_name,
4382 self.be_full[constants.BE_MEMORY],
4386 self.instance_status = 'up'
4388 self.instance_status = 'down'
4390 def Exec(self, feedback_fn):
4391 """Create and add the instance to the cluster.
4394 instance = self.op.instance_name
4395 pnode_name = self.pnode.name
4397 for nic in self.nics:
4398 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4399 nic.mac = self.cfg.GenerateMAC()
4401 ht_kind = self.op.hypervisor
4402 if ht_kind in constants.HTS_REQ_PORT:
4403 network_port = self.cfg.AllocatePort()
4407 ##if self.op.vnc_bind_address is None:
4408 ## self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4410 # this is needed because os.path.join does not accept None arguments
4411 if self.op.file_storage_dir is None:
4412 string_file_storage_dir = ""
4414 string_file_storage_dir = self.op.file_storage_dir
4416 # build the full file storage dir path
4417 file_storage_dir = os.path.normpath(os.path.join(
4418 self.cfg.GetFileStorageDir(),
4419 string_file_storage_dir, instance))
4422 disks = _GenerateDiskTemplate(self,
4423 self.op.disk_template,
4424 instance, pnode_name,
4428 self.op.file_driver,
4431 iobj = objects.Instance(name=instance, os=self.op.os_type,
4432 primary_node=pnode_name,
4433 nics=self.nics, disks=disks,
4434 disk_template=self.op.disk_template,
4435 status=self.instance_status,
4436 network_port=network_port,
4437 beparams=self.op.beparams,
4438 hvparams=self.op.hvparams,
4439 hypervisor=self.op.hypervisor,
4442 feedback_fn("* creating instance disks...")
4444 _CreateDisks(self, iobj)
4445 except errors.OpExecError:
4446 self.LogWarning("Device creation failed, reverting...")
4448 _RemoveDisks(self, iobj)
4450 self.cfg.ReleaseDRBDMinors(instance)
4453 feedback_fn("adding instance %s to cluster config" % instance)
4455 self.cfg.AddInstance(iobj)
4456 # Declare that we don't want to remove the instance lock anymore, as we've
4457 # added the instance to the config
4458 del self.remove_locks[locking.LEVEL_INSTANCE]
4459     # Remove the temp. assignments for the instance's drbds
4460 self.cfg.ReleaseDRBDMinors(instance)
4461 # Unlock all the nodes
4462 if self.op.mode == constants.INSTANCE_IMPORT:
4463 nodes_keep = [self.op.src_node]
4464 nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4465 if node != self.op.src_node]
4466 self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4467 self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4469 self.context.glm.release(locking.LEVEL_NODE)
4470 del self.acquired_locks[locking.LEVEL_NODE]
4472 if self.op.wait_for_sync:
4473 disk_abort = not _WaitForSync(self, iobj)
4474 elif iobj.disk_template in constants.DTS_NET_MIRROR:
4475 # make sure the disks are not degraded (still sync-ing is ok)
4477 feedback_fn("* checking mirrors status")
4478 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4483 _RemoveDisks(self, iobj)
4484 self.cfg.RemoveInstance(iobj.name)
4485 # Make sure the instance lock gets removed
4486 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4487 raise errors.OpExecError("There are some degraded disks for"
4490 feedback_fn("creating os for instance %s on node %s" %
4491 (instance, pnode_name))
4493 if iobj.disk_template != constants.DT_DISKLESS:
4494 if self.op.mode == constants.INSTANCE_CREATE:
4495 feedback_fn("* running the instance OS create scripts...")
4496 result = self.rpc.call_instance_os_add(pnode_name, iobj)
4499 raise errors.OpExecError("Could not add os for instance %s"
4501 (instance, pnode_name))
4503 elif self.op.mode == constants.INSTANCE_IMPORT:
4504 feedback_fn("* running the instance OS import scripts...")
4505 src_node = self.op.src_node
4506 src_images = self.src_images
4507 cluster_name = self.cfg.GetClusterName()
4508 import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4509 src_node, src_images,
4511 import_result.Raise()
4512 for idx, result in enumerate(import_result.data):
4514 self.LogWarning("Could not import the image %s for instance"
4515 " %s, disk %d, on node %s" %
4516 (src_images[idx], instance, idx, pnode_name))
4518 # also checked in the prereq part
4519 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4523 logging.info("Starting instance %s on node %s", instance, pnode_name)
4524 feedback_fn("* starting instance...")
4525 result = self.rpc.call_instance_start(pnode_name, iobj, None)
4528 raise errors.OpExecError("Could not start instance")
4531 class LUConnectConsole(NoHooksLU):
4532 """Connect to an instance's console.
4534 This is somewhat special in that it returns the command line that
4535 you need to run on the master node in order to connect to the
4539 _OP_REQP = ["instance_name"]
4542 def ExpandNames(self):
4543 self._ExpandAndLockInstance()
4545 def CheckPrereq(self):
4546 """Check prerequisites.
4548 This checks that the instance is in the cluster.
4551 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4552 assert self.instance is not None, \
4553 "Cannot retrieve locked instance %s" % self.op.instance_name
4554 _CheckNodeOnline(self, self.instance.primary_node)
4556 def Exec(self, feedback_fn):
4557 """Connect to the console of an instance
4560 instance = self.instance
4561 node = instance.primary_node
4563 node_insts = self.rpc.call_instance_list([node],
4564 [instance.hypervisor])[node]
4567 if instance.name not in node_insts.data:
4568 raise errors.OpExecError("Instance %s is not running." % instance.name)
4570 logging.debug("Connecting to console of %s on %s", instance.name, node)
4572 hyper = hypervisor.GetHypervisor(instance.hypervisor)
4573 console_cmd = hyper.GetShellCommandForConsole(instance)
4576 return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
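    # Illustrative result (hypothetical node and command): the returned value
    # is an ssh command line to be run on the master, built for the root user
    # with a tty, roughly "ssh -t ... root@<primary node> '<console command>'";
    # the inner console command comes from the hypervisor abstraction above.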
4579 class LUReplaceDisks(LogicalUnit):
4580 """Replace the disks of an instance.
4583 HPATH = "mirrors-replace"
4584 HTYPE = constants.HTYPE_INSTANCE
4585 _OP_REQP = ["instance_name", "mode", "disks"]
4588 def CheckArguments(self):
4589 if not hasattr(self.op, "remote_node"):
4590 self.op.remote_node = None
4591 if not hasattr(self.op, "iallocator"):
4592 self.op.iallocator = None
4594 # check for valid parameter combination
4595 cnt = [self.op.remote_node, self.op.iallocator].count(None)
4596 if self.op.mode == constants.REPLACE_DISK_CHG:
4598 raise errors.OpPrereqError("When changing the secondary either an"
4599 " iallocator script must be used or the"
4602 raise errors.OpPrereqError("Give either the iallocator or the new"
4603 " secondary, not both")
4604 else: # not replacing the secondary
4606 raise errors.OpPrereqError("The iallocator and new node options can"
4607 " be used only when changing the"
4610 def ExpandNames(self):
4611 self._ExpandAndLockInstance()
4613 if self.op.iallocator is not None:
4614 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4615 elif self.op.remote_node is not None:
4616 remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
4617 if remote_node is None:
4618 raise errors.OpPrereqError("Node '%s' not known" %
4619 self.op.remote_node)
4620 self.op.remote_node = remote_node
4621 self.needed_locks[locking.LEVEL_NODE] = [remote_node]
4622 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4624 self.needed_locks[locking.LEVEL_NODE] = []
4625 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4627 def DeclareLocks(self, level):
4628 # If we're not already locking all nodes in the set we have to declare the
4629 # instance's primary/secondary nodes.
4630 if (level == locking.LEVEL_NODE and
4631 self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
4632 self._LockInstancesNodes()
4634 def _RunAllocator(self):
4635 """Compute a new secondary node using an IAllocator.
4638 ial = IAllocator(self,
4639 mode=constants.IALLOCATOR_MODE_RELOC,
4640 name=self.op.instance_name,
4641 relocate_from=[self.sec_node])
4643 ial.Run(self.op.iallocator)
4646 raise errors.OpPrereqError("Can't compute nodes using"
4647 " iallocator '%s': %s" % (self.op.iallocator,
4649 if len(ial.nodes) != ial.required_nodes:
4650 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4651 " of nodes (%s), required %s" %
4652                                  (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
4653 self.op.remote_node = ial.nodes[0]
4654 self.LogInfo("Selected new secondary for the instance: %s",
4655 self.op.remote_node)
4657 def BuildHooksEnv(self):
4660 This runs on the master, the primary and all the secondaries.
4664 "MODE": self.op.mode,
4665 "NEW_SECONDARY": self.op.remote_node,
4666 "OLD_SECONDARY": self.instance.secondary_nodes[0],
4668 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4670 self.cfg.GetMasterNode(),
4671 self.instance.primary_node,
4673 if self.op.remote_node is not None:
4674 nl.append(self.op.remote_node)
4677 def CheckPrereq(self):
4678 """Check prerequisites.
4680 This checks that the instance is in the cluster.
4683 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4684 assert instance is not None, \
4685 "Cannot retrieve locked instance %s" % self.op.instance_name
4686 self.instance = instance
4688 if instance.disk_template != constants.DT_DRBD8:
4689 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
4692 if len(instance.secondary_nodes) != 1:
4693 raise errors.OpPrereqError("The instance has a strange layout,"
4694 " expected one secondary but found %d" %
4695 len(instance.secondary_nodes))
4697 self.sec_node = instance.secondary_nodes[0]
4699 if self.op.iallocator is not None:
4700 self._RunAllocator()
4702 remote_node = self.op.remote_node
4703 if remote_node is not None:
4704 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
4705 assert self.remote_node_info is not None, \
4706 "Cannot retrieve locked node %s" % remote_node
4708 self.remote_node_info = None
4709 if remote_node == instance.primary_node:
4710 raise errors.OpPrereqError("The specified node is the primary node of"
4712 elif remote_node == self.sec_node:
4713 raise errors.OpPrereqError("The specified node is already the"
4714 " secondary node of the instance.")
4716 if self.op.mode == constants.REPLACE_DISK_PRI:
4717 n1 = self.tgt_node = instance.primary_node
4718 n2 = self.oth_node = self.sec_node
4719 elif self.op.mode == constants.REPLACE_DISK_SEC:
4720 n1 = self.tgt_node = self.sec_node
4721 n2 = self.oth_node = instance.primary_node
4722 elif self.op.mode == constants.REPLACE_DISK_CHG:
4723 n1 = self.new_node = remote_node
4724 n2 = self.oth_node = instance.primary_node
4725 self.tgt_node = self.sec_node
4727 raise errors.ProgrammerError("Unhandled disk replace mode")
4729 _CheckNodeOnline(self, n1)
4730 _CheckNodeOnline(self, n2)
4732 if not self.op.disks:
4733 self.op.disks = range(len(instance.disks))
4735 for disk_idx in self.op.disks:
4736 instance.FindDisk(disk_idx)
4738 def _ExecD8DiskOnly(self, feedback_fn):
4739 """Replace a disk on the primary or secondary for dbrd8.
4741 The algorithm for replace is quite complicated:
4743 1. for each disk to be replaced:
4745 1. create new LVs on the target node with unique names
4746 1. detach old LVs from the drbd device
4747 1. rename old LVs to name_replaced.<time_t>
4748 1. rename new LVs to old LVs
4749 1. attach the new LVs (with the old names now) to the drbd device
4751 1. wait for sync across all devices
4753 1. for each modified disk:
4755       1. remove old LVs (which have the name name_replaced.<time_t>)
4757 Failures are not very well handled.
4761 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4762 instance = self.instance
4764 vgname = self.cfg.GetVGName()
4767 tgt_node = self.tgt_node
4768 oth_node = self.oth_node
4770 # Step: check device activation
4771 self.proc.LogStep(1, steps_total, "check device existence")
4772 info("checking volume groups")
4773 my_vg = cfg.GetVGName()
4774 results = self.rpc.call_vg_list([oth_node, tgt_node])
4776 raise errors.OpExecError("Can't list volume groups on the nodes")
4777 for node in oth_node, tgt_node:
4779 if res.failed or not res.data or my_vg not in res.data:
4780 raise errors.OpExecError("Volume group '%s' not found on %s" %
4782 for idx, dev in enumerate(instance.disks):
4783 if idx not in self.op.disks:
4785 for node in tgt_node, oth_node:
4786 info("checking disk/%d on %s" % (idx, node))
4787 cfg.SetDiskID(dev, node)
4788 if not self.rpc.call_blockdev_find(node, dev):
4789 raise errors.OpExecError("Can't find disk/%d on node %s" %
4792 # Step: check other node consistency
4793 self.proc.LogStep(2, steps_total, "check peer consistency")
4794 for idx, dev in enumerate(instance.disks):
4795 if idx not in self.op.disks:
4797 info("checking disk/%d consistency on %s" % (idx, oth_node))
4798 if not _CheckDiskConsistency(self, dev, oth_node,
4799 oth_node==instance.primary_node):
4800 raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
4801 " to replace disks on this node (%s)" %
4802 (oth_node, tgt_node))
4804 # Step: create new storage
4805 self.proc.LogStep(3, steps_total, "allocate new storage")
4806 for idx, dev in enumerate(instance.disks):
4807 if idx not in self.op.disks:
4810 cfg.SetDiskID(dev, tgt_node)
4811 lv_names = [".disk%d_%s" % (idx, suf)
4812 for suf in ["data", "meta"]]
4813 names = _GenerateUniqueNames(self, lv_names)
4814 lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4815 logical_id=(vgname, names[0]))
4816 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4817 logical_id=(vgname, names[1]))
4818 new_lvs = [lv_data, lv_meta]
4819 old_lvs = dev.children
4820 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
4821 info("creating new local storage on %s for %s" %
4822 (tgt_node, dev.iv_name))
4823 # since we *always* want to create this LV, we use the
4824 # _Create...OnPrimary (which forces the creation), even if we
4825 # are talking about the secondary node
4826 for new_lv in new_lvs:
4827 _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
4828 _GetInstanceInfoText(instance))
4830 # Step: for each lv, detach+rename*2+attach
4831 self.proc.LogStep(4, steps_total, "change drbd configuration")
4832 for dev, old_lvs, new_lvs in iv_names.itervalues():
4833 info("detaching %s drbd from local storage" % dev.iv_name)
4834 result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
4837 raise errors.OpExecError("Can't detach drbd from local storage on node"
4838 " %s for device %s" % (tgt_node, dev.iv_name))
4840 #cfg.Update(instance)
4842 # ok, we created the new LVs, so now we know we have the needed
4843 # storage; as such, we proceed on the target node to rename
4844 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4845 # using the assumption that logical_id == physical_id (which in
4846 # turn is the unique_id on that node)
4848 # FIXME(iustin): use a better name for the replaced LVs
4849 temp_suffix = int(time.time())
4850 ren_fn = lambda d, suff: (d.physical_id[0],
4851 d.physical_id[1] + "_replaced-%s" % suff)
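      # Illustrative rename (hypothetical names): with temp_suffix == 1200000000,
      # an old data LV whose physical_id is ("xenvg", "uuid.disk0_data") becomes
      # ("xenvg", "uuid.disk0_data_replaced-1200000000"), which frees the
      # original name for the freshly created LV that takes its place below.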
4852 # build the rename list based on what LVs exist on the node
4854 for to_ren in old_lvs:
4855 find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4856 if not find_res.failed and find_res.data is not None: # device exists
4857 rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4859 info("renaming the old LVs on the target node")
4860 result = self.rpc.call_blockdev_rename(tgt_node, rlist)
4863 raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4864 # now we rename the new LVs to the old LVs
4865 info("renaming the new LVs on the target node")
4866 rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4867 result = self.rpc.call_blockdev_rename(tgt_node, rlist)
4870 raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4872 for old, new in zip(old_lvs, new_lvs):
4873 new.logical_id = old.logical_id
4874 cfg.SetDiskID(new, tgt_node)
4876 for disk in old_lvs:
4877 disk.logical_id = ren_fn(disk, temp_suffix)
4878 cfg.SetDiskID(disk, tgt_node)
4880 # now that the new lvs have the old name, we can add them to the device
4881 info("adding new mirror component on %s" % tgt_node)
4882 result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
4883 if result.failed or not result.data:
4884 for new_lv in new_lvs:
4885 result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
4886 if result.failed or not result.data:
4887 warning("Can't rollback device %s", hint="manually cleanup unused"
4889 raise errors.OpExecError("Can't add local storage to drbd")
4891 dev.children = new_lvs
4892 cfg.Update(instance)
4894 # Step: wait for sync
4896 # this can fail as the old devices are degraded and _WaitForSync
4897 # does a combined result over all disks, so we don't check its
4899 self.proc.LogStep(5, steps_total, "sync devices")
4900 _WaitForSync(self, instance, unlock=True)
4902 # so check manually all the devices
4903 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4904 cfg.SetDiskID(dev, instance.primary_node)
4905 result = self.rpc.call_blockdev_find(instance.primary_node, dev)
4906 if result.failed or result.data[5]:
4907 raise errors.OpExecError("DRBD device %s is degraded!" % name)
4909 # Step: remove old storage
4910 self.proc.LogStep(6, steps_total, "removing old storage")
4911 for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4912 info("remove logical volumes for %s" % name)
4914 cfg.SetDiskID(lv, tgt_node)
4915 result = self.rpc.call_blockdev_remove(tgt_node, lv)
4916 if result.failed or not result.data:
4917 warning("Can't remove old LV", hint="manually remove unused LVs")
4920 def _ExecD8Secondary(self, feedback_fn):
4921 """Replace the secondary node for drbd8.
4923 The algorithm for replace is quite complicated:
4924 - for all disks of the instance:
4925 - create new LVs on the new node with same names
4926 - shutdown the drbd device on the old secondary
4927 - disconnect the drbd network on the primary
4928 - create the drbd device on the new secondary
4929 - network attach the drbd on the primary, using an artifice:
4930 the drbd code for Attach() will connect to the network if it
4931 finds a device which is connected to the good local disks but
4933 - wait for sync across all devices
4934 - remove all disks from the old secondary
4936 Failures are not very well handled.
4940 warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4941 instance = self.instance
4945 old_node = self.tgt_node
4946 new_node = self.new_node
4947 pri_node = instance.primary_node
4949 old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
4950 new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
4951 pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
4954 # Step: check device activation
4955 self.proc.LogStep(1, steps_total, "check device existence")
4956 info("checking volume groups")
4957 my_vg = cfg.GetVGName()
4958 results = self.rpc.call_vg_list([pri_node, new_node])
4959 for node in pri_node, new_node:
4961 if res.failed or not res.data or my_vg not in res.data:
4962 raise errors.OpExecError("Volume group '%s' not found on %s" %
4964 for idx, dev in enumerate(instance.disks):
4965 if idx not in self.op.disks:
4967 info("checking disk/%d on %s" % (idx, pri_node))
4968 cfg.SetDiskID(dev, pri_node)
4969 result = self.rpc.call_blockdev_find(pri_node, dev)
4972 raise errors.OpExecError("Can't find disk/%d on node %s" %
4975 # Step: check other node consistency
4976 self.proc.LogStep(2, steps_total, "check peer consistency")
4977 for idx, dev in enumerate(instance.disks):
4978 if idx not in self.op.disks:
4980 info("checking disk/%d consistency on %s" % (idx, pri_node))
4981 if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4982 raise errors.OpExecError("Primary node (%s) has degraded storage,"
4983 " unsafe to replace the secondary" %
4986 # Step: create new storage
4987 self.proc.LogStep(3, steps_total, "allocate new storage")
4988 for idx, dev in enumerate(instance.disks):
4989 info("adding new local storage on %s for disk/%d" %
4991 # since we *always* want to create this LV, we use the
4992 # _Create...OnPrimary (which forces the creation), even if we
4993 # are talking about the secondary node
4994 for new_lv in dev.children:
4995 _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4996 _GetInstanceInfoText(instance))
4998     # Step 4: drbd minors and drbd setup changes
4999 # after this, we must manually remove the drbd minors on both the
5000 # error and the success paths
5001 minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5003     logging.debug("Allocated minors %s", minors)
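    # Illustrative allocation (hypothetical values): for a two-disk instance
    # this returns one minor per disk on the new node, e.g. minors == [12, 13];
    # the reservations are only temporary and are released again via
    # ReleaseDRBDMinors on both the error and the success paths below.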
5004 self.proc.LogStep(4, steps_total, "changing drbd configuration")
5005 for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5007 info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5008 # create new devices on new_node; note that we create two IDs:
5009 # one without port, so the drbd will be activated without
5010 # networking information on the new node at this stage, and one
5011 # with network, for the latter activation in step 4
5012 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5013 if pri_node == o_node1:
5018 new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5019 new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5021 iv_names[idx] = (dev, dev.children, new_net_id)
5022 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5024 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5025 logical_id=new_alone_id,
5026 children=dev.children)
5028 _CreateBlockDevOnSecondary(self, new_node, instance, new_drbd, False,
5029 _GetInstanceInfoText(instance))
5030       except errors.BlockDeviceError:
5031 self.cfg.ReleaseDRBDMinors(instance.name)
5034 for idx, dev in enumerate(instance.disks):
5035 # we have new devices, shutdown the drbd on the old secondary
5036 info("shutting down drbd for disk/%d on old node" % idx)
5037 cfg.SetDiskID(dev, old_node)
5038 result = self.rpc.call_blockdev_shutdown(old_node, dev)
5039 if result.failed or not result.data:
5040 warning("Failed to shutdown drbd for disk/%d on old node" % idx,
5041 hint="Please cleanup this device manually as soon as possible")
5043 info("detaching primary drbds from the network (=> standalone)")
5044 result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5045 instance.disks)[pri_node]
5047 msg = result.RemoteFailMsg()
5049 # detaches didn't succeed (unlikely)
5050 self.cfg.ReleaseDRBDMinors(instance.name)
5051 raise errors.OpExecError("Can't detach the disks from the network on"
5052 " old node: %s" % (msg,))
5054 # if we managed to detach at least one, we update all the disks of
5055 # the instance to point to the new secondary
5056 info("updating instance configuration")
5057 for dev, _, new_logical_id in iv_names.itervalues():
5058 dev.logical_id = new_logical_id
5059 cfg.SetDiskID(dev, pri_node)
5060 cfg.Update(instance)
5062     # we can now remove the temp minors, as the new values are
5063     # written to the config file (and therefore stable)
5063 self.cfg.ReleaseDRBDMinors(instance.name)
5065 # and now perform the drbd attach
5066 info("attaching primary drbds to new secondary (standalone => connected)")
5067 result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5068 instance.disks, instance.name,
5070 for to_node, to_result in result.items():
5071 msg = to_result.RemoteFailMsg()
5073 warning("can't attach drbd disks on node %s: %s", to_node, msg,
5074 hint="please do a gnt-instance info to see the"
5077 # this can fail as the old devices are degraded and _WaitForSync
5078 # does a combined result over all disks, so we don't check its
5080 self.proc.LogStep(5, steps_total, "sync devices")
5081 _WaitForSync(self, instance, unlock=True)
5083 # so check manually all the devices
5084 for idx, (dev, old_lvs, _) in iv_names.iteritems():
5085 cfg.SetDiskID(dev, pri_node)
5086 result = self.rpc.call_blockdev_find(pri_node, dev)
5089 raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5091 self.proc.LogStep(6, steps_total, "removing old storage")
5092 for idx, (dev, old_lvs, _) in iv_names.iteritems():
5093 info("remove logical volumes for disk/%d" % idx)
5095 cfg.SetDiskID(lv, old_node)
5096 result = self.rpc.call_blockdev_remove(old_node, lv)
5097 if result.failed or not result.data:
5098 warning("Can't remove LV on old secondary",
5099 hint="Cleanup stale volumes by hand")
5101 def Exec(self, feedback_fn):
5102 """Execute disk replacement.
5104 This dispatches the disk replacement to the appropriate handler.
5107 instance = self.instance
5109 # Activate the instance disks if we're replacing them on a down instance
5110 if instance.status == "down":
5111 _StartInstanceDisks(self, instance, True)
5113 if self.op.mode == constants.REPLACE_DISK_CHG:
5114 fn = self._ExecD8Secondary
5116 fn = self._ExecD8DiskOnly
5118 ret = fn(feedback_fn)
5120 # Deactivate the instance disks if we're replacing them on a down instance
5121 if instance.status == "down":
5122 _SafeShutdownInstanceDisks(self, instance)
5127 class LUGrowDisk(LogicalUnit):
5128 """Grow a disk of an instance.
5132 HTYPE = constants.HTYPE_INSTANCE
5133 _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
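  # Example opcode payload (hypothetical values): growing disk 0 of instance
  # "web1.example.com" by 1024 MiB corresponds to
  #   instance_name="web1.example.com", disk=0, amount=1024, wait_for_sync=True
  # where the amount is in MiB, matching the free-space check in CheckPrereq.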
5136 def ExpandNames(self):
5137 self._ExpandAndLockInstance()
5138 self.needed_locks[locking.LEVEL_NODE] = []
5139 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5141 def DeclareLocks(self, level):
5142 if level == locking.LEVEL_NODE:
5143 self._LockInstancesNodes()
5145 def BuildHooksEnv(self):
5148 This runs on the master, the primary and all the secondaries.
5152 "DISK": self.op.disk,
5153 "AMOUNT": self.op.amount,
5155 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5157 self.cfg.GetMasterNode(),
5158 self.instance.primary_node,
5162 def CheckPrereq(self):
5163 """Check prerequisites.
5165 This checks that the instance is in the cluster.
5168 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5169 assert instance is not None, \
5170 "Cannot retrieve locked instance %s" % self.op.instance_name
5171 _CheckNodeOnline(self, instance.primary_node)
5172 for node in instance.secondary_nodes:
5173 _CheckNodeOnline(self, node)
5176 self.instance = instance
5178 if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5179 raise errors.OpPrereqError("Instance's disk layout does not support"
5182 self.disk = instance.FindDisk(self.op.disk)
5184 nodenames = [instance.primary_node] + list(instance.secondary_nodes)
5185 nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5186 instance.hypervisor)
5187 for node in nodenames:
5188 info = nodeinfo[node]
5189 if info.failed or not info.data:
5190 raise errors.OpPrereqError("Cannot get current information"
5191 " from node '%s'" % node)
5192 vg_free = info.data.get('vg_free', None)
5193 if not isinstance(vg_free, int):
5194 raise errors.OpPrereqError("Can't compute free disk space on"
5196 if self.op.amount > vg_free:
5197 raise errors.OpPrereqError("Not enough disk space on target node %s:"
5198 " %d MiB available, %d MiB required" %
5199 (node, vg_free, self.op.amount))
5201 def Exec(self, feedback_fn):
5202 """Execute disk grow.
5205 instance = self.instance
5207 for node in (instance.secondary_nodes + (instance.primary_node,)):
5208 self.cfg.SetDiskID(disk, node)
5209 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5211 if (not result.data or not isinstance(result.data, (list, tuple)) or
5212 len(result.data) != 2):
5213 raise errors.OpExecError("Grow request failed to node %s" % node)
5214 elif not result.data[0]:
5215 raise errors.OpExecError("Grow request failed to node %s: %s" %
5216 (node, result.data[1]))
5217 disk.RecordGrow(self.op.amount)
5218 self.cfg.Update(instance)
5219 if self.op.wait_for_sync:
5220 disk_abort = not _WaitForSync(self, instance)
5222 self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5223 " status.\nPlease check the instance.")
5226 class LUQueryInstanceData(NoHooksLU):
5227 """Query runtime instance data.
5230 _OP_REQP = ["instances", "static"]
5233 def ExpandNames(self):
5234 self.needed_locks = {}
5235 self.share_locks = dict(((i, 1) for i in locking.LEVELS))
5237 if not isinstance(self.op.instances, list):
5238 raise errors.OpPrereqError("Invalid argument type 'instances'")
5240 if self.op.instances:
5241 self.wanted_names = []
5242 for name in self.op.instances:
5243 full_name = self.cfg.ExpandInstanceName(name)
5244 if full_name is None:
5245 raise errors.OpPrereqError("Instance '%s' not known" % name)
5246 self.wanted_names.append(full_name)
5247 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5249 self.wanted_names = None
5250 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5252 self.needed_locks[locking.LEVEL_NODE] = []
5253 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5255 def DeclareLocks(self, level):
5256 if level == locking.LEVEL_NODE:
5257 self._LockInstancesNodes()
5259 def CheckPrereq(self):
5260 """Check prerequisites.
5262 This only checks the optional instance list against the existing names.
5265 if self.wanted_names is None:
5266 self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5268 self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5269 in self.wanted_names]
5272 def _ComputeDiskStatus(self, instance, snode, dev):
5273 """Compute block device status.
5276 static = self.op.static
5278 self.cfg.SetDiskID(dev, instance.primary_node)
5279 dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5281 dev_pstatus = dev_pstatus.data
5285 if dev.dev_type in constants.LDS_DRBD:
5286 # we change the snode then (otherwise we use the one passed in)
5287 if dev.logical_id[0] == instance.primary_node:
5288 snode = dev.logical_id[1]
5290 snode = dev.logical_id[0]
5292 if snode and not static:
5293 self.cfg.SetDiskID(dev, snode)
5294 dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5296 dev_sstatus = dev_sstatus.data
5301 dev_children = [self._ComputeDiskStatus(instance, snode, child)
5302 for child in dev.children]
5307 "iv_name": dev.iv_name,
5308 "dev_type": dev.dev_type,
5309 "logical_id": dev.logical_id,
5310 "physical_id": dev.physical_id,
5311 "pstatus": dev_pstatus,
5312 "sstatus": dev_sstatus,
5313 "children": dev_children,
5319 def Exec(self, feedback_fn):
5320 """Gather and return data"""
5323 cluster = self.cfg.GetClusterInfo()
5325 for instance in self.wanted_instances:
5326 if not self.op.static:
5327 remote_info = self.rpc.call_instance_info(instance.primary_node,
5329 instance.hypervisor)
5331 remote_info = remote_info.data
5332 if remote_info and "state" in remote_info:
5335 remote_state = "down"
5338 if instance.status == "down":
5339 config_state = "down"
5343 disks = [self._ComputeDiskStatus(instance, None, device)
5344 for device in instance.disks]
5347 "name": instance.name,
5348 "config_state": config_state,
5349 "run_state": remote_state,
5350 "pnode": instance.primary_node,
5351 "snodes": instance.secondary_nodes,
5353 "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
5355 "hypervisor": instance.hypervisor,
5356 "network_port": instance.network_port,
5357 "hv_instance": instance.hvparams,
5358 "hv_actual": cluster.FillHV(instance),
5359 "be_instance": instance.beparams,
5360 "be_actual": cluster.FillBE(instance),
5363 result[instance.name] = idict
5368 class LUSetInstanceParams(LogicalUnit):
5369 """Modifies an instances's parameters.
5372 HPATH = "instance-modify"
5373 HTYPE = constants.HTYPE_INSTANCE
5374 _OP_REQP = ["instance_name"]
5377 def CheckArguments(self):
5378 if not hasattr(self.op, 'nics'):
5380 if not hasattr(self.op, 'disks'):
5382 if not hasattr(self.op, 'beparams'):
5383 self.op.beparams = {}
5384 if not hasattr(self.op, 'hvparams'):
5385 self.op.hvparams = {}
5386 self.op.force = getattr(self.op, "force", False)
5387 if not (self.op.nics or self.op.disks or
5388 self.op.hvparams or self.op.beparams):
5389 raise errors.OpPrereqError("No changes submitted")
5391 utils.CheckBEParams(self.op.beparams)
5395 for disk_op, disk_dict in self.op.disks:
5396 if disk_op == constants.DDM_REMOVE:
5399 elif disk_op == constants.DDM_ADD:
5402 if not isinstance(disk_op, int):
5403 raise errors.OpPrereqError("Invalid disk index")
5404 if disk_op == constants.DDM_ADD:
5405 mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5406 if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
5407 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5408 size = disk_dict.get('size', None)
5410 raise errors.OpPrereqError("Required disk parameter size missing")
5413 except ValueError, err:
5414 raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5416 disk_dict['size'] = size
5418 # modification of disk
5419 if 'size' in disk_dict:
5420 raise errors.OpPrereqError("Disk size change not possible, use"
5423 if disk_addremove > 1:
5424 raise errors.OpPrereqError("Only one disk add or remove operation"
5425 " supported at a time")
5429 for nic_op, nic_dict in self.op.nics:
5430 if nic_op == constants.DDM_REMOVE:
5433 elif nic_op == constants.DDM_ADD:
5436 if not isinstance(nic_op, int):
5437 raise errors.OpPrereqError("Invalid nic index")
5439 # nic_dict should be a dict
5440 nic_ip = nic_dict.get('ip', None)
5441 if nic_ip is not None:
5442 if nic_ip.lower() == "none":
5443 nic_dict['ip'] = None
5445 if not utils.IsValidIP(nic_ip):
5446 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5447 # we can only check None bridges and assign the default one
5448 nic_bridge = nic_dict.get('bridge', None)
5449 if nic_bridge is None:
5450 nic_dict['bridge'] = self.cfg.GetDefBridge()
5451 # but we can validate MACs
5452 nic_mac = nic_dict.get('mac', None)
5453 if nic_mac is not None:
5454 if self.cfg.IsMacInUse(nic_mac):
5455 raise errors.OpPrereqError("MAC address %s already in use"
5456 " in cluster" % nic_mac)
5457 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5458 if not utils.IsValidMac(nic_mac):
5459 raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5460 if nic_addremove > 1:
5461 raise errors.OpPrereqError("Only one NIC add or remove operation"
5462 " supported at a time")
5464 def ExpandNames(self):
5465 self._ExpandAndLockInstance()
5466 self.needed_locks[locking.LEVEL_NODE] = []
5467 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5469 def DeclareLocks(self, level):
5470 if level == locking.LEVEL_NODE:
5471 self._LockInstancesNodes()
5473 def BuildHooksEnv(self):
5476 This runs on the master, primary and secondaries.
5480 if constants.BE_MEMORY in self.be_new:
5481 args['memory'] = self.be_new[constants.BE_MEMORY]
5482 if constants.BE_VCPUS in self.be_new:
5483 args['vcpus'] = self.be_new[constants.BE_VCPUS]
5484 # FIXME: readd disk/nic changes
5485 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
5486 nl = [self.cfg.GetMasterNode(),
5487 self.instance.primary_node] + list(self.instance.secondary_nodes)
5490 def CheckPrereq(self):
5491 """Check prerequisites.
5493 This only checks the instance list against the existing names.
5496 force = self.force = self.op.force
5498 # checking the new params on the primary/secondary nodes
5500 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5501 assert self.instance is not None, \
5502 "Cannot retrieve locked instance %s" % self.op.instance_name
5503 pnode = self.instance.primary_node
5505 nodelist.extend(instance.secondary_nodes)
5507 # hvparams processing
5508 if self.op.hvparams:
5509 i_hvdict = copy.deepcopy(instance.hvparams)
5510 for key, val in self.op.hvparams.iteritems():
5511 if val == constants.VALUE_DEFAULT:
5516 elif val == constants.VALUE_NONE:
5517 i_hvdict[key] = None
5520 cluster = self.cfg.GetClusterInfo()
5521 hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
5524 hypervisor.GetHypervisor(
5525 instance.hypervisor).CheckParameterSyntax(hv_new)
5526 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5527 self.hv_new = hv_new # the new actual values
5528 self.hv_inst = i_hvdict # the new dict (without defaults)
5530 self.hv_new = self.hv_inst = {}
5532 # beparams processing
5533 if self.op.beparams:
5534 i_bedict = copy.deepcopy(instance.beparams)
5535 for key, val in self.op.beparams.iteritems():
5536 if val == constants.VALUE_DEFAULT:
5543 cluster = self.cfg.GetClusterInfo()
5544 be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5546 self.be_new = be_new # the new actual values
5547 self.be_inst = i_bedict # the new dict (without defaults)
5549 self.be_new = self.be_inst = {}
5553 if constants.BE_MEMORY in self.op.beparams and not self.force:
5554 mem_check_list = [pnode]
5555 if be_new[constants.BE_AUTO_BALANCE]:
5556 # either we changed auto_balance to yes or it was from before
5557 mem_check_list.extend(instance.secondary_nodes)
5558 instance_info = self.rpc.call_instance_info(pnode, instance.name,
5559 instance.hypervisor)
5560 nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5561 instance.hypervisor)
5562 if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
5563 # Assume the primary node is unreachable and go ahead
5564 self.warn.append("Can't get info from primary node %s" % pnode)
5566 if not instance_info.failed and instance_info.data:
5567 current_mem = instance_info.data['memory']
5569 # Assume instance not running
5570 # (there is a slight race condition here, but it's not very probable,
5571 # and we have no other way to check)
5573 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5574 nodeinfo[pnode].data['memory_free'])
5576 raise errors.OpPrereqError("This change will prevent the instance"
5577 " from starting, due to %d MB of memory"
5578 " missing on its primary node" % miss_mem)
5580 if be_new[constants.BE_AUTO_BALANCE]:
5581 for node, nres in nodeinfo.iteritems():
5582 if node not in instance.secondary_nodes:
5584 if nres.failed or not isinstance(nres.data, dict):
5585 self.warn.append("Can't get info from secondary node %s" % node)
5586 elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
5587 self.warn.append("Not enough memory to failover instance to"
5588 " secondary node %s" % node)
5591 for nic_op, nic_dict in self.op.nics:
5592 if nic_op == constants.DDM_REMOVE:
5593 if not instance.nics:
5594 raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5596 if nic_op != constants.DDM_ADD:
5598 if nic_op < 0 or nic_op >= len(instance.nics):
5599 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5601 (nic_op, len(instance.nics)))
5602 nic_bridge = nic_dict.get('bridge', None)
5603 if nic_bridge is not None:
5604 if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5605 msg = ("Bridge '%s' doesn't exist on one of"
5606 " the instance nodes" % nic_bridge)
5608 self.warn.append(msg)
5610 raise errors.OpPrereqError(msg)
5613 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5614 raise errors.OpPrereqError("Disk operations not supported for"
5615 " diskless instances")
5616 for disk_op, disk_dict in self.op.disks:
5617 if disk_op == constants.DDM_REMOVE:
5618 if len(instance.disks) == 1:
5619 raise errors.OpPrereqError("Cannot remove the last disk of"
5621 ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5622 ins_l = ins_l[pnode]
5623 if ins_l.failed or not isinstance(ins_l.data, list):
5624 raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5625 if instance.name in ins_l.data:
5626 raise errors.OpPrereqError("Instance is running, can't remove"
5629 if (disk_op == constants.DDM_ADD and
5630           len(instance.disks) >= constants.MAX_DISKS):
5631 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5632 " add more" % constants.MAX_DISKS)
5633 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5635 if disk_op < 0 or disk_op >= len(instance.disks):
5636 raise errors.OpPrereqError("Invalid disk index %s, valid values"
5638 (disk_op, len(instance.disks)))
5642 def Exec(self, feedback_fn):
5643 """Modifies an instance.
5645 All parameters take effect only at the next restart of the instance.
5648 # Process here the warnings from CheckPrereq, as we don't have a
5649 # feedback_fn there.
5650 for warn in self.warn:
5651 feedback_fn("WARNING: %s" % warn)
5654 instance = self.instance
5656 for disk_op, disk_dict in self.op.disks:
5657 if disk_op == constants.DDM_REMOVE:
5658 # remove the last disk
5659 device = instance.disks.pop()
5660 device_idx = len(instance.disks)
5661 for node, disk in device.ComputeNodeTree(instance.primary_node):
5662 self.cfg.SetDiskID(disk, node)
5663 rpc_result = self.rpc.call_blockdev_remove(node, disk)
5664 if rpc_result.failed or not rpc_result.data:
5665 self.proc.LogWarning("Could not remove disk/%d on node %s,"
5666 " continuing anyway", device_idx, node)
5667 result.append(("disk/%d" % device_idx, "remove"))
5668 elif disk_op == constants.DDM_ADD:
5670 if instance.disk_template == constants.DT_FILE:
5671 file_driver, file_path = instance.disks[0].logical_id
5672 file_path = os.path.dirname(file_path)
5674 file_driver = file_path = None
5675 disk_idx_base = len(instance.disks)
5676 new_disk = _GenerateDiskTemplate(self,
5677 instance.disk_template,
5678 instance, instance.primary_node,
5679 instance.secondary_nodes,
5684 new_disk.mode = disk_dict['mode']
5685 instance.disks.append(new_disk)
5686 info = _GetInstanceInfoText(instance)
5688 logging.info("Creating volume %s for instance %s",
5689 new_disk.iv_name, instance.name)
5690 # Note: this needs to be kept in sync with _CreateDisks
5692 for secondary_node in instance.secondary_nodes:
5694 _CreateBlockDevOnSecondary(self, secondary_node, instance,
5695 new_disk, False, info)
5696           except errors.OpExecError, err:
5697 self.LogWarning("Failed to create volume %s (%s) on"
5698 " secondary node %s: %s",
5699 new_disk.iv_name, new_disk, secondary_node, err)
5702 _CreateBlockDevOnPrimary(self, instance.primary_node,
5703 instance, new_disk, info)
5704 except errors.OpExecError, err:
5705 self.LogWarning("Failed to create volume %s on primary: %s",
5706 new_disk.iv_name, err)
5707 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5708 (new_disk.size, new_disk.mode)))
5710 # change a given disk
5711 instance.disks[disk_op].mode = disk_dict['mode']
5712 result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5714 for nic_op, nic_dict in self.op.nics:
5715 if nic_op == constants.DDM_REMOVE:
5716 # remove the last nic
5717 del instance.nics[-1]
5718 result.append(("nic.%d" % len(instance.nics), "remove"))
5719 elif nic_op == constants.DDM_ADD:
5721 if 'mac' not in nic_dict:
5722 mac = constants.VALUE_GENERATE
5724 mac = nic_dict['mac']
5725 if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5726 mac = self.cfg.GenerateMAC()
5727 new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
5728 bridge=nic_dict.get('bridge', None))
5729 instance.nics.append(new_nic)
5730 result.append(("nic.%d" % (len(instance.nics) - 1),
5731 "add:mac=%s,ip=%s,bridge=%s" %
5732 (new_nic.mac, new_nic.ip, new_nic.bridge)))
5734 # change a given nic
5735 for key in 'mac', 'ip', 'bridge':
5737 setattr(instance.nics[nic_op], key, nic_dict[key])
5738 result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
5741 if self.op.hvparams:
5742 instance.hvparams = self.hv_new
5743 for key, val in self.op.hvparams.iteritems():
5744 result.append(("hv/%s" % key, val))
5747 if self.op.beparams:
5748 instance.beparams = self.be_inst
5749 for key, val in self.op.beparams.iteritems():
5750 result.append(("be/%s" % key, val))
5752 self.cfg.Update(instance)
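# Added note (illustrative, values hypothetical): the "result" list built
# above collects (parameter, new value) pairs describing every change that
# was applied, for example:
#   [("disk/1", "add:size=1024,mode=w"),
#    ("nic.bridge/0", "xen-br0"),
#    ("be/memory", 512)]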
5757 class LUQueryExports(NoHooksLU):
5758 """Query the exports list
5761 _OP_REQP = ['nodes']
5764 def ExpandNames(self):
5765 self.needed_locks = {}
5766 self.share_locks[locking.LEVEL_NODE] = 1
5767 if not self.op.nodes:
5768 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5770 self.needed_locks[locking.LEVEL_NODE] = \
5771 _GetWantedNodes(self, self.op.nodes)
5773 def CheckPrereq(self):
5774 """Check prerequisites.
5777 self.nodes = self.acquired_locks[locking.LEVEL_NODE]
5779 def Exec(self, feedback_fn):
5780 """Compute the list of all the exported system images.
5783 @return: a dictionary with the structure node->(export-list)
5784 where export-list is a list of the instances exported on
5788 rpcresult = self.rpc.call_export_list(self.nodes)
5790 for node in rpcresult:
5791 if rpcresult[node].failed:
5792 result[node] = False
5794 result[node] = rpcresult[node].data
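# Added note (illustrative, names hypothetical): the returned mapping pairs
# each node name with its export list, or False when the node query failed:
#   {"node1.example.com": ["inst1.example.com"],
#    "node2.example.com": False}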
5799 class LUExportInstance(LogicalUnit):
5800 """Export an instance to an image in the cluster.
5803 HPATH = "instance-export"
5804 HTYPE = constants.HTYPE_INSTANCE
5805 _OP_REQP = ["instance_name", "target_node", "shutdown"]
5808 def ExpandNames(self):
5809 self._ExpandAndLockInstance()
5810 # FIXME: lock only instance primary and destination node
5812 # Sad but true, for now we have to lock all nodes, as we don't know where
5813 # the previous export might be, and in this LU we search for it and
5814 # remove it from its current node. In the future we could fix this by:
5815 # - making a tasklet to search (share-lock all), then create the new one,
5816 # then one to remove, after
5817 # - removing the removal operation altogether
5818 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5820 def DeclareLocks(self, level):
5821 """Last minute lock declaration."""
5822 # All nodes are locked anyway, so nothing to do here.
5824 def BuildHooksEnv(self):
5827 This will run on the master, primary node and target node.
5831 "EXPORT_NODE": self.op.target_node,
5832 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
5834 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5835 nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
5836 self.op.target_node]
5839 def CheckPrereq(self):
5840 """Check prerequisites.
5842 This checks that the instance and node names are valid.
5845 instance_name = self.op.instance_name
5846 self.instance = self.cfg.GetInstanceInfo(instance_name)
5847 assert self.instance is not None, \
5848 "Cannot retrieve locked instance %s" % self.op.instance_name
5849 _CheckNodeOnline(self, self.instance.primary_node)
5851 self.dst_node = self.cfg.GetNodeInfo(
5852 self.cfg.ExpandNodeName(self.op.target_node))
5854 if self.dst_node is None:
5855 # Getting None here means the node name itself is wrong, not that the node is unlocked
5856 raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
5857 _CheckNodeOnline(self, self.dst_node.name)
5859 # instance disk type verification
5860 for disk in self.instance.disks:
5861 if disk.dev_type == constants.LD_FILE:
5862 raise errors.OpPrereqError("Export not supported for instances with"
5863 " file-based disks")
5865 def Exec(self, feedback_fn):
5866 """Export an instance to an image in the cluster.
5869 instance = self.instance
5870 dst_node = self.dst_node
5871 src_node = instance.primary_node
5872 if self.op.shutdown:
5873 # shutdown the instance, but not the disks
5874 result = self.rpc.call_instance_shutdown(src_node, instance)
5877 raise errors.OpExecError("Could not shutdown instance %s on node %s" %
5878 (instance.name, src_node))
5880 vgname = self.cfg.GetVGName()
5884 # set the disks ID correctly since call_instance_start needs the
5885 # correct drbd minor to create the symlinks
5886 for disk in instance.disks:
5887 self.cfg.SetDiskID(disk, src_node)
5890 for disk in instance.disks:
5891 # new_dev_name will be a snapshot of an lvm leaf of the one we passed
5892 new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
5893 if new_dev_name.failed or not new_dev_name.data:
5894 self.LogWarning("Could not snapshot block device %s on node %s",
5895 disk.logical_id[1], src_node)
5896 snap_disks.append(False)
5898 new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
5899 logical_id=(vgname, new_dev_name.data),
5900 physical_id=(vgname, new_dev_name.data),
5901 iv_name=disk.iv_name)
5902 snap_disks.append(new_dev)
5905 if self.op.shutdown and instance.status == "up":
5906 result = self.rpc.call_instance_start(src_node, instance, None)
5907 if result.failed or not result.data:
5908 _ShutdownInstanceDisks(self, instance)
5909 raise errors.OpExecError("Could not start instance")
5911 # TODO: check for size
5913 cluster_name = self.cfg.GetClusterName()
5914 for idx, dev in enumerate(snap_disks):
5916 result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
5917 instance, cluster_name, idx)
5918 if result.failed or not result.data:
5919 self.LogWarning("Could not export block device %s from node %s to"
5920 " node %s", dev.logical_id[1], src_node,
5922 result = self.rpc.call_blockdev_remove(src_node, dev)
5923 if result.failed or not result.data:
5924 self.LogWarning("Could not remove snapshot block device %s from node"
5925 " %s", dev.logical_id[1], src_node)
5927 result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
5928 if result.failed or not result.data:
5929 self.LogWarning("Could not finalize export for instance %s on node %s",
5930 instance.name, dst_node.name)
5932 nodelist = self.cfg.GetNodeList()
5933 nodelist.remove(dst_node.name)
5935 # on one-node clusters nodelist will be empty after the removal
5936 # if we proceed, the backup would be removed because OpQueryExports
5937 # substitutes an empty list with the full cluster node list.
5939 exportlist = self.rpc.call_export_list(nodelist)
5940 for node in exportlist:
5941 if exportlist[node].failed:
5943 if instance.name in exportlist[node].data:
5944 if not self.rpc.call_export_remove(node, instance.name):
5945 self.LogWarning("Could not remove older export for instance %s"
5946 " on node %s", instance.name, node)
5949 class LURemoveExport(NoHooksLU):
5950 """Remove exports related to the named instance.
5953 _OP_REQP = ["instance_name"]
5956 def ExpandNames(self):
5957 self.needed_locks = {}
5958 # We need all nodes to be locked in order for RemoveExport to work, but we
5959 # don't need to lock the instance itself, as nothing will happen to it (and
5960 # we can also remove exports for an already-removed instance)
5961 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5963 def CheckPrereq(self):
5964 """Check prerequisites.
5968 def Exec(self, feedback_fn):
5969 """Remove any export.
5972 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
5973 # If the instance was not found we'll try with the name that was passed in.
5974 # This will only work if it was an FQDN, though.
5976 if not instance_name:
5978 instance_name = self.op.instance_name
5980 exportlist = self.rpc.call_export_list(self.acquired_locks[
5981 locking.LEVEL_NODE])
5983 for node in exportlist:
5984 if exportlist[node].failed:
5985 self.LogWarning("Failed to query node %s, continuing" % node)
5987 if instance_name in exportlist[node].data:
5989 result = self.rpc.call_export_remove(node, instance_name)
5990 if result.failed or not result.data:
5991 logging.error("Could not remove export for instance %s"
5992 " on node %s", instance_name, node)
5994 if fqdn_warn and not found:
5995 feedback_fn("Export not found. If trying to remove an export belonging"
5996 " to a deleted instance please use its Fully Qualified"
6000 class TagsLU(NoHooksLU):
6003 This is an abstract class which is the parent of all the other tags LUs.
6007 def ExpandNames(self):
6008 self.needed_locks = {}
6009 if self.op.kind == constants.TAG_NODE:
6010 name = self.cfg.ExpandNodeName(self.op.name)
6012 raise errors.OpPrereqError("Invalid node name (%s)" %
6015 self.needed_locks[locking.LEVEL_NODE] = name
6016 elif self.op.kind == constants.TAG_INSTANCE:
6017 name = self.cfg.ExpandInstanceName(self.op.name)
6019 raise errors.OpPrereqError("Invalid instance name (%s)" %
6022 self.needed_locks[locking.LEVEL_INSTANCE] = name
6024 def CheckPrereq(self):
6025 """Check prerequisites.
6028 if self.op.kind == constants.TAG_CLUSTER:
6029 self.target = self.cfg.GetClusterInfo()
6030 elif self.op.kind == constants.TAG_NODE:
6031 self.target = self.cfg.GetNodeInfo(self.op.name)
6032 elif self.op.kind == constants.TAG_INSTANCE:
6033 self.target = self.cfg.GetInstanceInfo(self.op.name)
6035 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
6039 class LUGetTags(TagsLU):
6040 """Returns the tags of a given object.
6043 _OP_REQP = ["kind", "name"]
6046 def Exec(self, feedback_fn):
6047 """Returns the tag list.
6050 return list(self.target.GetTags())
6053 class LUSearchTags(NoHooksLU):
6054 """Searches the tags for a given pattern.
6057 _OP_REQP = ["pattern"]
6060 def ExpandNames(self):
6061 self.needed_locks = {}
6063 def CheckPrereq(self):
6064 """Check prerequisites.
6066 This checks the pattern passed for validity by compiling it.
6070 self.re = re.compile(self.op.pattern)
6071 except re.error, err:
6072 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6073 (self.op.pattern, err))
6075 def Exec(self, feedback_fn):
6076 """Returns the tag list.
6080 tgts = [("/cluster", cfg.GetClusterInfo())]
6081 ilist = cfg.GetAllInstancesInfo().values()
6082 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6083 nlist = cfg.GetAllNodesInfo().values()
6084 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6086 for path, target in tgts:
6087 for tag in target.GetTags():
6088 if self.re.search(tag):
6089 results.append((path, tag))
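# Added note (illustrative, values hypothetical): results ends up as a list
# of (path, tag) pairs for every tag matching the pattern, e.g.
#   [("/cluster", "staging"),
#    ("/instances/inst1.example.com", "staging")]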
6093 class LUAddTags(TagsLU):
6094 """Sets a tag on a given object.
6097 _OP_REQP = ["kind", "name", "tags"]
6100 def CheckPrereq(self):
6101 """Check prerequisites.
6103 This checks the type and length of the tag name and value.
6106 TagsLU.CheckPrereq(self)
6107 for tag in self.op.tags:
6108 objects.TaggableObject.ValidateTag(tag)
6110 def Exec(self, feedback_fn):
6115 for tag in self.op.tags:
6116 self.target.AddTag(tag)
6117 except errors.TagError, err:
6118 raise errors.OpExecError("Error while setting tag: %s" % str(err))
6120 self.cfg.Update(self.target)
6121 except errors.ConfigurationError:
6122 raise errors.OpRetryError("There has been a modification to the"
6123 " config file and the operation has been"
6124 " aborted. Please retry.")
6127 class LUDelTags(TagsLU):
6128 """Delete a list of tags from a given object.
6131 _OP_REQP = ["kind", "name", "tags"]
6134 def CheckPrereq(self):
6135 """Check prerequisites.
6137 This checks that we have the given tag.
6140 TagsLU.CheckPrereq(self)
6141 for tag in self.op.tags:
6142 objects.TaggableObject.ValidateTag(tag)
6143 del_tags = frozenset(self.op.tags)
6144 cur_tags = self.target.GetTags()
6145 if not del_tags <= cur_tags:
6146 diff_tags = del_tags - cur_tags
6147 diff_names = ["'%s'" % tag for tag in diff_tags]
6149 raise errors.OpPrereqError("Tag(s) %s not found" %
6150 (",".join(diff_names)))
6152 def Exec(self, feedback_fn):
6153 """Remove the tag from the object.
6156 for tag in self.op.tags:
6157 self.target.RemoveTag(tag)
6159 self.cfg.Update(self.target)
6160 except errors.ConfigurationError:
6161 raise errors.OpRetryError("There has been a modification to the"
6162 " config file and the operation has been"
6163 " aborted. Please retry.")
6166 class LUTestDelay(NoHooksLU):
6167 """Sleep for a specified amount of time.
6169 This LU sleeps on the master and/or nodes for a specified amount of
6173 _OP_REQP = ["duration", "on_master", "on_nodes"]
6176 def ExpandNames(self):
6177 """Expand names and set required locks.
6179 This expands the node list, if any.
6182 self.needed_locks = {}
6183 if self.op.on_nodes:
6184 # _GetWantedNodes can be used here, but is not always appropriate to use
6185 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
6187 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
6188 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
6190 def CheckPrereq(self):
6191 """Check prerequisites.
6195 def Exec(self, feedback_fn):
6196 """Do the actual sleep.
6199 if self.op.on_master:
6200 if not utils.TestDelay(self.op.duration):
6201 raise errors.OpExecError("Error during master delay test")
6202 if self.op.on_nodes:
6203 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
6205 raise errors.OpExecError("Complete failure from rpc call")
6206 for node, node_result in result.items():
6208 if not node_result.data:
6209 raise errors.OpExecError("Failure during rpc call to node %s,"
6210 " result: %s" % (node, node_result.data))
6213 class IAllocator(object):
6214 """IAllocator framework.
6216 An IAllocator instance has four sets of attributes:
6217 - cfg that is needed to query the cluster
6218 - input data (all members of the _KEYS class attribute are required)
6219 - four buffer attributes (in|out_data|text), that represent the
6220 input (to the external script) in text and data structure format,
6221 and the output from it, again in two formats
6222 - the result variables from the script (success, info, nodes) for
6227 "mem_size", "disks", "disk_template",
6228 "os", "tags", "nics", "vcpus", "hypervisor",
6234 def __init__(self, lu, mode, name, **kwargs):
6236 # init buffer variables
6237 self.in_text = self.out_text = self.in_data = self.out_data = None
6238 # init all input fields so that pylint is happy
6241 self.mem_size = self.disks = self.disk_template = None
6242 self.os = self.tags = self.nics = self.vcpus = None
6243 self.hypervisor = None
6244 self.relocate_from = None
6246 self.required_nodes = None
6247 # init result fields
6248 self.success = self.info = self.nodes = None
6249 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6250 keyset = self._ALLO_KEYS
6251 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6252 keyset = self._RELO_KEYS
6254 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
6255 " IAllocator" % self.mode)
6257 if key not in keyset:
6258 raise errors.ProgrammerError("Invalid input parameter '%s' to"
6259 " IAllocator" % key)
6260 setattr(self, key, kwargs[key])
6262 if key not in kwargs:
6263 raise errors.ProgrammerError("Missing input parameter '%s' to"
6264 " IAllocator" % key)
6265 self._BuildInputData()
6267 def _ComputeClusterData(self):
6268 """Compute the generic allocator input data.
6270 This is the data that is independent of the actual operation.
6274 cluster_info = cfg.GetClusterInfo()
6278 "cluster_name": cfg.GetClusterName(),
6279 "cluster_tags": list(cluster_info.GetTags()),
6280 "enable_hypervisors": list(cluster_info.enabled_hypervisors),
6281 # we don't have job IDs
6283 iinfo = cfg.GetAllInstancesInfo().values()
6284 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
6288 node_list = cfg.GetNodeList()
6290 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6291 hypervisor_name = self.hypervisor
6292 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6293 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
6295 node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
6297 node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
6298 cluster_info.enabled_hypervisors)
6299 for nname in node_list:
6300 ninfo = cfg.GetNodeInfo(nname)
6301 node_data[nname].Raise()
6302 if not isinstance(node_data[nname].data, dict):
6303 raise errors.OpExecError("Can't get data for node %s" % nname)
6304 remote_info = node_data[nname].data
6305 for attr in ['memory_total', 'memory_free', 'memory_dom0',
6306 'vg_size', 'vg_free', 'cpu_total']:
6307 if attr not in remote_info:
6308 raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
6311 remote_info[attr] = int(remote_info[attr])
6312 except ValueError, err:
6313 raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
6314 " %s" % (nname, attr, str(err)))
6315 # compute memory used by primary instances
6316 i_p_mem = i_p_up_mem = 0
6317 for iinfo, beinfo in i_list:
6318 if iinfo.primary_node == nname:
6319 i_p_mem += beinfo[constants.BE_MEMORY]
6320 if iinfo.name not in node_iinfo[nname]:
6323 i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
6324 i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
6325 remote_info['memory_free'] -= max(0, i_mem_diff)
6327 if iinfo.status == "up":
6328 i_p_up_mem += beinfo[constants.BE_MEMORY]
6330 # compute memory used by instances
6332 "tags": list(ninfo.GetTags()),
6333 "total_memory": remote_info['memory_total'],
6334 "reserved_memory": remote_info['memory_dom0'],
6335 "free_memory": remote_info['memory_free'],
6336 "i_pri_memory": i_p_mem,
6337 "i_pri_up_memory": i_p_up_mem,
6338 "total_disk": remote_info['vg_size'],
6339 "free_disk": remote_info['vg_free'],
6340 "primary_ip": ninfo.primary_ip,
6341 "secondary_ip": ninfo.secondary_ip,
6342 "total_cpus": remote_info['cpu_total'],
6343 "offline": ninfo.offline,
6345 node_results[nname] = pnr
6346 data["nodes"] = node_results
6350 for iinfo, beinfo in i_list:
6351 nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
6352 for n in iinfo.nics]
6354 "tags": list(iinfo.GetTags()),
6355 "should_run": iinfo.status == "up",
6356 "vcpus": beinfo[constants.BE_VCPUS],
6357 "memory": beinfo[constants.BE_MEMORY],
6359 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
6361 "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
6362 "disk_template": iinfo.disk_template,
6363 "hypervisor": iinfo.hypervisor,
6365 instance_data[iinfo.name] = pir
6367 data["instances"] = instance_data
6371 def _AddNewInstance(self):
6372 """Add new instance data to allocator structure.
6374 This in combination with _ComputeClusterData will create the
6375 correct structure needed as input for the allocator.
6377 The checks for the completeness of the opcode must have already been
6382 if len(self.disks) != 2:
6383 raise errors.OpExecError("Only two-disk configurations supported")
6385 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
6387 if self.disk_template in constants.DTS_NET_MIRROR:
6388 self.required_nodes = 2
6390 self.required_nodes = 1
6394 "disk_template": self.disk_template,
6397 "vcpus": self.vcpus,
6398 "memory": self.mem_size,
6399 "disks": self.disks,
6400 "disk_space_total": disk_space,
6402 "required_nodes": self.required_nodes,
6404 data["request"] = request
6406 def _AddRelocateInstance(self):
6407 """Add relocate instance data to allocator structure.
6409 This in combination with _ComputeClusterData will create the
6410 correct structure needed as input for the allocator.
6412 The checks for the completeness of the opcode must have already been
6416 instance = self.lu.cfg.GetInstanceInfo(self.name)
6417 if instance is None:
6418 raise errors.ProgrammerError("Unknown instance '%s' passed to"
6419 " IAllocator" % self.name)
6421 if instance.disk_template not in constants.DTS_NET_MIRROR:
6422 raise errors.OpPrereqError("Can't relocate non-mirrored instances")
6424 if len(instance.secondary_nodes) != 1:
6425 raise errors.OpPrereqError("Instance has not exactly one secondary node")
6427 self.required_nodes = 1
6428 disk_sizes = [{'size': disk.size} for disk in instance.disks]
6429 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
6434 "disk_space_total": disk_space,
6435 "required_nodes": self.required_nodes,
6436 "relocate_from": self.relocate_from,
6438 self.in_data["request"] = request
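# Added sketch (illustrative, values hypothetical) of a relocation request;
# only the keys visible above are shown:
#   {"disk_space_total": 1024, "required_nodes": 1,
#    "relocate_from": ["node2.example.com"]}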
6440 def _BuildInputData(self):
6441 """Build input data structures.
6444 self._ComputeClusterData()
6446 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6447 self._AddNewInstance()
6449 self._AddRelocateInstance()
6451 self.in_text = serializer.Dump(self.in_data)
6453 def Run(self, name, validate=True, call_fn=None):
6454 """Run an instance allocator and return the results.
6458 call_fn = self.lu.rpc.call_iallocator_runner
6461 result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
6464 if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
6465 raise errors.OpExecError("Invalid result from master iallocator runner")
6467 rcode, stdout, stderr, fail = result.data
6469 if rcode == constants.IARUN_NOTFOUND:
6470 raise errors.OpExecError("Can't find allocator '%s'" % name)
6471 elif rcode == constants.IARUN_FAILURE:
6472 raise errors.OpExecError("Instance allocator call failed: %s,"
6473 " output: %s" % (fail, stdout+stderr))
6474 self.out_text = stdout
6476 self._ValidateResult()
6478 def _ValidateResult(self):
6479 """Process the allocator results.
6481 This will process and if successful save the result in
6482 self.out_data and the other parameters.
6486 rdict = serializer.Load(self.out_text)
6487 except Exception, err:
6488 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
6490 if not isinstance(rdict, dict):
6491 raise errors.OpExecError("Can't parse iallocator results: not a dict")
6493 for key in "success", "info", "nodes":
6494 if key not in rdict:
6495 raise errors.OpExecError("Can't parse iallocator results:"
6496 " missing key '%s'" % key)
6497 setattr(self, key, rdict[key])
6499 if not isinstance(rdict["nodes"], list):
6500 raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
6502 self.out_data = rdict
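# Added example (illustrative, names hypothetical) of a well-formed allocator
# reply before parsing; assuming the serializer module wraps JSON, the
# external script would emit something like:
#   {"success": true, "info": "allocation successful",
#    "nodes": ["node1.example.com", "node3.example.com"]}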
6505 class LUTestAllocator(NoHooksLU):
6506 """Run allocator tests.
6508 This LU runs the allocator tests
6511 _OP_REQP = ["direction", "mode", "name"]
6513 def CheckPrereq(self):
6514 """Check prerequisites.
6516 This checks the opcode parameters depending on the direction and mode of the test.
6519 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
6520 for attr in ["name", "mem_size", "disks", "disk_template",
6521 "os", "tags", "nics", "vcpus"]:
6522 if not hasattr(self.op, attr):
6523 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
6525 iname = self.cfg.ExpandInstanceName(self.op.name)
6526 if iname is not None:
6527 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
6529 if not isinstance(self.op.nics, list):
6530 raise errors.OpPrereqError("Invalid parameter 'nics'")
6531 for row in self.op.nics:
6532 if (not isinstance(row, dict) or
6535 "bridge" not in row):
6536 raise errors.OpPrereqError("Invalid contents of the"
6537 " 'nics' parameter")
6538 if not isinstance(self.op.disks, list):
6539 raise errors.OpPrereqError("Invalid parameter 'disks'")
6540 if len(self.op.disks) != 2:
6541 raise errors.OpPrereqError("Only two-disk configurations supported")
6542 for row in self.op.disks:
6543 if (not isinstance(row, dict) or
6544 "size" not in row or
6545 not isinstance(row["size"], int) or
6546 "mode" not in row or
6547 row["mode"] not in ['r', 'w']):
6548 raise errors.OpPrereqError("Invalid contents of the"
6549 " 'disks' parameter")
6550 if self.op.hypervisor is None:
6551 self.op.hypervisor = self.cfg.GetHypervisorType()
6552 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
6553 if not hasattr(self.op, "name"):
6554 raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
6555 fname = self.cfg.ExpandInstanceName(self.op.name)
6557 raise errors.OpPrereqError("Instance '%s' not found for relocation" %
6559 self.op.name = fname
6560 self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
6562 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
6565 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
6566 if not hasattr(self.op, "allocator") or self.op.allocator is None:
6567 raise errors.OpPrereqError("Missing allocator name")
6568 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
6569 raise errors.OpPrereqError("Wrong allocator test '%s'" %
6572 def Exec(self, feedback_fn):
6573 """Run the allocator test.
6576 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
6577 ial = IAllocator(self,
6580 mem_size=self.op.mem_size,
6581 disks=self.op.disks,
6582 disk_template=self.op.disk_template,
6586 vcpus=self.op.vcpus,
6587 hypervisor=self.op.hypervisor,
6590 ial = IAllocator(self,
6593 relocate_from=list(self.relocate_from),
6596 if self.op.direction == constants.IALLOCATOR_DIR_IN:
6597 result = ial.in_text
6599 ial.Run(self.op.allocator, validate=False)
6600 result = ial.out_text