from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
-from ganeti import config
+from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
-from ganeti import ssconf
from ganeti import serializer
"""Logical Unit base class.
Subclasses must follow these rules:
- - implement CheckPrereq which also fills in the opcode instance
- with all the fields (even if as None)
+ - implement ExpandNames
+ - implement CheckPrereq
- implement Exec
- implement BuildHooksEnv
- redefine HPATH and HTYPE
REQ_WSSTORE = False
REQ_BGL = True
- def __init__(self, processor, op, cfg, sstore):
+ def __init__(self, processor, op, context, sstore):
"""Constructor for LogicalUnit.
This needs to be overridden in derived classes in order to check op
"""
self.proc = processor
self.op = op
- self.cfg = cfg
+ self.cfg = context.cfg
self.sstore = sstore
+ self.context = context
+ self.needed_locks = None
+ self.acquired_locks = {}
+ self.share_locks = dict(((i, 0) for i in locking.LEVELS))
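+ # share_locks maps each lock level to 0 (exclusive) or 1 (shared)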
+ # Used to force good behavior when calling helper functions
+ self.recalculate_locks = {}
self.__ssh = None
for attr_name in self._OP_REQP:
raise errors.OpPrereqError("Required parameter '%s' missing" %
attr_name)
- if not cfg.IsCluster():
+ if not self.cfg.IsCluster():
raise errors.OpPrereqError("Cluster not initialized yet,"
" use 'gnt-cluster init' first.")
if self.REQ_MASTER:
ssh = property(fget=__GetSSH)
+ def ExpandNames(self):
+ """Expand names for this LU.
+
+ This method is called before starting to execute the opcode, and it should
+ update all the parameters of the opcode to their canonical form (e.g. a
+ short node name must be fully expanded after this method has successfully
+ completed). This way locking, hooks, logging, etc. can work correctly.
+
+ LUs which implement this method must also populate the self.needed_locks
+ member, as a dict with lock levels as keys, and a list of needed lock names
+ as values. Rules:
+ - Use an empty dict if you don't need any lock
+ - If you don't need any lock at a particular level omit that level
+ - Don't put anything for the BGL level
+ - If you want all locks at a level use locking.ALL_SET as a value
+
+ If you need to share locks (rather than acquire them exclusively) at one
+ level you can modify self.share_locks, setting a true value (usually 1) for
+ that level. By default locks are not shared.
+
+ Examples:
+ # Acquire all nodes and one instance
+ self.needed_locks = {
+ locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_INSTANCE: ['instance1.example.tld'],
+ }
+ # Acquire just two nodes
+ self.needed_locks = {
+ locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
+ }
+ # Acquire no locks
+ self.needed_locks = {} # No, you can't leave it to the default value None
+
+ """
+ # The implementation of this method is mandatory only if the new LU is
+ # concurrent, so that old LUs don't need to be changed all at the same
+ # time.
+ if self.REQ_BGL:
+ self.needed_locks = {} # Exclusive LUs don't need locks.
+ else:
+ raise NotImplementedError
+
+ def DeclareLocks(self, level):
+ """Declare LU locking needs for a level
+
+ While most LUs can just declare their locking needs at ExpandNames time,
+ sometimes there's the need to calculate some locks after having acquired
+ the ones before. This function is called just before acquiring locks at a
+ particular level, but after acquiring the ones at lower levels, and permits
+ such calculations. It can be used to modify self.needed_locks, and by
+ default it does nothing.
+
+ This function is only called if you have something already set in
+ self.needed_locks for the level.
+
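+ As a minimal sketch, a hypothetical LU that locks an instance and then
+ its nodes could override it as:
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ # the instance locks are already held at this point
+ self._LockInstancesNodes()
+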
+ @type level: member of ganeti.locking.LEVELS
+ @param level: Locking level which is going to be locked
+
+ """
+
def CheckPrereq(self):
"""Check prerequisites for this LU.
not fulfilled. Its return value is ignored.
This method should also update all the parameters of the opcode to
- their canonical form; e.g. a short node name must be fully
- expanded after this method has successfully completed (so that
- hooks, logging, etc. work correctly).
+ their canonical form if it hasn't been done by ExpandNames before.
"""
raise NotImplementedError
"""
return lu_result
+ def _ExpandAndLockInstance(self):
+ """Helper function to expand and lock an instance.
+
+ Many LUs that work on an instance take its name in self.op.instance_name
+ and need to expand it and then declare the expanded name for locking. This
+ function does it, and then updates self.op.instance_name to the expanded
+ name. It also initializes needed_locks as a dict, if this hasn't been done
+ before.
+
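+ Typical use, as in several LUs below:
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+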
+ """
+ if self.needed_locks is None:
+ self.needed_locks = {}
+ else:
+ assert locking.LEVEL_INSTANCE not in self.needed_locks, \
+ "_ExpandAndLockInstance called with instance-level locks set"
+ expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
+ if expanded_name is None:
+ raise errors.OpPrereqError("Instance '%s' not known" %
+ self.op.instance_name)
+ self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
+ self.op.instance_name = expanded_name
+
+ def _LockInstancesNodes(self, primary_only=False):
+ """Helper function to declare instances' nodes for locking.
+
+ This function should be called after locking one or more instances to lock
+ their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
+ with the primary (and, unless primary_only is set, secondary) nodes of
+ the instances whose locks are already held
+ (self.acquired_locks[locking.LEVEL_INSTANCE]).
+
+ It should be called from DeclareLocks, and for safety only works if
+ self.recalculate_locks[locking.LEVEL_NODE] is set.
+
+ In the future it may grow parameters to lock just some instances' nodes,
+ if needed.
+
+ It should be called in DeclareLocks in a way similar to:
+
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
+
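+ The exact behaviour depends on self.recalculate_locks[locking.LEVEL_NODE]:
+ constants.LOCKS_REPLACE replaces the declared node locks with the
+ computed ones, while constants.LOCKS_APPEND adds them to any locks
+ already declared (e.g. a remote node declared in ExpandNames).
+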
+ @type primary_only: boolean
+ @param primary_only: only lock primary nodes of locked instances
+
+ """
+ assert locking.LEVEL_NODE in self.recalculate_locks, \
+ "_LockInstancesNodes helper function called with no nodes to recalculate"
+
+ # TODO: check whether we've really been called with the instance locks held
+
+ # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
+ # future we might want to have different behaviors depending on the value
+ # of self.recalculate_locks[locking.LEVEL_NODE]
+ wanted_nodes = []
+ for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
+ instance = self.context.cfg.GetInstanceInfo(instance_name)
+ wanted_nodes.append(instance.primary_node)
+ if not primary_only:
+ wanted_nodes.extend(instance.secondary_nodes)
+
+ if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
+ self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
+ elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
+ self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
+
+ del self.recalculate_locks[locking.LEVEL_NODE]
+
class NoHooksLU(LogicalUnit):
"""Simple LU which runs no hooks.
if not isinstance(nodes, list):
raise errors.OpPrereqError("Invalid argument type 'nodes'")
- if nodes:
- wanted = []
+ if not nodes:
+ raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
+ " non-empty list of nodes whose names are to be expanded.")
- for name in nodes:
- node = lu.cfg.ExpandNodeName(name)
- if node is None:
- raise errors.OpPrereqError("No such node name '%s'" % name)
- wanted.append(node)
+ wanted = []
+ for name in nodes:
+ node = lu.cfg.ExpandNodeName(name)
+ if node is None:
+ raise errors.OpPrereqError("No such node name '%s'" % name)
+ wanted.append(node)
- else:
- wanted = lu.cfg.GetNodeList()
return utils.NiceSort(wanted)
"""
master = self.sstore.GetMasterNode()
- if not rpc.call_node_stop_master(master):
+ if not rpc.call_node_stop_master(master, False):
raise errors.OpExecError("Could not disable the master role")
priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
utils.CreateBackup(priv_key)
utils.CreateBackup(pub_key)
- rpc.call_node_leave_cluster(master)
+ return master
class LUVerifyCluster(LogicalUnit):
HPATH = "cluster-verify"
HTYPE = constants.HTYPE_CLUSTER
_OP_REQP = ["skip_checks"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.needed_locks = {
+ locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_INSTANCE: locking.ALL_SET,
+ }
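+ # verification only reads cluster state, so all locks are shared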
+ self.share_locks = dict(((i, 1) for i in locking.LEVELS))
def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
remote_version, feedback_fn):
feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
% len(i_non_redundant))
- return int(bad)
+ return not bad
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
"""Analize the post-hooks' result, handle it, and send some
lu_result: previous Exec result
"""
- # We only really run POST phase hooks, and are only interested in their results
+ # We only really run POST phase hooks, and are only interested in
+ # their results
if phase == constants.HOOKS_PHASE_POST:
# Used to change hooks' output to proper indentation
indent_re = re.compile('^', re.M)
"""
_OP_REQP = []
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.needed_locks = {
+ locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_INSTANCE: locking.ALL_SET,
+ }
+ self.share_locks = dict(((i, 1) for i in locking.LEVELS))
def CheckPrereq(self):
"""Check prerequisites.
# shutdown the master IP
master = ss.GetMasterNode()
- if not rpc.call_node_stop_master(master):
+ if not rpc.call_node_stop_master(master, False):
raise errors.OpExecError("Could not disable the master role")
try:
logger.Error("copy of file %s to node %s failed" %
(fname, to_node))
finally:
- if not rpc.call_node_start_master(master):
+ if not rpc.call_node_start_master(master, False):
logger.Error("Could not re-enable the master role on the master,"
" please restart manually.")
if done or oneshot:
break
- if unlock:
- #utils.Unlock('cmd')
- pass
- try:
- time.sleep(min(60, max_time))
- finally:
- if unlock:
- #utils.Lock('cmd')
- pass
+ time.sleep(min(60, max_time))
if done:
proc.LogInfo("Instance %s's disks are in sync." % instance.name)
"""
_OP_REQP = ["output_fields", "names"]
+ REQ_BGL = False
+
- def CheckPrereq(self):
- """Check prerequisites.
-
- This always succeeds, since this is a pure query LU.
-
- """
+ def ExpandNames(self):
if self.op.names:
raise errors.OpPrereqError("Selective OS query not supported")
dynamic=self.dynamic_fields,
selected=self.op.output_fields)
+ # Lock all nodes, in shared mode
+ self.needed_locks = {}
+ self.share_locks[locking.LEVEL_NODE] = 1
+ self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ """
+
@staticmethod
def _DiagnoseByOS(node_list, rlist):
"""Remaps a per-node return list into an a per-os per-node dictionary
"""Compute the list of OSes.
"""
- node_list = self.cfg.GetNodeList()
+ node_list = self.acquired_locks[locking.LEVEL_NODE]
node_data = rpc.call_os_diagnose(node_list)
if node_data == False:
raise errors.OpExecError("Can't gather the list of OSes")
logger.Info("stopping the node daemon and removing configs from node %s" %
node.name)
- rpc.call_node_leave_cluster(node.name)
-
- logger.Info("Removing node %s from config" % node.name)
-
- self.cfg.RemoveNode(node.name)
+ self.context.RemoveNode(node.name)
- utils.RemoveHostFromEtcHosts(node.name)
+ rpc.call_node_leave_cluster(node.name)
class LUQueryNodes(NoHooksLU):
"""
_OP_REQP = ["output_fields", "names"]
+ REQ_BGL = False
+
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that the fields required are valid output fields.
-
- """
+ def ExpandNames(self):
self.dynamic_fields = frozenset([
"dtotal", "dfree",
"mtotal", "mnode", "mfree",
"ctotal",
])
- _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
- "pinst_list", "sinst_list",
- "pip", "sip", "tags"],
+ self.static_fields = frozenset([
+ "name", "pinst_cnt", "sinst_cnt",
+ "pinst_list", "sinst_list",
+ "pip", "sip", "tags",
+ ])
+
+ _CheckOutputFields(static=self.static_fields,
dynamic=self.dynamic_fields,
selected=self.op.output_fields)
- self.wanted = _GetWantedNodes(self, self.op.names)
+ self.needed_locks = {}
+ self.share_locks[locking.LEVEL_NODE] = 1
+
+ if self.op.names:
+ self.wanted = _GetWantedNodes(self, self.op.names)
+ else:
+ self.wanted = locking.ALL_SET
+
+ self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
+ if self.do_locking:
+ # if we don't request only static fields, we need to lock the nodes
+ self.needed_locks[locking.LEVEL_NODE] = self.wanted
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ """
+ # The node list is validated by _GetWantedNodes when non-empty;
+ # an empty list needs no validation
+ pass
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
"""
- nodenames = self.wanted
- nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
+ all_info = self.cfg.GetAllNodesInfo()
+ if self.do_locking:
+ nodenames = self.acquired_locks[locking.LEVEL_NODE]
+ else:
+ nodenames = all_info.keys()
+ nodelist = [all_info[name] for name in nodenames]
# begin data gathering
"""
_OP_REQP = ["nodes", "output_fields"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ _CheckOutputFields(static=["node"],
+ dynamic=["phys", "vg", "name", "size", "instance"],
+ selected=self.op.output_fields)
+
+ self.needed_locks = {}
+ self.share_locks[locking.LEVEL_NODE] = 1
+ if not self.op.nodes:
+ self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ else:
+ self.needed_locks[locking.LEVEL_NODE] = \
+ _GetWantedNodes(self, self.op.nodes)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the fields required are valid output fields.
"""
- self.nodes = _GetWantedNodes(self, self.op.nodes)
-
- _CheckOutputFields(static=["node"],
- dynamic=["phys", "vg", "name", "size", "instance"],
- selected=self.op.output_fields)
-
+ self.nodes = self.acquired_locks[locking.LEVEL_NODE]
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
if not result[node]:
logger.Error("could not copy file %s to node %s" % (fname, node))
- if not self.op.readd:
- logger.Info("adding node %s to cluster.conf" % node)
- self.cfg.AddNode(new_node)
-
-
-class LUMasterFailover(LogicalUnit):
- """Failover the master node to the current node.
-
- This is a special LU in that it must run on a non-master node.
-
- """
- HPATH = "master-failover"
- HTYPE = constants.HTYPE_CLUSTER
- REQ_MASTER = False
- REQ_WSSTORE = True
- _OP_REQP = []
-
- def BuildHooksEnv(self):
- """Build hooks env.
-
- This will run on the new master only in the pre phase, and on all
- the nodes in the post phase.
-
- """
- env = {
- "OP_TARGET": self.new_master,
- "NEW_MASTER": self.new_master,
- "OLD_MASTER": self.old_master,
- }
- return env, [self.new_master], self.cfg.GetNodeList()
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that we are not already the master.
-
- """
- self.new_master = utils.HostInfo().name
- self.old_master = self.sstore.GetMasterNode()
-
- if self.old_master == self.new_master:
- raise errors.OpPrereqError("This commands must be run on the node"
- " where you want the new master to be."
- " %s is already the master" %
- self.old_master)
-
- def Exec(self, feedback_fn):
- """Failover the master node.
-
- This command, when run on a non-master node, will cause the current
- master to cease being master, and the non-master to become new
- master.
-
- """
- #TODO: do not rely on gethostname returning the FQDN
- logger.Info("setting master to %s, old master: %s" %
- (self.new_master, self.old_master))
-
- if not rpc.call_node_stop_master(self.old_master):
- logger.Error("could disable the master role on the old master"
- " %s, please disable manually" % self.old_master)
-
- ss = self.sstore
- ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
- if not rpc.call_upload_file(self.cfg.GetNodeList(),
- ss.KeyToFilename(ss.SS_MASTER_NODE)):
- logger.Error("could not distribute the new simple store master file"
- " to the other nodes, please check.")
-
- if not rpc.call_node_start_master(self.new_master):
- logger.Error("could not start the master role on the new master"
- " %s, please check" % self.new_master)
- feedback_fn("Error in activating the master IP on the new master,"
- " please fix manually.")
-
+ if self.op.readd:
+ self.context.ReaddNode(new_node)
+ else:
+ self.context.AddNode(new_node)
class LUQueryClusterInfo(NoHooksLU):
"""
_OP_REQP = []
REQ_MASTER = False
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.needed_locks = {}
def CheckPrereq(self):
"""No prerequsites needed for this LU.
"""
_OP_REQP = []
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.needed_locks = {}
def CheckPrereq(self):
"""No prerequisites.
"""
_OP_REQP = ["instance_name"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
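+ # node locks are declared empty here and filled in by DeclareLocks,
+ # once the instance lock is held (see _LockInstancesNodes)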
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
- self.instance = instance
-
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
def Exec(self, feedback_fn):
"""Activate the disks.
"""
_OP_REQP = ["instance_name"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
- self.instance = instance
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
def Exec(self, feedback_fn):
"""Deactivate the disks
"""
instance = self.instance
- ins_l = rpc.call_instance_list([instance.primary_node])
- ins_l = ins_l[instance.primary_node]
- if not type(ins_l) is list:
- raise errors.OpExecError("Can't contact node '%s'" %
- instance.primary_node)
+ _SafeShutdownInstanceDisks(instance, self.cfg)
- if self.instance.name in ins_l:
- raise errors.OpExecError("Instance is running, can't shutdown"
- " block devices.")
- _ShutdownInstanceDisks(instance, self.cfg)
+def _SafeShutdownInstanceDisks(instance, cfg):
+ """Shutdown block devices of an instance.
+
+ This function checks if an instance is running, before calling
+ _ShutdownInstanceDisks.
+
+ """
+ ins_l = rpc.call_instance_list([instance.primary_node])
+ ins_l = ins_l[instance.primary_node]
+ if not isinstance(ins_l, list):
+ raise errors.OpExecError("Can't contact node '%s'" %
+ instance.primary_node)
+
+ if instance.name in ins_l:
+ raise errors.OpExecError("Instance is running, can't shutdown"
+ " block devices.")
+
+ _ShutdownInstanceDisks(instance, cfg)
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
HPATH = "instance-start"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name", "force"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def BuildHooksEnv(self):
"""Build hooks env.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
+ self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
# check bridges existance
_CheckInstanceBridgesExist(instance)
"starting instance %s" % instance.name,
instance.memory)
- self.instance = instance
- self.op.instance_name = instance.name
-
def Exec(self, feedback_fn):
"""Start the instance.
HPATH = "instance-reboot"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
+ constants.INSTANCE_REBOOT_HARD,
+ constants.INSTANCE_REBOOT_FULL]:
+ raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
+ (constants.INSTANCE_REBOOT_SOFT,
+ constants.INSTANCE_REBOOT_HARD,
+ constants.INSTANCE_REBOOT_FULL))
+ self._ExpandAndLockInstance()
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
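+ # only a full reboot touches the secondary nodes, otherwise the
+ # primary node's lock is enough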
+ primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
+ self._LockInstancesNodes(primary_only=primary_only)
def BuildHooksEnv(self):
"""Build hooks env.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
+ self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
# check bridges existance
_CheckInstanceBridgesExist(instance)
- self.instance = instance
- self.op.instance_name = instance.name
-
def Exec(self, feedback_fn):
"""Reboot the instance.
node_current = instance.primary_node
- if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
- constants.INSTANCE_REBOOT_HARD,
- constants.INSTANCE_REBOOT_FULL]:
- raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
- (constants.INSTANCE_REBOOT_SOFT,
- constants.INSTANCE_REBOOT_HARD,
- constants.INSTANCE_REBOOT_FULL))
-
if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
constants.INSTANCE_REBOOT_HARD]:
if not rpc.call_instance_reboot(node_current, instance,
HPATH = "instance-stop"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def BuildHooksEnv(self):
"""Build hooks env.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
- self.instance = instance
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
def Exec(self, feedback_fn):
"""Shutdown the instance.
HPATH = "instance-reinstall"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def BuildHooksEnv(self):
"""Build hooks env.
This checks that the instance is in the cluster and is not running.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
+ instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
+
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name)
old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
self.cfg.RenameInstance(inst.name, self.op.new_name)
+ # Change the instance lock. This is definitely safe while we hold the BGL
+ self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
+ self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
# re-read the instance from the configuration after rename
inst = self.cfg.GetInstanceInfo(self.op.new_name)
try:
if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
"sda", "sdb"):
- msg = ("Could run OS rename script for instance %s on node %s (but the"
- " instance has been renamed in Ganeti)" %
+ msg = ("Could not run OS rename script for instance %s on node %s"
+ " (but the instance has been renamed in Ganeti)" %
(inst.name, inst.primary_node))
logger.Error(msg)
finally:
logger.Info("removing instance %s out of cluster config" % instance.name)
self.cfg.RemoveInstance(instance.name)
+ # Remove the new instance from the Ganeti Lock Manager
+ self.context.glm.remove(locking.LEVEL_INSTANCE, instance.name)
class LUQueryInstances(NoHooksLU):
"""
_OP_REQP = ["output_fields", "names"]
+ REQ_BGL = False
+
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that the fields required are valid output fields.
-
- """
+ def ExpandNames(self):
self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
- _CheckOutputFields(static=["name", "os", "pnode", "snodes",
- "admin_state", "admin_ram",
- "disk_template", "ip", "mac", "bridge",
- "sda_size", "sdb_size", "vcpus", "tags"],
+ self.static_fields = frozenset([
+ "name", "os", "pnode", "snodes",
+ "admin_state", "admin_ram",
+ "disk_template", "ip", "mac", "bridge",
+ "sda_size", "sdb_size", "vcpus", "tags",
+ "auto_balance",
+ "network_port", "kernel_path", "initrd_path",
+ "hvm_boot_order", "hvm_acpi", "hvm_pae",
+ "hvm_cdrom_image_path", "hvm_nic_type",
+ "hvm_disk_type", "vnc_bind_address",
+ ])
+ _CheckOutputFields(static=self.static_fields,
dynamic=self.dynamic_fields,
selected=self.op.output_fields)
- self.wanted = _GetWantedInstances(self, self.op.names)
+ self.needed_locks = {}
+ self.share_locks[locking.LEVEL_INSTANCE] = 1
+ self.share_locks[locking.LEVEL_NODE] = 1
+
+ if self.op.names:
+ self.wanted = _GetWantedInstances(self, self.op.names)
+ else:
+ self.wanted = locking.ALL_SET
+
+ self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
+ if self.do_locking:
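+ # if we don't request only static fields, we need to lock the instances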
+ self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE and self.do_locking:
+ self._LockInstancesNodes()
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ """
+ pass
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
"""
- instance_names = self.wanted
- instance_list = [self.cfg.GetInstanceInfo(iname) for iname
- in instance_names]
+ all_info = self.cfg.GetAllInstancesInfo()
+ if self.do_locking:
+ instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+ else:
+ instance_names = all_info.keys()
+ instance_list = [all_info[iname] for iname in instance_names]
# begin data gathering
val = instance.vcpus
elif field == "tags":
val = list(instance.GetTags())
+ elif field in ("network_port", "kernel_path", "initrd_path",
+ "hvm_boot_order", "hvm_acpi", "hvm_pae",
+ "hvm_cdrom_image_path", "hvm_nic_type",
+ "hvm_disk_type", "vnc_bind_address"):
+ val = getattr(instance, field, None)
+ if val is None and field in ("hvm_nic_type", "hvm_disk_type",
+ "kernel_path", "initrd_path"):
+ val = "default"
+ elif val is None:
+ val = "-"
else:
raise errors.ParameterError(field)
iout.append(val)
HPATH = "instance-failover"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name", "ignore_consistency"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def BuildHooksEnv(self):
"""Build hooks env.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
+ self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
if instance.disk_template not in constants.DTS_NET_MIRROR:
raise errors.OpPrereqError("Instance's disk layout is not"
" exist on destination node '%s'" %
(brlist, target_node))
- self.instance = instance
-
def Exec(self, feedback_fn):
"""Failover an instance.
# set optional parameters to none if they don't exist
for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
"iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
- "vnc_bind_address"]:
+ "hvm_nic_type", "hvm_disk_type", "vnc_bind_address"]:
if not hasattr(self.op, attr):
setattr(self.op, attr, None)
" like a valid IP address" %
self.op.vnc_bind_address)
+ # Xen HVM device type checks
+ if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+ if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
+ raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
+ " hypervisor" % self.op.hvm_nic_type)
+ if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
+ raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
+ " hypervisor" % self.op.hvm_disk_type)
+
if self.op.start:
self.instance_status = 'up'
else:
hvm_pae=self.op.hvm_pae,
hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
vnc_bind_address=self.op.vnc_bind_address,
+ hvm_nic_type=self.op.hvm_nic_type,
+ hvm_disk_type=self.op.hvm_disk_type,
)
feedback_fn("* creating instance disks...")
feedback_fn("adding instance %s to cluster config" % instance)
self.cfg.AddInstance(iobj)
+ # Add the new instance to the Ganeti Lock Manager
+ self.context.glm.add(locking.LEVEL_INSTANCE, instance)
if self.op.wait_for_sync:
disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
if disk_abort:
_RemoveDisks(iobj, self.cfg)
self.cfg.RemoveInstance(iobj.name)
+ # Remove the new instance from the Ganeti Lock Manager
+ self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
raise errors.OpExecError("There are some degraded disks for"
" this instance")
"""
_OP_REQP = ["instance_name"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
- self.instance = instance
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
def Exec(self, feedback_fn):
"""Connect to the console of an instance
HPATH = "mirrors-replace"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name", "mode", "disks"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+
+ if not hasattr(self.op, "remote_node"):
+ self.op.remote_node = None
+
+ ia_name = getattr(self.op, "iallocator", None)
+ if ia_name is not None:
+ if self.op.remote_node is not None:
+ raise errors.OpPrereqError("Give either the iallocator or the new"
+ " secondary, not both")
+ self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ elif self.op.remote_node is not None:
+ remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
+ if remote_node is None:
+ raise errors.OpPrereqError("Node '%s' not known" %
+ self.op.remote_node)
+ self.op.remote_node = remote_node
+ self.needed_locks[locking.LEVEL_NODE] = [remote_node]
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+ else:
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ # If we're not already locking all nodes in the set we have to declare the
+ # instance's primary/secondary nodes.
+ if (level == locking.LEVEL_NODE and
+ self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
+ self._LockInstancesNodes()
def _RunAllocator(self):
"""Compute a new secondary node using an IAllocator.
This checks that the instance is in the cluster.
"""
- if not hasattr(self.op, "remote_node"):
- self.op.remote_node = None
-
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
+ instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
self.instance = instance
- self.op.instance_name = instance.name
if instance.disk_template not in constants.DTS_NET_MIRROR:
raise errors.OpPrereqError("Instance's disk layout is not"
ia_name = getattr(self.op, "iallocator", None)
if ia_name is not None:
- if self.op.remote_node is not None:
- raise errors.OpPrereqError("Give either the iallocator or the new"
- " secondary, not both")
- self.op.remote_node = self._RunAllocator()
+ self._RunAllocator()
remote_node = self.op.remote_node
if remote_node is not None:
- remote_node = self.cfg.ExpandNodeName(remote_node)
- if remote_node is None:
- raise errors.OpPrereqError("Node '%s' not known" %
- self.op.remote_node)
self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
+ assert self.remote_node_info is not None, \
+ "Cannot retrieve locked node %s" % remote_node
else:
self.remote_node_info = None
if remote_node == instance.primary_node:
if instance.FindDisk(name) is None:
raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
(name, instance.name))
- self.op.remote_node = remote_node
def _ExecD8DiskOnly(self, feedback_fn):
"""Replace a disk on the primary or secondary for dbrd8.
# Activate the instance disks if we're replacing them on a down instance
if instance.status == "down":
- op = opcodes.OpActivateInstanceDisks(instance_name=instance.name)
- self.proc.ChainOpCode(op)
+ _StartInstanceDisks(self.cfg, instance, True)
if instance.disk_template == constants.DT_DRBD8:
if self.op.remote_node is None:
# Deactivate the instance disks if we're replacing them on a down instance
if instance.status == "down":
- op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name)
- self.proc.ChainOpCode(op)
+ _SafeShutdownInstanceDisks(instance, self.cfg)
return ret
HPATH = "disk-grow"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name", "disk", "amount"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def BuildHooksEnv(self):
"""Build hooks env.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
+ instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
+
self.instance = instance
- self.op.instance_name = instance.name
if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
raise errors.OpPrereqError("Instance's disk layout does not support"
for node in (instance.secondary_nodes + (instance.primary_node,)):
self.cfg.SetDiskID(disk, node)
result = rpc.call_blockdev_grow(node, disk, self.op.amount)
- if not result or not isinstance(result, tuple) or len(result) != 2:
+ if not result or not isinstance(result, (list, tuple)) or len(result) != 2:
raise errors.OpExecError("grow request failed to node %s" % node)
elif not result[0]:
raise errors.OpExecError("grow request failed to node %s: %s" %
"""
_OP_REQP = ["instances"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.needed_locks = {}
+ self.share_locks = dict(((i, 1) for i in locking.LEVELS))
+
+ if not isinstance(self.op.instances, list):
+ raise errors.OpPrereqError("Invalid argument type 'instances'")
+
+ if self.op.instances:
+ self.wanted_names = []
+ for name in self.op.instances:
+ full_name = self.cfg.ExpandInstanceName(name)
+ if full_name is None:
+ raise errors.OpPrereqError("Instance '%s' not known" %
+ name)
+ self.wanted_names.append(full_name)
+ self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+ else:
+ self.wanted_names = None
+ self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This only checks the optional instance list against the existing names.
"""
- if not isinstance(self.op.instances, list):
- raise errors.OpPrereqError("Invalid argument type 'instances'")
- if self.op.instances:
- self.wanted_instances = []
- names = self.op.instances
- for name in names:
- instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
- if instance is None:
- raise errors.OpPrereqError("No such instance name '%s'" % name)
- self.wanted_instances.append(instance)
- else:
- self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
- in self.cfg.GetInstanceList()]
- return
+ if self.wanted_names is None:
+ self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+ self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
+ in self.wanted_names]
+ return
def _ComputeDiskStatus(self, instance, snode, dev):
"""Compute block device status.
idict["hvm_acpi"] = instance.hvm_acpi
idict["hvm_pae"] = instance.hvm_pae
idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
+ idict["hvm_nic_type"] = instance.hvm_nic_type
+ idict["hvm_disk_type"] = instance.hvm_disk_type
if htkind in constants.HTS_REQ_PORT:
- idict["vnc_bind_address"] = instance.vnc_bind_address
+ if instance.vnc_bind_address is None:
+ vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
+ else:
+ vnc_bind_address = instance.vnc_bind_address
+ if instance.network_port is None:
+ vnc_console_port = None
+ elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
+ vnc_console_port = "%s:%s" % (instance.primary_node,
+ instance.network_port)
+ elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
+ vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
+ instance.network_port,
+ instance.primary_node)
+ else:
+ vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
+ instance.network_port)
+ idict["vnc_console_port"] = vnc_console_port
+ idict["vnc_bind_address"] = vnc_bind_address
idict["network_port"] = instance.network_port
result[instance.name] = idict
HPATH = "instance-modify"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
def BuildHooksEnv(self):
"""Build hooks env.
This only checks the instance list against the existing names.
"""
+ # FIXME: all the parameters could be checked before, in ExpandNames, or in
+ # a separate CheckArguments function, if we implement one, so the operation
+ # can be aborted without waiting for any lock, should it have an error...
self.mem = getattr(self.op, "mem", None)
self.vcpus = getattr(self.op, "vcpus", None)
self.ip = getattr(self.op, "ip", None)
self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
self.hvm_pae = getattr(self.op, "hvm_pae", None)
+ self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
+ self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
+ self.force = getattr(self.op, "force", None)
all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
self.kernel_path, self.initrd_path, self.hvm_boot_order,
self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
- self.vnc_bind_address]
+ self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
if all_parms.count(None) == len(all_parms):
raise errors.OpPrereqError("No changes submitted")
if self.mem is not None:
# hvm_cdrom_image_path verification
if self.op.hvm_cdrom_image_path is not None:
- if not os.path.isabs(self.op.hvm_cdrom_image_path):
+ if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
+ self.op.hvm_cdrom_image_path.lower() == "none"):
raise errors.OpPrereqError("The path to the HVM CDROM image must"
" be an absolute path or None, not %s" %
self.op.hvm_cdrom_image_path)
- if not os.path.isfile(self.op.hvm_cdrom_image_path):
+ if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
+ self.op.hvm_cdrom_image_path.lower() == "none"):
raise errors.OpPrereqError("The HVM CDROM image must either be a"
" regular file or a symlink pointing to"
" an existing regular file, not %s" %
" like a valid IP address" %
self.op.vnc_bind_address)
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("No such instance name '%s'" %
- self.op.instance_name)
- self.op.instance_name = instance.name
- self.instance = instance
+ instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
+ self.warn = []
+ if self.mem is not None and not self.force:
+ pnode = self.instance.primary_node
+ nodelist = [pnode]
+ nodelist.extend(instance.secondary_nodes)
+ instance_info = rpc.call_instance_info(pnode, instance.name)
+ nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+
+ if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
+ # Assume the primary node is unreachable and go ahead
+ self.warn.append("Can't get info from primary node %s" % pnode)
+ else:
+ if instance_info:
+ current_mem = instance_info['memory']
+ else:
+ # Assume instance not running
+ # (there is a slight race condition here, but it's not very probable,
+ # and we have no other way to check)
+ current_mem = 0
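+ # the requested memory, minus what the instance currently uses,
+ # must fit into the primary node's free memory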
+ miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
+ if miss_mem > 0:
+ raise errors.OpPrereqError("This change will prevent the instance"
+ " from starting, due to %d MB of memory"
+ " missing on its primary node" % miss_mem)
+
+ for node in instance.secondary_nodes:
+ if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
+ self.warn.append("Can't get info from secondary node %s" % node)
+ elif self.mem > nodeinfo[node]['memory_free']:
+ self.warn.append("Not enough memory to failover instance to secondary"
+ " node %s" % node)
+
+ # Xen HVM device type checks
+ if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+ if self.op.hvm_nic_type is not None:
+ if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
+ raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
+ " HVM hypervisor" % self.op.hvm_nic_type)
+ if self.op.hvm_disk_type is not None:
+ if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
+ raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
+ " HVM hypervisor" % self.op.hvm_disk_type)
+
return
def Exec(self, feedback_fn):
All parameters take effect only at the next restart of the instance.
"""
+ # Process here the warnings from CheckPrereq, as we don't have a
+ # feedback_fn there.
+ for warn in self.warn:
+ feedback_fn("WARNING: %s" % warn)
+
result = []
instance = self.instance
if self.mem:
else:
instance.hvm_boot_order = self.hvm_boot_order
result.append(("hvm_boot_order", self.hvm_boot_order))
- if self.hvm_acpi:
+ if self.hvm_acpi is not None:
instance.hvm_acpi = self.hvm_acpi
result.append(("hvm_acpi", self.hvm_acpi))
- if self.hvm_pae:
+ if self.hvm_pae is not None:
instance.hvm_pae = self.hvm_pae
result.append(("hvm_pae", self.hvm_pae))
+ if self.hvm_nic_type is not None:
+ instance.hvm_nic_type = self.hvm_nic_type
+ result.append(("hvm_nic_type", self.hvm_nic_type))
+ if self.hvm_disk_type is not None:
+ instance.hvm_disk_type = self.hvm_disk_type
+ result.append(("hvm_disk_type", self.hvm_disk_type))
if self.hvm_cdrom_image_path:
- instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
+ if self.hvm_cdrom_image_path == constants.VALUE_NONE:
+ instance.hvm_cdrom_image_path = None
+ else:
+ instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
if self.vnc_bind_address:
instance.vnc_bind_address = self.vnc_bind_address
result.append(("vnc_bind_address", self.vnc_bind_address))
- self.cfg.AddInstance(instance)
+ self.cfg.Update(instance)
return result
"""Query the exports list
"""
- _OP_REQP = []
+ _OP_REQP = ['nodes']
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.needed_locks = {}
+ self.share_locks[locking.LEVEL_NODE] = 1
+ if not self.op.nodes:
+ self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ else:
+ self.needed_locks[locking.LEVEL_NODE] = \
+ _GetWantedNodes(self, self.op.nodes)
def CheckPrereq(self):
- """Check that the nodelist contains only existing nodes.
+ """Check prerequisites.
"""
- self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
+ self.nodes = self.acquired_locks[locking.LEVEL_NODE]
def Exec(self, feedback_fn):
"""Compute the list of all the exported system images.
HPATH = "instance-export"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name", "target_node", "shutdown"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+ # FIXME: lock only instance primary and destination node
+ #
+ # Sad but true, for now we have to lock all nodes, as we don't know where
+ # the previous export might be, and in this LU we search for it and
+ # remove it from its current node. In the future we could fix this by:
+ # - making a tasklet to search (share-lock all), then create the new one,
+ # then another to remove it afterwards
+ # - removing the removal operation altogether
+ self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+ def DeclareLocks(self, level):
+ """Last minute lock declaration."""
+ # All nodes are locked anyway, so nothing to do here.
def BuildHooksEnv(self):
"""Build hooks env.
This checks that the instance and node names are valid.
"""
- instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
+ instance_name = self.op.instance_name
self.instance = self.cfg.GetInstanceInfo(instance_name)
- if self.instance is None:
- raise errors.OpPrereqError("Instance '%s' not found" %
- self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
- # node verification
- dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
- self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
+ self.dst_node = self.cfg.GetNodeInfo(
+ self.cfg.ExpandNodeName(self.op.target_node))
- if self.dst_node is None:
- raise errors.OpPrereqError("Destination node '%s' is unknown." %
- self.op.target_node)
- self.op.target_node = self.dst_node.name
+ assert self.dst_node is not None, \
+ "Cannot retrieve locked node %s" % self.op.target_node
# instance disk type verification
for disk in self.instance.disks:
if self.op.shutdown:
# shutdown the instance, but not the disks
if not rpc.call_instance_shutdown(src_node, instance):
- raise errors.OpExecError("Could not shutdown instance %s on node %s" %
- (instance.name, src_node))
+ raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+ (instance.name, src_node))
vgname = self.cfg.GetVGName()
# if we proceed the backup would be removed because OpQueryExports
# substitutes an empty list with the full cluster node list.
if nodelist:
- op = opcodes.OpQueryExports(nodes=nodelist)
- exportlist = self.proc.ChainOpCode(op)
+ exportlist = rpc.call_export_list(nodelist)
for node in exportlist:
if instance.name in exportlist[node]:
if not rpc.call_export_remove(node, instance.name):
fqdn_warn = True
instance_name = self.op.instance_name
- op = opcodes.OpQueryExports(nodes=[])
- exportlist = self.proc.ChainOpCode(op)
+ exportlist = rpc.call_export_list(self.cfg.GetNodeList())
found = False
for node in exportlist:
if instance_name in exportlist[node]:
"""Returns the tag list.
"""
- return self.target.GetTags()
+ return list(self.target.GetTags())
class LUSearchTags(NoHooksLU):
" config file and the operation has been"
" aborted. Please retry.")
+
class LUTestDelay(NoHooksLU):
"""Sleep for a specified amount of time.
- This LU sleeps on the master and/or nodes for a specified amoutn of
+ This LU sleeps on the master and/or nodes for a specified amount of
time.
"""
_OP_REQP = ["duration", "on_master", "on_nodes"]
+ REQ_BGL = False
+
- def CheckPrereq(self):
- """Check prerequisites.
+ def ExpandNames(self):
+ """Expand names and set required locks.
- This checks that we have a good list of nodes and/or the duration
- is valid.
+ This expands the node list, if any.
"""
-
+ self.needed_locks = {}
if self.op.on_nodes:
+ # _GetWantedNodes can be used here, but is not always appropriate in
+ # ExpandNames; check the LogicalUnit.ExpandNames docstring for more
+ # information.
self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
+ self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ """
def Exec(self, feedback_fn):
"""Do the actual sleep.
result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
- if not isinstance(result, tuple) or len(result) != 4:
+ if not isinstance(result, (list, tuple)) or len(result) != 4:
raise errors.OpExecError("Invalid result from master iallocator runner")
rcode, stdout, stderr, fail = result
if rcode == constants.IARUN_NOTFOUND:
raise errors.OpExecError("Can't find allocator '%s'" % name)
elif rcode == constants.IARUN_FAILURE:
- raise errors.OpExecError("Instance allocator call failed: %s,"
- " output: %s" %
- (fail, stdout+stderr))
+ raise errors.OpExecError("Instance allocator call failed: %s,"
+ " output: %s" % (fail, stdout+stderr))
self.out_text = stdout
if validate:
self._ValidateResult()