from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
+from ganeti import locking
from ganeti import config
from ganeti import constants
from ganeti import objects
"""Logical Unit base class.
Subclasses must follow these rules:
- - implement CheckPrereq which also fills in the opcode instance
- with all the fields (even if as None)
+ - implement ExpandNames
+ - implement CheckPrereq
- implement Exec
- implement BuildHooksEnv
- redefine HPATH and HTYPE
- optionally redefine their run requirements:
REQ_MASTER: the LU needs to run on the master node
REQ_WSSTORE: the LU needs a writable SimpleStore
+ REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
Note that all commands require root permissions.
_OP_REQP = []
REQ_MASTER = True
REQ_WSSTORE = False
+ REQ_BGL = True
- def __init__(self, processor, op, cfg, sstore):
+ def __init__(self, processor, op, context, sstore):
"""Constructor for LogicalUnit.
This needs to be overriden in derived classes in order to check op
"""
self.proc = processor
self.op = op
- self.cfg = cfg
+ self.cfg = context.cfg
self.sstore = sstore
+ self.context = context
+ self.needed_locks = None
self.__ssh = None
for attr_name in self._OP_REQP:
raise errors.OpPrereqError("Required parameter '%s' missing" %
attr_name)
- if not cfg.IsCluster():
+ if not self.cfg.IsCluster():
raise errors.OpPrereqError("Cluster not initialized yet,"
" use 'gnt-cluster init' first.")
if self.REQ_MASTER:
ssh = property(fget=__GetSSH)
+ def ExpandNames(self):
+ """Expand names for this LU.
+
+ This method is called before starting to execute the opcode, and it should
+ update all the parameters of the opcode to their canonical form (e.g. a
+ short node name must be fully expanded after this method has successfully
+ completed). This way locking, hooks, logging, etc. can work correctly.
+
+ LUs which implement this method must also populate the self.needed_locks
+ member, as a dict with lock levels as keys, and a list of needed lock names
+ as values. Rules:
+ - Use an empty dict if you don't need any lock
+ - If you don't need any lock at a particular level omit that level
+ - Don't put anything for the BGL level
+ - If you want all locks at a level use None as a value
+ (this reflects what LockSet does, and will be replaced before
+ CheckPrereq with the full list of nodes that have been locked)
+
+ Examples:
+ # Acquire all nodes and one instance
+ self.needed_locks = {
+ locking.LEVEL_NODE: None,
+      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
+ }
+ # Acquire just two nodes
+ self.needed_locks = {
+ locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
+ }
+ # Acquire no locks
+ self.needed_locks = {} # No, you can't leave it to the default value None
+
+ """
+ # The implementation of this method is mandatory only if the new LU is
+ # concurrent, so that old LUs don't need to be changed all at the same
+ # time.
+ if self.REQ_BGL:
+ self.needed_locks = {} # Exclusive LUs don't need locks.
+ else:
+ raise NotImplementedError
+
def CheckPrereq(self):
"""Check prerequisites for this LU.
not fulfilled. Its return value is ignored.
This method should also update all the parameters of the opcode to
- their canonical form; e.g. a short node name must be fully
- expanded after this method has successfully completed (so that
- hooks, logging, etc. work correctly).
+ their canonical form if it hasn't been done by ExpandNames before.
"""
raise NotImplementedError
"""
return lu_result
+ def _ExpandAndLockInstance(self):
+ """Helper function to expand and lock an instance.
+
+ Many LUs that work on an instance take its name in self.op.instance_name
+ and need to expand it and then declare the expanded name for locking. This
+ function does it, and then updates self.op.instance_name to the expanded
+ name. It also initializes needed_locks as a dict, if this hasn't been done
+ before.
+
+ """
+ if self.needed_locks is None:
+ self.needed_locks = {}
+ else:
+ assert locking.LEVEL_INSTANCE not in self.needed_locks, \
+ "_ExpandAndLockInstance called with instance-level locks set"
+ expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
+ if expanded_name is None:
+ raise errors.OpPrereqError("Instance '%s' not known" %
+ self.op.instance_name)
+ self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
+ self.op.instance_name = expanded_name
+
class NoHooksLU(LogicalUnit):
"""Simple LU which runs no hooks.
raise errors.OpPrereqError("Neither the name nor the IP address of the"
" cluster has changed")
if new_ip != old_ip:
- result = utils.RunCmd(["fping", "-q", new_ip])
- if not result.failed:
+ if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("The given cluster IP address (%s) is"
" reachable on the network. Aborting." %
new_ip)
if done or oneshot:
break
- if unlock:
- #utils.Unlock('cmd')
- pass
- try:
- time.sleep(min(60, max_time))
- finally:
- if unlock:
- #utils.Lock('cmd')
- pass
+ time.sleep(min(60, max_time))
if done:
proc.LogInfo("Instance %s's disks are in sync." % instance.name)
rpc.call_node_leave_cluster(node.name)
- self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
-
logger.Info("Removing node %s from config" % node.name)
self.cfg.RemoveNode(node.name)
+ # Remove the node from the Ganeti Lock Manager
+ self.context.glm.remove(locking.LEVEL_NODE, node.name)
utils.RemoveHostFromEtcHosts(node.name)
new_node = self.new_node
node = new_node.name
- # set up inter-node password and certificate and restarts the node daemon
- gntpass = self.sstore.GetNodeDaemonPassword()
- if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
- raise errors.OpExecError("ganeti password corruption detected")
- f = open(constants.SSL_CERT_FILE)
- try:
- gntpem = f.read(8192)
- finally:
- f.close()
- # in the base64 pem encoding, neither '!' nor '.' are valid chars,
- # so we use this to detect an invalid certificate; as long as the
- # cert doesn't contain this, the here-document will be correctly
- # parsed by the shell sequence below
- if re.search('^!EOF\.', gntpem, re.MULTILINE):
- raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
- if not gntpem.endswith("\n"):
- raise errors.OpExecError("PEM must end with newline")
- logger.Info("copy cluster pass to %s and starting the node daemon" % node)
-
- # and then connect with ssh to set password and start ganeti-noded
- # note that all the below variables are sanitized at this point,
- # either by being constants or by the checks above
- ss = self.sstore
- mycommand = ("umask 077 && "
- "echo '%s' > '%s' && "
- "cat > '%s' << '!EOF.' && \n"
- "%s!EOF.\n%s restart" %
- (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
- constants.SSL_CERT_FILE, gntpem,
- constants.NODE_INITD_SCRIPT))
-
- result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True)
- if result.failed:
- raise errors.OpExecError("Remote command on node %s, error: %s,"
- " output: %s" %
- (node, result.fail_reason, result.output))
-
# check connectivity
- time.sleep(4)
-
result = rpc.call_version([node])[node]
if result:
if constants.PROTOCOL_VERSION == result:
" you gave (%s). Please fix and re-run this"
" command." % new_node.secondary_ip)
- success, msg = self.ssh.VerifyNodeHostname(node)
- if not success:
- raise errors.OpExecError("Node '%s' claims it has a different hostname"
- " than the one the resolver gives: %s."
- " Please fix and re-run this command." %
- (node, msg))
+ node_verify_list = [self.sstore.GetMasterNode()]
+ node_verify_param = {
+ 'nodelist': [node],
+ # TODO: do a node-net-test as well?
+ }
+
+ result = rpc.call_node_verify(node_verify_list, node_verify_param)
+ for verifier in node_verify_list:
+ if not result[verifier]:
+ raise errors.OpExecError("Cannot communicate with %s's node daemon"
+ " for remote verification" % verifier)
+ if result[verifier]['nodelist']:
+ for failed in result[verifier]['nodelist']:
+ feedback_fn("ssh/hostname verification failed %s -> %s" %
+ (verifier, result[verifier]['nodelist'][failed]))
+ raise errors.OpExecError("ssh/hostname verification failed.")
# Distribute updated /etc/hosts and known_hosts to all nodes,
# including the node just added
logger.Error("copy of file %s to node %s failed" %
(fname, to_node))
- to_copy = ss.GetFileList()
+ to_copy = self.sstore.GetFileList()
if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
to_copy.append(constants.VNC_PASSWORD_FILE)
for fname in to_copy:
- if not self.ssh.CopyFileToNode(node, fname):
+ result = rpc.call_upload_file([node], fname)
+ if not result[node]:
logger.Error("could not copy file %s to node %s" % (fname, node))
if not self.op.readd:
logger.Info("adding node %s to cluster.conf" % node)
self.cfg.AddNode(new_node)
+ # Add the new node to the Ganeti Lock Manager
+ self.context.glm.add(locking.LEVEL_NODE, node)
class LUMasterFailover(LogicalUnit):
"""
_OP_REQP = []
REQ_MASTER = False
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.needed_locks = {}
def CheckPrereq(self):
"""No prerequsites needed for this LU.
"""
_OP_REQP = []
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.needed_locks = {}
def CheckPrereq(self):
"""No prerequisites.
new_name)
if not getattr(self.op, "ignore_ip", False):
- command = ["fping", "-q", name_info.ip]
- result = utils.RunCmd(command)
- if not result.failed:
+ if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("IP %s of instance %s already in use" %
(name_info.ip, new_name))
logger.Info("removing instance %s out of cluster config" % instance.name)
self.cfg.RemoveInstance(instance.name)
+    # Remove the instance from the Ganeti Lock Manager
+ self.context.glm.remove(locking.LEVEL_INSTANCE, instance.name)
class LUQueryInstances(NoHooksLU):
feedback_fn("adding instance %s to cluster config" % instance)
self.cfg.AddInstance(iobj)
+ # Add the new instance to the Ganeti Lock Manager
+ self.context.glm.add(locking.LEVEL_INSTANCE, instance)
if self.op.wait_for_sync:
disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
if disk_abort:
_RemoveDisks(iobj, self.cfg)
self.cfg.RemoveInstance(iobj.name)
+ # Remove the new instance from the Ganeti Lock Manager
+ self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
raise errors.OpExecError("There are some degraded disks for"
" this instance")
"""
_OP_REQP = ["instance_name"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name)
- self.instance = instance
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
def Exec(self, feedback_fn):
"""Connect to the console of an instance
HPATH = "instance-modify"
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
def BuildHooksEnv(self):
"""Build hooks env.
This only checks the instance list against the existing names.
"""
+ # FIXME: all the parameters could be checked before, in ExpandNames, or in
+ # a separate CheckArguments function, if we implement one, so the operation
+ # can be aborted without waiting for any lock, should it have an error...
self.mem = getattr(self.op, "mem", None)
self.vcpus = getattr(self.op, "vcpus", None)
self.ip = getattr(self.op, "ip", None)
" like a valid IP address" %
self.op.vnc_bind_address)
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("No such instance name '%s'" %
- self.op.instance_name)
- self.op.instance_name = instance.name
- self.instance = instance
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
return
def Exec(self, feedback_fn):
instance.vnc_bind_address = self.vnc_bind_address
result.append(("vnc_bind_address", self.vnc_bind_address))
- self.cfg.AddInstance(instance)
+ self.cfg.Update(instance)
return result
" config file and the operation has been"
" aborted. Please retry.")
+
class LUTestDelay(NoHooksLU):
"""Sleep for a specified amount of time.
- This LU sleeps on the master and/or nodes for a specified amoutn of
+ This LU sleeps on the master and/or nodes for a specified amount of
time.
"""
_OP_REQP = ["duration", "on_master", "on_nodes"]
+ REQ_BGL = False
- def CheckPrereq(self):
- """Check prerequisites.
+ def ExpandNames(self):
+ """Expand names and set required locks.
- This checks that we have a good list of nodes and/or the duration
- is valid.
+ This expands the node list, if any.
"""
-
+ self.needed_locks = {}
if self.op.on_nodes:
+ # _GetWantedNodes can be used here, but is not always appropriate to use
+ # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
+ # more information.
self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
+ self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ """
def Exec(self, feedback_fn):
"""Do the actual sleep.