-#!/usr/bin/python
+#
#
# Copyright (C) 2006, 2007 Google Inc.
import os
import os.path
import sha
-import socket
import time
import tempfile
import re
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf
+from ganeti import serializer
+
class LogicalUnit(object):
"""Logical Unit base class.
validity.
"""
- self.processor = processor
+ self.proc = processor
self.op = op
self.cfg = cfg
self.sstore = sstore
" use 'gnt-cluster init' first.")
if self.REQ_MASTER:
master = sstore.GetMasterNode()
- if master != socket.gethostname():
+ if master != utils.HostInfo().name:
raise errors.OpPrereqError("Commands must be run on the master"
" node %s" % master)
This is a no-op, since we don't run hooks.
"""
- return
+ return {}, [], []
+
+
+def _AddHostToEtcHosts(hostname):
+ """Wrapper around utils.SetEtcHostsEntry.
+
+ """
+ hi = utils.HostInfo(name=hostname)
+ utils.SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
+
+
+def _RemoveHostFromEtcHosts(hostname):
+ """Wrapper around utils.RemoveEtcHostsEntry.
+
+ """
+ hi = utils.HostInfo(name=hostname)
+ utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
+ utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
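+
+# The two helpers above keep constants.ETC_HOSTS in sync with cluster
+# membership; they are called below from cluster init, node add and
+# node removal.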
def _GetWantedNodes(lu, nodes):
secondary_nodes: List of secondary nodes as strings
"""
env = {
+ "OP_TARGET": name,
"INSTANCE_NAME": name,
"INSTANCE_PRIMARY": primary_node,
"INSTANCE_SECONDARIES": " ".join(secondary_nodes),
if nics:
nic_count = len(nics)
- for idx, (ip, bridge) in enumerate(nics):
+ for idx, (ip, bridge, mac) in enumerate(nics):
if ip is None:
ip = ""
env["INSTANCE_NIC%d_IP" % idx] = ip
env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
+ env["INSTANCE_NIC%d_HWADDR" % idx] = mac
else:
nic_count = 0
'status': instance.os,
'memory': instance.memory,
'vcpus': instance.vcpus,
- 'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
+ 'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
}
if override:
args.update(override)
return _BuildInstanceHookEnv(**args)
-def _UpdateEtcHosts(fullnode, ip):
- """Ensure a node has a correct entry in /etc/hosts.
-
- Args:
- fullnode - Fully qualified domain name of host. (str)
- ip - IPv4 address of host (str)
-
- """
- node = fullnode.split(".", 1)[0]
-
- f = open('/etc/hosts', 'r+')
-
- inthere = False
-
- save_lines = []
- add_lines = []
- removed = False
-
- while True:
- rawline = f.readline()
-
- if not rawline:
- # End of file
- break
-
- line = rawline.split('\n')[0]
-
- # Strip off comments
- line = line.split('#')[0]
-
- if not line:
- # Entire line was comment, skip
- save_lines.append(rawline)
- continue
-
- fields = line.split()
-
- haveall = True
- havesome = False
- for spec in [ ip, fullnode, node ]:
- if spec not in fields:
- haveall = False
- if spec in fields:
- havesome = True
-
- if haveall:
- inthere = True
- save_lines.append(rawline)
- continue
-
- if havesome and not haveall:
- # Line (old, or manual?) which is missing some. Remove.
- removed = True
- continue
-
- save_lines.append(rawline)
-
- if not inthere:
- add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))
-
- if removed:
- if add_lines:
- save_lines = save_lines + add_lines
-
- # We removed a line, write a new file and replace old.
- fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
- newfile = os.fdopen(fd, 'w')
- newfile.write(''.join(save_lines))
- newfile.close()
- os.rename(tmpname, '/etc/hosts')
-
- elif add_lines:
- # Simply appending a new line will do the trick.
- f.seek(0, 2)
- for add in add_lines:
- f.write(add)
-
- f.close()
-
-
def _UpdateKnownHosts(fullnode, ip, pubkey):
"""Ensure a node has a correct known_hosts entry.
add_lines = []
removed = False
- while True:
- rawline = f.readline()
+ for rawline in f:
logger.Debug('read %s' % (repr(rawline),))
- if not rawline:
- # End of file
- break
-
- line = rawline.split('\n')[0]
-
- parts = line.split(' ')
- fields = parts[0].split(',')
- key = parts[2]
-
- haveall = True
- havesome = False
- for spec in [ ip, fullnode ]:
- if spec not in fields:
- haveall = False
- if spec in fields:
- havesome = True
-
- logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
- if haveall and key == pubkey:
- inthere = True
- save_lines.append(rawline)
- logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
- continue
+ parts = rawline.rstrip('\r\n').split()
+
+ # Ignore unwanted lines
+ if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
+ fields = parts[0].split(',')
+ key = parts[2]
+
+ haveall = True
+ havesome = False
+ for spec in [ ip, fullnode ]:
+ if spec not in fields:
+ haveall = False
+ if spec in fields:
+ havesome = True
+
+ logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
+ if haveall and key == pubkey:
+ inthere = True
+ save_lines.append(rawline)
+ logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
+ continue
- if havesome and (not haveall or key != pubkey):
- removed = True
- logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
- continue
+ if havesome and (not haveall or key != pubkey):
+ removed = True
+ logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
+ continue
save_lines.append(rawline)
node: the name of this host as a fqdn
"""
- if os.path.exists('/root/.ssh/id_dsa'):
- utils.CreateBackup('/root/.ssh/id_dsa')
- if os.path.exists('/root/.ssh/id_dsa.pub'):
- utils.CreateBackup('/root/.ssh/id_dsa.pub')
+ priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
- utils.RemoveFile('/root/.ssh/id_dsa')
- utils.RemoveFile('/root/.ssh/id_dsa.pub')
+ for name in priv_key, pub_key:
+ if os.path.exists(name):
+ utils.CreateBackup(name)
+ utils.RemoveFile(name)
result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
- "-f", "/root/.ssh/id_dsa",
+ "-f", priv_key,
"-q", "-N", ""])
if result.failed:
raise errors.OpExecError("Could not generate ssh keypair, error %s" %
result.output)
- f = open('/root/.ssh/id_dsa.pub', 'r')
+ f = open(pub_key, 'r')
try:
- utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
+ utils.AddAuthorizedKey(auth_keys, f.read(8192))
finally:
f.close()
(result.cmd, result.exit_code, result.output))
+def _CheckInstanceBridgesExist(instance):
+  """Check that the bridges needed by an instance exist.
+
+ """
+  # check bridges existence
+ brlist = [nic.bridge for nic in instance.nics]
+ if not rpc.call_bridges_exist(instance.primary_node, brlist):
+ raise errors.OpPrereqError("one or more target bridges %s does not"
+ " exist on destination node '%s'" %
+ (brlist, instance.primary_node))
+
+
class LUInitCluster(LogicalUnit):
"""Initialise the cluster.
ourselves in the post-run node list.
"""
- env = {
- "CLUSTER": self.op.cluster_name,
- "MASTER": self.hostname.name,
- }
+ env = {"OP_TARGET": self.op.cluster_name}
return env, [], [self.hostname.name]
def CheckPrereq(self):
if config.ConfigWriter.IsCluster():
raise errors.OpPrereqError("Cluster is already initialised")
- hostname_local = socket.gethostname()
- self.hostname = hostname = utils.LookupHostname(hostname_local)
- if not hostname:
- raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
- hostname_local)
+ if self.op.hypervisor_type == constants.HT_XEN_HVM31:
+ if not os.path.exists(constants.VNC_PASSWORD_FILE):
+ raise errors.OpPrereqError("Please prepare the cluster VNC"
+                                   " password file %s" %
+ constants.VNC_PASSWORD_FILE)
- if hostname.name != hostname_local:
- raise errors.OpPrereqError("My own hostname (%s) does not match the"
- " resolver (%s): probably not using FQDN"
- " for hostname." %
- (hostname_local, hostname.name))
+ self.hostname = hostname = utils.HostInfo()
if hostname.ip.startswith("127."):
raise errors.OpPrereqError("This host's IP resolves to the private"
- " range (%s). Please fix DNS or /etc/hosts." %
- (hostname.ip,))
-
- self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
- if not clustername:
- raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
- % self.op.cluster_name)
+ " range (%s). Please fix DNS or %s." %
+ (hostname.ip, constants.ETC_HOSTS))
- result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname.ip])
- if result.failed:
+ if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
+ source=constants.LOCALHOST_IP_ADDRESS):
raise errors.OpPrereqError("Inconsistency: this host's name resolves"
" to %s,\nbut this ip address does not"
" belong to this host."
" Aborting." % hostname.ip)
+ self.clustername = clustername = utils.HostInfo(self.op.cluster_name)
+
+ if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
+ timeout=5):
+ raise errors.OpPrereqError("Cluster IP already active. Aborting.")
+
secondary_ip = getattr(self.op, "secondary_ip", None)
if secondary_ip and not utils.IsValidIP(secondary_ip):
raise errors.OpPrereqError("Invalid secondary ip given")
- if secondary_ip and secondary_ip != hostname.ip:
- result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
- if result.failed:
- raise errors.OpPrereqError("You gave %s as secondary IP,\n"
- "but it does not belong to this host." %
- secondary_ip)
+ if (secondary_ip and
+ secondary_ip != hostname.ip and
+ (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
+ source=constants.LOCALHOST_IP_ADDRESS))):
+ raise errors.OpPrereqError("You gave %s as secondary IP,"
+ " but it does not belong to this host." %
+ secondary_ip)
self.secondary_ip = secondary_ip
# checks presence of the volume group given
raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
self.op.mac_prefix)
- if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
+ if self.op.hypervisor_type not in constants.HYPER_TYPES:
raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
self.op.hypervisor_type)
(self.op.master_netdev,
result.output.strip()))
+ if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
+ os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
+ raise errors.OpPrereqError("Init.d script '%s' missing or not"
+ " executable." % constants.NODE_INITD_SCRIPT)
+
def Exec(self, feedback_fn):
"""Initialize the cluster.
hostname = self.hostname
# set up the simple store
- ss = ssconf.SimpleStore()
+ self.sstore = ss = ssconf.SimpleStore()
ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
rpc.call_node_start_master(hostname.name)
# set up ssh config and /etc/hosts
- f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
+ f = open(constants.SSH_HOST_RSA_PUB, 'r')
try:
sshline = f.read()
finally:
f.close()
sshkey = sshline.split(" ")[1]
- _UpdateEtcHosts(hostname.name, hostname.ip)
+ _AddHostToEtcHosts(hostname.name)
_UpdateKnownHosts(hostname.name, hostname.ip, sshkey)
_InitSSHSetup(hostname.name)
# init of cluster config file
- cfgw = config.ConfigWriter()
+ self.cfg = cfgw = config.ConfigWriter()
cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
sshkey, self.op.mac_prefix,
self.op.vg_name, self.op.def_bridge)
"""Destroys the cluster.
"""
- utils.CreateBackup('/root/.ssh/id_dsa')
- utils.CreateBackup('/root/.ssh/id_dsa.pub')
- rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
+ master = self.sstore.GetMasterNode()
+ if not rpc.call_node_stop_master(master):
+ raise errors.OpExecError("Could not disable the master role")
+ priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
+ utils.CreateBackup(priv_key)
+ utils.CreateBackup(pub_key)
+ rpc.call_node_leave_cluster(master)
class LUVerifyCluster(NoHooksLU):
"""Verifies the cluster status.
"""
- _OP_REQP = []
+ _OP_REQP = ["skip_checks"]
def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
remote_version, feedback_fn):
# compares ganeti version
local_version = constants.PROTOCOL_VERSION
if not remote_version:
- feedback_fn(" - ERROR: connection to %s failed" % (node))
+ feedback_fn(" - ERROR: connection to %s failed" % (node))
return True
if local_version != remote_version:
feedback_fn(" - ERROR: hypervisor verify failure: '%s'" % hyp_result)
return bad
- def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
+ def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
+ node_instance, feedback_fn):
"""Verify an instance.
This function checks to see if the required block devices are
"""
bad = False
- instancelist = self.cfg.GetInstanceList()
- if not instance in instancelist:
- feedback_fn(" - ERROR: instance %s not in instance list %s" %
- (instance, instancelist))
- bad = True
-
- instanceconfig = self.cfg.GetInstanceInfo(instance)
node_current = instanceconfig.primary_node
node_vol_should = {}
bad = True
if not instanceconfig.status == 'down':
- if not instance in node_instance[node_current]:
+ if (node_current not in node_instance or
+ not instance in node_instance[node_current]):
feedback_fn(" - ERROR: instance %s not running on node %s" %
(instance, node_current))
bad = True
(instance, node))
bad = True
- return not bad
+ return bad
def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
"""Verify if there are any unknown volumes in the cluster.
bad = True
return bad
+ def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
+ """Verify N+1 Memory Resilience.
+
+ Check that if one single node dies we can still start all the instances it
+ was primary for.
+
+ """
+ bad = False
+
+ for node, nodeinfo in node_info.iteritems():
+ # This code checks that every node which is now listed as secondary has
+      # enough memory to host all instances it is supposed to, should a single
+ # other node in the cluster fail.
+ # FIXME: not ready for failover to an arbitrary node
+ # FIXME: does not support file-backed instances
+ # WARNING: we currently take into account down instances as well as up
+ # ones, considering that even if they're down someone might want to start
+ # them even in the event of a node failure.
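+      # Example: if nodeinfo['sinst-by-pnode'] is {'nodeA': ['inst1', 'inst2']},
+      # this node must keep at least memory(inst1) + memory(inst2) MB free,
+      # since it would have to host both instances should nodeA fail.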
+ for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
+ needed_mem = 0
+ for instance in instances:
+ needed_mem += instance_cfg[instance].memory
+ if nodeinfo['mfree'] < needed_mem:
+          feedback_fn(" - ERROR: not enough memory on node %s to accommodate"
+ " failovers should node %s fail" % (node, prinode))
+ bad = True
+ return bad
+
def CheckPrereq(self):
"""Check prerequisites.
- This has no prerequisites.
+ Transform the list of checks we're going to skip into a set and check that
+ all its members are valid.
"""
- pass
+ self.skip_set = frozenset(self.op.skip_checks)
+ if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
+ raise errors.OpPrereqError("Invalid checks to be skipped specified")
def Exec(self, feedback_fn):
"""Verify integrity of cluster, performing various test on nodes.
"""
bad = False
feedback_fn("* Verifying global settings")
- self.cfg.VerifyConfig()
+ for msg in self.cfg.VerifyConfig():
+ feedback_fn(" - ERROR: %s" % msg)
- master = self.sstore.GetMasterNode()
vg_name = self.cfg.GetVGName()
nodelist = utils.NiceSort(self.cfg.GetNodeList())
instancelist = utils.NiceSort(self.cfg.GetInstanceList())
+ i_non_redundant = [] # Non redundant instances
node_volume = {}
node_instance = {}
+ node_info = {}
+ instance_cfg = {}
# FIXME: verify OS list
# do local checksums
}
all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
all_rversion = rpc.call_version(nodelist)
+ all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
for node in nodelist:
feedback_fn("* Verifying node %s" % node)
# node_volume
volumeinfo = all_volumeinfo[node]
- if type(volumeinfo) != dict:
+ if isinstance(volumeinfo, basestring):
+ feedback_fn(" - ERROR: LVM problem on node %s: %s" %
+ (node, volumeinfo[-400:].encode('string_escape')))
+ bad = True
+ node_volume[node] = {}
+ elif not isinstance(volumeinfo, dict):
feedback_fn(" - ERROR: connection to %s failed" % (node,))
bad = True
continue
-
- node_volume[node] = volumeinfo
+ else:
+ node_volume[node] = volumeinfo
# node_instance
nodeinstance = all_instanceinfo[node]
node_instance[node] = nodeinstance
+ # node_info
+ nodeinfo = all_ninfo[node]
+ if not isinstance(nodeinfo, dict):
+ feedback_fn(" - ERROR: connection to %s failed" % (node,))
+ bad = True
+ continue
+
+ try:
+ node_info[node] = {
+ "mfree": int(nodeinfo['memory_free']),
+ "dfree": int(nodeinfo['vg_free']),
+ "pinst": [],
+ "sinst": [],
+ # dictionary holding all instances this node is secondary for,
+ # grouped by their primary node. Each key is a cluster node, and each
+ # value is a list of instances which have the key as primary and the
+ # current node as secondary. this is handy to calculate N+1 memory
+          # current node as secondary. This is handy to calculate N+1 memory
+          # availability if you can only fail over from a primary to its
+ "sinst-by-pnode": {},
+ }
+ except ValueError:
+ feedback_fn(" - ERROR: invalid value returned from node %s" % (node,))
+ bad = True
+ continue
+
node_vol_should = {}
for instance in instancelist:
feedback_fn("* Verifying instance %s" % instance)
- result = self._VerifyInstance(instance, node_volume, node_instance,
- feedback_fn)
- bad = bad or result
-
inst_config = self.cfg.GetInstanceInfo(instance)
+ result = self._VerifyInstance(instance, inst_config, node_volume,
+ node_instance, feedback_fn)
+ bad = bad or result
inst_config.MapLVsByNode(node_vol_should)
+ instance_cfg[instance] = inst_config
+
+ pnode = inst_config.primary_node
+ if pnode in node_info:
+ node_info[pnode]['pinst'].append(instance)
+ else:
+ feedback_fn(" - ERROR: instance %s, connection to primary node"
+ " %s failed" % (instance, pnode))
+ bad = True
+
+ # If the instance is non-redundant we cannot survive losing its primary
+ # node, so we are not N+1 compliant. On the other hand we have no disk
+ # templates with more than one secondary so that situation is not well
+ # supported either.
+ # FIXME: does not support file-backed instances
+ if len(inst_config.secondary_nodes) == 0:
+ i_non_redundant.append(instance)
+ elif len(inst_config.secondary_nodes) > 1:
+ feedback_fn(" - WARNING: multiple secondaries for instance %s"
+ % instance)
+
+ for snode in inst_config.secondary_nodes:
+ if snode in node_info:
+ node_info[snode]['sinst'].append(instance)
+ if pnode not in node_info[snode]['sinst-by-pnode']:
+ node_info[snode]['sinst-by-pnode'][pnode] = []
+ node_info[snode]['sinst-by-pnode'][pnode].append(instance)
+ else:
+ feedback_fn(" - ERROR: instance %s, connection to secondary node"
+ " %s failed" % (instance, snode))
+
feedback_fn("* Verifying orphan volumes")
result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
feedback_fn)
feedback_fn)
bad = bad or result
+ if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
+ feedback_fn("* Verifying N+1 Memory redundancy")
+ result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
+ bad = bad or result
+
+ feedback_fn("* Other Notes")
+ if i_non_redundant:
+ feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
+ % len(i_non_redundant))
+
return int(bad)
+class LUVerifyDisks(NoHooksLU):
+ """Verifies the cluster disks status.
+
+ """
+ _OP_REQP = []
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This has no prerequisites.
+
+ """
+ pass
+
+ def Exec(self, feedback_fn):
+ """Verify integrity of cluster disks.
+
+ """
+ result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
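+    # The four components are: nodes that could not be queried, per-node LVM
+    # error messages, instances with at least one offline logical volume, and
+    # per-instance lists of missing (node, volume) pairs.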
+
+ vg_name = self.cfg.GetVGName()
+ nodes = utils.NiceSort(self.cfg.GetNodeList())
+ instances = [self.cfg.GetInstanceInfo(name)
+ for name in self.cfg.GetInstanceList()]
+
+ nv_dict = {}
+ for inst in instances:
+ inst_lvs = {}
+ if (inst.status != "up" or
+ inst.disk_template not in constants.DTS_NET_MIRROR):
+ continue
+ inst.MapLVsByNode(inst_lvs)
+ # transform { iname: {node: [vol,],},} to {(node, vol): iname}
+ for node, vol_list in inst_lvs.iteritems():
+ for vol in vol_list:
+ nv_dict[(node, vol)] = inst
+
+ if not nv_dict:
+ return result
+
+ node_lvs = rpc.call_volume_list(nodes, vg_name)
+
+ to_act = set()
+ for node in nodes:
+ # node_volume
+ lvs = node_lvs[node]
+
+ if isinstance(lvs, basestring):
+ logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
+ res_nlvm[node] = lvs
+ elif not isinstance(lvs, dict):
+ logger.Info("connection to node %s failed or invalid data returned" %
+ (node,))
+ res_nodes.append(node)
+ continue
+
+ for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
+ inst = nv_dict.pop((node, lv_name), None)
+ if (not lv_online and inst is not None
+ and inst.name not in res_instances):
+ res_instances.append(inst.name)
+
+ # any leftover items in nv_dict are missing LVs, let's arrange the
+ # data better
+ for key, inst in nv_dict.iteritems():
+ if inst.name not in res_missing:
+ res_missing[inst.name] = []
+ res_missing[inst.name].append(key)
+
+ return result
+
+
class LURenameCluster(LogicalUnit):
"""Rename the cluster.
"""
env = {
+ "OP_TARGET": self.sstore.GetClusterName(),
"NEW_NAME": self.op.name,
}
mn = self.sstore.GetMasterNode()
"""Verify that the passed name is a valid one.
"""
- hostname = utils.LookupHostname(self.op.name)
- if not hostname:
- raise errors.OpPrereqError("Cannot resolve the new cluster name ('%s')" %
- self.op.name)
+ hostname = utils.HostInfo(self.op.name)
new_name = hostname.name
self.ip = new_ip = hostname.ip
(fname, to_node))
finally:
if not rpc.call_node_start_master(master):
- logger.Error("Could not re-enable the master role on the master,\n"
- "please restart manually.")
+ logger.Error("Could not re-enable the master role on the master,"
+ " please restart manually.")
-def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
+def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
"""Sleep and poll for an instance's disk to sync.
"""
return True
if not oneshot:
- logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)
+ proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
node = instance.primary_node
cumul_degraded = False
rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
if not rstats:
- logger.ToStderr("Can't get any data from node %s" % node)
+ proc.LogWarning("Can't get any data from node %s" % node)
retries += 1
if retries >= 10:
raise errors.RemoteError("Can't contact node %s for mirror data,"
for i in range(len(rstats)):
mstat = rstats[i]
if mstat is None:
- logger.ToStderr("Can't compute data for node %s/%s" %
+ proc.LogWarning("Can't compute data for node %s/%s" %
(node, instance.disks[i].iv_name))
continue
- perc_done, est_time, is_degraded = mstat
+ # we ignore the ldisk parameter
+ perc_done, est_time, is_degraded, _ = mstat
cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
if perc_done is not None:
done = False
max_time = est_time
else:
rem_time = "no time estimate"
- logger.ToStdout("- device %s: %5.2f%% done, %s" %
- (instance.disks[i].iv_name, perc_done, rem_time))
+ proc.LogInfo("- device %s: %5.2f%% done, %s" %
+ (instance.disks[i].iv_name, perc_done, rem_time))
if done or oneshot:
break
utils.Lock('cmd')
if done:
- logger.ToStdout("Instance %s's disks are in sync." % instance.name)
+ proc.LogInfo("Instance %s's disks are in sync." % instance.name)
return not cumul_degraded
-def _CheckDiskConsistency(cfgw, dev, node, on_primary):
+def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
"""Check that mirrors are not degraded.
+ The ldisk parameter, if True, will change the test from the
+ is_degraded attribute (which represents overall non-ok status for
+ the device(s)) to the ldisk (representing the local storage status).
+
"""
cfgw.SetDiskID(dev, node)
+ if ldisk:
+ idx = 6
+ else:
+ idx = 5
result = True
if on_primary or dev.AssembleOnSecondary():
rstats = rpc.call_blockdev_find(node, dev)
if not rstats:
- logger.ToStderr("Can't get any data from node %s" % node)
+ logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
result = False
else:
- result = result and (not rstats[5])
+ result = result and (not rstats[idx])
if dev.children:
for child in dev.children:
result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
"""
env = {
+ "OP_TARGET": self.op.node_name,
"NODE_NAME": self.op.node_name,
}
all_nodes = self.cfg.GetNodeList()
self.cfg.RemoveNode(node.name)
+ _RemoveHostFromEtcHosts(node.name)
+
class LUQueryNodes(NoHooksLU):
"""Logical unit for querying nodes.
"""
self.dynamic_fields = frozenset(["dtotal", "dfree",
- "mtotal", "mnode", "mfree"])
+ "mtotal", "mnode", "mfree",
+ "bootid"])
_CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
"pinst_list", "sinst_list",
"mfree": utils.TryConvert(int, nodeinfo['memory_free']),
"dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
"dfree": utils.TryConvert(int, nodeinfo['vg_free']),
+ "bootid": nodeinfo['bootid'],
}
else:
live_data[name] = {}
"""
env = {
+ "OP_TARGET": self.op.node_name,
"NODE_NAME": self.op.node_name,
"NODE_PIP": self.op.primary_ip,
"NODE_SIP": self.op.secondary_ip,
node_name = self.op.node_name
cfg = self.cfg
- dns_data = utils.LookupHostname(node_name)
- if not dns_data:
- raise errors.OpPrereqError("Node %s is not resolvable" % node_name)
+ dns_data = utils.HostInfo(node_name)
node = dns_data.name
primary_ip = self.op.primary_ip = dns_data.ip
" new node doesn't have one")
# checks reachability
- command = ["fping", "-q", primary_ip]
- result = utils.RunCmd(command)
- if result.failed:
+ if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("Node not reachable by ping")
if not newbie_singlehomed:
# check reachability from my secondary ip to newbie's secondary ip
- command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
- result = utils.RunCmd(command)
- if result.failed:
- raise errors.OpPrereqError("Node secondary ip not reachable by ping")
+ if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
+ source=myself.secondary_ip):
+ raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
+ " based ping to noded port")
self.new_node = objects.Node(name=node,
primary_ip=primary_ip,
secondary_ip=secondary_ip)
+ if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+ if not os.path.exists(constants.VNC_PASSWORD_FILE):
+ raise errors.OpPrereqError("Cluster VNC password file %s missing" %
+ constants.VNC_PASSWORD_FILE)
+
def Exec(self, feedback_fn):
"""Adds the new node to the cluster.
# setup ssh on node
logger.Info("copy ssh key to node %s" % node)
+ priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
keyarray = []
- keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
- "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
- "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]
+ keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
+ constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
+ priv_key, pub_key]
for i in keyfiles:
f = open(i, 'r')
raise errors.OpExecError("Cannot transfer ssh keys to the new node")
# Add node to our /etc/hosts, and add key to known_hosts
- _UpdateEtcHosts(new_node.name, new_node.primary_ip)
+ _AddHostToEtcHosts(new_node.name)
+
_UpdateKnownHosts(new_node.name, new_node.primary_ip,
self.cfg.GetHostKey())
if new_node.secondary_ip != new_node.primary_ip:
- result = ssh.SSHCall(node, "root",
- "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
- if result.failed:
- raise errors.OpExecError("Node claims it doesn't have the"
- " secondary ip you gave (%s).\n"
- "Please fix and re-run this command." %
- new_node.secondary_ip)
+ if not rpc.call_node_tcp_ping(new_node.name,
+ constants.LOCALHOST_IP_ADDRESS,
+ new_node.secondary_ip,
+ constants.DEFAULT_NODED_PORT,
+ 10, False):
+ raise errors.OpExecError("Node claims it doesn't have the secondary ip"
+ " you gave (%s). Please fix and re-run this"
+ " command." % new_node.secondary_ip)
success, msg = ssh.VerifyNodeHostname(node)
if not success:
raise errors.OpExecError("Node '%s' claims it has a different hostname"
- " than the one the resolver gives: %s.\n"
- "Please fix and re-run this command." %
+ " than the one the resolver gives: %s."
+ " Please fix and re-run this command." %
(node, msg))
# Distribute updated /etc/hosts and known_hosts to all nodes,
dist_nodes.remove(myself.name)
logger.Debug("Copying hosts and known_hosts to all nodes")
- for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
+ for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
result = rpc.call_upload_file(dist_nodes, fname)
for to_node in dist_nodes:
if not result[to_node]:
(fname, to_node))
to_copy = ss.GetFileList()
+ if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+ to_copy.append(constants.VNC_PASSWORD_FILE)
for fname in to_copy:
if not ssh.CopyFileToNode(node, fname):
logger.Error("could not copy file %s to node %s" % (fname, node))
"""
env = {
+ "OP_TARGET": self.new_master,
"NEW_MASTER": self.new_master,
"OLD_MASTER": self.old_master,
}
This checks that we are not already the master.
"""
- self.new_master = socket.gethostname()
-
+ self.new_master = utils.HostInfo().name
self.old_master = self.sstore.GetMasterNode()
if self.old_master == self.new_master:
raise errors.OpPrereqError("This commands must be run on the node"
- " where you want the new master to be.\n"
- "%s is already the master" %
+ " where you want the new master to be."
+ " %s is already the master" %
self.old_master)
def Exec(self, feedback_fn):
if not rpc.call_node_start_master(self.new_master):
logger.Error("could not start the master role on the new master"
" %s, please check" % self.new_master)
- feedback_fn("Error in activating the master IP on the new master,\n"
- "please fix manually.")
+ feedback_fn("Error in activating the master IP on the new master,"
+ " please fix manually.")
"""
filename = self.op.filename
- myname = socket.gethostname()
+ myname = utils.HostInfo().name
for node in self.nodes:
if node == myname:
"""Run a command on some nodes.
"""
+ # put the master at the end of the nodes list
+ master_node = self.sstore.GetMasterNode()
+ if master_node in self.nodes:
+ self.nodes.remove(master_node)
+ self.nodes.append(master_node)
+
data = []
for node in self.nodes:
result = ssh.SSHCall(node, "root", self.op.command)
"""
device_info = []
disks_ok = True
+ iname = instance.name
+  # With the two-pass mechanism we try to reduce the window of
+  # opportunity for the race condition of switching DRBD to primary
+  # before handshaking has occurred, but we do not eliminate it
+
+ # The proper fix would be to wait (with some limits) until the
+ # connection has been made and drbd transitions from WFConnection
+ # into any other network-connected state (Connected, SyncTarget,
+ # SyncSource, etc.)
+
+ # 1st pass, assemble on all nodes in secondary mode
for inst_disk in instance.disks:
- master_result = None
for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
cfg.SetDiskID(node_disk, node)
- is_primary = node == instance.primary_node
- result = rpc.call_blockdev_assemble(node, node_disk, is_primary)
+ result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
if not result:
- logger.Error("could not prepare block device %s on node %s (is_pri"
- "mary=%s)" % (inst_disk.iv_name, node, is_primary))
- if is_primary or not ignore_secondaries:
+ logger.Error("could not prepare block device %s on node %s"
+ " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
+ if not ignore_secondaries:
disks_ok = False
- if is_primary:
- master_result = result
- device_info.append((instance.primary_node, inst_disk.iv_name,
- master_result))
+
+ # FIXME: race condition on drbd migration to primary
+
+ # 2nd pass, do only the primary node
+ for inst_disk in instance.disks:
+ for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
+ if node != instance.primary_node:
+ continue
+ cfg.SetDiskID(node_disk, node)
+ result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
+ if not result:
+ logger.Error("could not prepare block device %s on node %s"
+ " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
+ disks_ok = False
+ device_info.append((instance.primary_node, inst_disk.iv_name, result))
+
+ # leave the disks configured for the primary node
+ # this is a workaround that would be fixed better by
+ # improving the logical/physical id handling
+ for disk in instance.disks:
+ cfg.SetDiskID(disk, instance.primary_node)
return disks_ok, device_info
return result
+def _CheckNodeFreeMemory(cfg, node, reason, requested):
+ """Checks if a node has enough free memory.
+
+  This function checks if a given node has the needed amount of free
+  memory. In case the node has less memory or we cannot get the
+  information from the node, this function raises an OpPrereqError
+ exception.
+
+ Args:
+ - cfg: a ConfigWriter instance
+ - node: the node name
+ - reason: string to use in the error message
+ - requested: the amount of memory in MiB
+
+ """
+ nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
+ if not nodeinfo or not isinstance(nodeinfo, dict):
+ raise errors.OpPrereqError("Could not contact node %s for resource"
+ " information" % (node,))
+
+ free_mem = nodeinfo[node].get('memory_free')
+ if not isinstance(free_mem, int):
+ raise errors.OpPrereqError("Can't compute free memory on node %s, result"
+ " was '%s'" % (node, free_mem))
+ if requested > free_mem:
+ raise errors.OpPrereqError("Not enough memory on node %s for %s:"
+ " needed %s MiB, available %s MiB" %
+ (node, reason, requested, free_mem))
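+
+# Example call (see LUStartupInstance.CheckPrereq below):
+#   _CheckNodeFreeMemory(self.cfg, instance.primary_node,
+#                        "starting instance %s" % instance.name,
+#                        instance.memory)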
+
+
class LUStartupInstance(LogicalUnit):
"""Starts an instance.
self.op.instance_name)
# check bridges existence
- brlist = [nic.bridge for nic in instance.nics]
- if not rpc.call_bridges_exist(instance.primary_node, brlist):
- raise errors.OpPrereqError("one or more target bridges %s does not"
- " exist on destination node '%s'" %
- (brlist, instance.primary_node))
+ _CheckInstanceBridgesExist(instance)
+
+ _CheckNodeFreeMemory(self.cfg, instance.primary_node,
+ "starting instance %s" % instance.name,
+ instance.memory)
self.instance = instance
self.op.instance_name = instance.name
force = self.op.force
extra_args = getattr(self.op, "extra_args", "")
- node_current = instance.primary_node
-
- nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
- if not nodeinfo:
- raise errors.OpExecError("Could not contact node %s for infos" %
- (node_current))
+ self.cfg.MarkInstanceUp(instance.name)
- freememory = nodeinfo[node_current]['memory_free']
- memory = instance.memory
- if memory > freememory:
- raise errors.OpExecError("Not enough memory to start instance"
- " %s on node %s"
- " needed %s MiB, available %s MiB" %
- (instance.name, node_current, memory,
- freememory))
+ node_current = instance.primary_node
_StartInstanceDisks(self.cfg, instance, force)
_ShutdownInstanceDisks(instance, self.cfg)
raise errors.OpExecError("Could not start instance")
+
+class LURebootInstance(LogicalUnit):
+ """Reboot an instance.
+
+ """
+ HPATH = "instance-reboot"
+ HTYPE = constants.HTYPE_INSTANCE
+ _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
+
+ def BuildHooksEnv(self):
+ """Build hooks env.
+
+ This runs on master, primary and secondary nodes of the instance.
+
+ """
+ env = {
+ "IGNORE_SECONDARIES": self.op.ignore_secondaries,
+ }
+ env.update(_BuildInstanceHookEnvByObject(self.instance))
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+ list(self.instance.secondary_nodes))
+ return env, nl, nl
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This checks that the instance is in the cluster.
+
+ """
+ instance = self.cfg.GetInstanceInfo(
+ self.cfg.ExpandInstanceName(self.op.instance_name))
+ if instance is None:
+ raise errors.OpPrereqError("Instance '%s' not known" %
+ self.op.instance_name)
+
+    # check bridges existence
+ _CheckInstanceBridgesExist(instance)
+
+ self.instance = instance
+ self.op.instance_name = instance.name
+
+ def Exec(self, feedback_fn):
+ """Reboot the instance.
+
+ """
+ instance = self.instance
+ ignore_secondaries = self.op.ignore_secondaries
+ reboot_type = self.op.reboot_type
+ extra_args = getattr(self.op, "extra_args", "")
+
+ node_current = instance.primary_node
+
+ if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
+ constants.INSTANCE_REBOOT_HARD,
+ constants.INSTANCE_REBOOT_FULL]:
+ raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
+ (constants.INSTANCE_REBOOT_SOFT,
+ constants.INSTANCE_REBOOT_HARD,
+ constants.INSTANCE_REBOOT_FULL))
+
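+    # Soft and hard reboots are handled by the node daemon in a single RPC
+    # call; a full reboot is emulated below by shutting the instance down,
+    # cycling its disks and starting it again.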
+ if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+ constants.INSTANCE_REBOOT_HARD]:
+ if not rpc.call_instance_reboot(node_current, instance,
+ reboot_type, extra_args):
+ raise errors.OpExecError("Could not reboot instance")
+ else:
+ if not rpc.call_instance_shutdown(node_current, instance):
+ raise errors.OpExecError("could not shutdown instance for full reboot")
+ _ShutdownInstanceDisks(instance, self.cfg)
+ _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
+ if not rpc.call_instance_start(node_current, instance, extra_args):
+ _ShutdownInstanceDisks(instance, self.cfg)
+ raise errors.OpExecError("Could not start instance for full reboot")
+
self.cfg.MarkInstanceUp(instance.name)
"""
instance = self.instance
node_current = instance.primary_node
+ self.cfg.MarkInstanceDown(instance.name)
if not rpc.call_instance_shutdown(node_current, instance):
logger.Error("could not shutdown instance")
- self.cfg.MarkInstanceDown(instance.name)
_ShutdownInstanceDisks(instance, self.cfg)
if pnode is None:
raise errors.OpPrereqError("Primary node '%s' is unknown" %
self.op.pnode)
- os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
- if not isinstance(os_obj, objects.OS):
+ os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
+ if not os_obj:
raise errors.OpPrereqError("OS '%s' not in supported OS list for"
" primary node" % self.op.os_type)
try:
feedback_fn("Running the instance OS create scripts...")
if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
- raise errors.OpExecError("Could not install OS for instance %s "
- "on node %s" %
+ raise errors.OpExecError("Could not install OS for instance %s"
+ " on node %s" %
(inst.name, inst.primary_node))
finally:
_ShutdownInstanceDisks(inst, self.cfg)
self.instance = instance
# new name verification
- hostname1 = utils.LookupHostname(self.op.new_name)
- if not hostname1:
- raise errors.OpPrereqError("New instance name '%s' not found in dns" %
- self.op.new_name)
+ name_info = utils.HostInfo(self.op.new_name)
+
+ self.op.new_name = new_name = name_info.name
+ instance_list = self.cfg.GetInstanceList()
+ if new_name in instance_list:
+ raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
+ new_name)
- self.op.new_name = new_name = hostname1.name
if not getattr(self.op, "ignore_ip", False):
- command = ["fping", "-q", hostname1.ip]
+ command = ["fping", "-q", name_info.ip]
result = utils.RunCmd(command)
if not result.failed:
raise errors.OpPrereqError("IP %s of instance %s already in use" %
- (hostname1.ip, new_name))
+ (name_info.ip, new_name))
def Exec(self, feedback_fn):
try:
if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
"sda", "sdb"):
- msg = ("Could run OS rename script for instance %s\n"
- "on node %s\n"
- "(but the instance has been renamed in Ganeti)" %
+      msg = ("Could not run OS rename script for instance %s on node %s (but"
+             " the instance has been renamed in Ganeti)" %
(inst.name, inst.primary_node))
logger.Error(msg)
finally:
"""
env = _BuildInstanceHookEnvByObject(self.instance)
- nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
- list(self.instance.secondary_nodes))
+ nl = [self.sstore.GetMasterNode()]
return env, nl, nl
def CheckPrereq(self):
(instance.name, instance.primary_node))
if not rpc.call_instance_shutdown(instance.primary_node, instance):
- raise errors.OpExecError("Could not shutdown instance %s on node %s" %
- (instance.name, instance.primary_node))
+ if self.op.ignore_failures:
+ feedback_fn("Warning: can't shutdown instance")
+ else:
+ raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+ (instance.name, instance.primary_node))
logger.Info("removing block devices for instance %s" % instance.name)
- _RemoveDisks(instance, self.cfg)
+ if not _RemoveDisks(instance, self.cfg):
+ if self.op.ignore_failures:
+ feedback_fn("Warning: can't remove instance's disks")
+ else:
+ raise errors.OpExecError("Can't remove instance's disks")
logger.Info("removing instance %s out of cluster config" % instance.name)
This checks that the fields required are valid output fields.
"""
- self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
+ self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
_CheckOutputFields(static=["name", "os", "pnode", "snodes",
"admin_state", "admin_ram",
"disk_template", "ip", "mac", "bridge",
- "sda_size", "sdb_size"],
+ "sda_size", "sdb_size", "vcpus"],
dynamic=self.dynamic_fields,
selected=self.op.output_fields)
val = None
else:
val = bool(live_data.get(instance.name))
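+        # The "status" field combines the configured (admin) state with the
+        # live state: running/ADMIN_down when they agree, ERROR_up/ERROR_down
+        # when they disagree, ERROR_nodedown if the primary node is unreachable.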
+ elif field == "status":
+ if instance.primary_node in bad_nodes:
+ val = "ERROR_nodedown"
+ else:
+ running = bool(live_data.get(instance.name))
+ if running:
+ if instance.status != "down":
+ val = "running"
+ else:
+ val = "ERROR_up"
+ else:
+ if instance.status != "down":
+ val = "ERROR_down"
+ else:
+ val = "ADMIN_down"
elif field == "admin_ram":
val = instance.memory
elif field == "oper_ram":
val = None
else:
val = disk.size
+ elif field == "vcpus":
+ val = instance.vcpus
else:
raise errors.ParameterError(field)
iout.append(val)
raise errors.OpPrereqError("Instance '%s' not known" %
self.op.instance_name)
- if instance.disk_template != constants.DT_REMOTE_RAID1:
+ if instance.disk_template not in constants.DTS_NET_MIRROR:
raise errors.OpPrereqError("Instance's disk layout is not"
- " remote_raid1.")
+ " network mirrored, cannot failover.")
secondary_nodes = instance.secondary_nodes
if not secondary_nodes:
raise errors.ProgrammerError("no secondary node but using "
"DT_REMOTE_RAID1 template")
- # check memory requirements on the secondary node
target_node = secondary_nodes[0]
- nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
- info = nodeinfo.get(target_node, None)
- if not info:
- raise errors.OpPrereqError("Cannot get current information"
- " from node '%s'" % nodeinfo)
- if instance.memory > info['memory_free']:
- raise errors.OpPrereqError("Not enough memory on target node %s."
- " %d MB available, %d MB required" %
- (target_node, info['memory_free'],
- instance.memory))
+ # check memory requirements on the secondary node
+ _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
+ instance.name, instance.memory)
# check bridge existance
brlist = [nic.bridge for nic in instance.nics]
- if not rpc.call_bridges_exist(instance.primary_node, brlist):
+ if not rpc.call_bridges_exist(target_node, brlist):
raise errors.OpPrereqError("One or more target bridges %s does not"
" exist on destination node '%s'" %
- (brlist, instance.primary_node))
+ (brlist, target_node))
self.instance = instance
for dev in instance.disks:
# for remote_raid1, these are md over drbd
if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
- if not self.op.ignore_consistency:
+ if instance.status == "up" and not self.op.ignore_consistency:
raise errors.OpExecError("Disk %s is degraded on target node,"
" aborting failover." % dev.iv_name)
- feedback_fn("* checking target node resource availability")
- nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
-
- if not nodeinfo:
- raise errors.OpExecError("Could not contact target node %s." %
- target_node)
-
- free_memory = int(nodeinfo[target_node]['memory_free'])
- memory = instance.memory
- if memory > free_memory:
- raise errors.OpExecError("Not enough memory to create instance %s on"
- " node %s. needed %s MiB, available %s MiB" %
- (instance.name, target_node, memory,
- free_memory))
-
feedback_fn("* shutting down instance on source node")
logger.Info("Shutting down instance %s on node %s" %
(instance.name, source_node))
if not rpc.call_instance_shutdown(source_node, instance):
- logger.Error("Could not shutdown instance %s on node %s. Proceeding"
- " anyway. Please make sure node %s is down" %
- (instance.name, source_node, source_node))
+ if self.op.ignore_consistency:
+ logger.Error("Could not shutdown instance %s on node %s. Proceeding"
+ " anyway. Please make sure node %s is down" %
+ (instance.name, source_node, source_node))
+ else:
+ raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+ (instance.name, source_node))
feedback_fn("* deactivating the instance's disks on source node")
if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
# distribute new instance config to the other nodes
self.cfg.AddInstance(instance)
- feedback_fn("* activating the instance's disks on target node")
- logger.Info("Starting instance %s on node %s" %
- (instance.name, target_node))
+ # Only start the instance if it's marked as up
+ if instance.status == "up":
+ feedback_fn("* activating the instance's disks on target node")
+ logger.Info("Starting instance %s on node %s" %
+ (instance.name, target_node))
- disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
- ignore_secondaries=True)
- if not disks_ok:
- _ShutdownInstanceDisks(instance, self.cfg)
- raise errors.OpExecError("Can't activate the instance's disks")
+ disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
+ ignore_secondaries=True)
+ if not disks_ok:
+ _ShutdownInstanceDisks(instance, self.cfg)
+ raise errors.OpExecError("Can't activate the instance's disks")
- feedback_fn("* starting the instance on the target node")
- if not rpc.call_instance_start(target_node, instance, None):
- _ShutdownInstanceDisks(instance, self.cfg)
- raise errors.OpExecError("Could not start instance %s on node %s." %
- (instance.name, target_node))
+ feedback_fn("* starting the instance on the target node")
+ if not rpc.call_instance_start(target_node, instance, None):
+ _ShutdownInstanceDisks(instance, self.cfg)
+ raise errors.OpExecError("Could not start instance %s on node %s." %
+ (instance.name, target_node))
-def _CreateBlockDevOnPrimary(cfg, node, device, info):
+def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
"""Create a tree of block devices on the primary node.
This always creates all devices.
"""
if device.children:
for child in device.children:
- if not _CreateBlockDevOnPrimary(cfg, node, child, info):
+ if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
return False
cfg.SetDiskID(device, node)
- new_id = rpc.call_blockdev_create(node, device, device.size, True, info)
+ new_id = rpc.call_blockdev_create(node, device, device.size,
+ instance.name, True, info)
if not new_id:
return False
if device.physical_id is None:
return True
-def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
+def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
"""Create a tree of block devices on a secondary node.
If this device type has to be created on secondaries, create it and
force = True
if device.children:
for child in device.children:
- if not _CreateBlockDevOnSecondary(cfg, node, child, force, info):
+ if not _CreateBlockDevOnSecondary(cfg, node, instance,
+ child, force, info):
return False
if not force:
return True
cfg.SetDiskID(device, node)
- new_id = rpc.call_blockdev_create(node, device, device.size, False, info)
+ new_id = rpc.call_blockdev_create(node, device, device.size,
+ instance.name, False, info)
if not new_id:
return False
if device.physical_id is None:
"""
port = cfg.AllocatePort()
vgname = cfg.GetVGName()
- dev_data = objects.Disk(dev_type="lvm", size=size,
+ dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
logical_id=(vgname, names[0]))
- dev_meta = objects.Disk(dev_type="lvm", size=128,
+ dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
logical_id=(vgname, names[1]))
- drbd_dev = objects.Disk(dev_type="drbd", size=size,
+ drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size,
logical_id = (primary, secondary, port),
children = [dev_data, dev_meta])
return drbd_dev
-def _GenerateDiskTemplate(cfg, template_name,
- instance_name, primary_node,
- secondary_nodes, disk_sz, swap_sz):
- """Generate the entire disk layout for a given template type.
+def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
+ """Generate a drbd8 device complete with its children.
+
+ """
+ port = cfg.AllocatePort()
+ vgname = cfg.GetVGName()
+ dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
+ logical_id=(vgname, names[0]))
+ dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
+ logical_id=(vgname, names[1]))
+ drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
+ logical_id = (primary, secondary, port),
+ children = [dev_data, dev_meta],
+ iv_name=iv_name)
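+  # The branch is thus one LD_DRBD8 device shared by the primary and the
+  # secondary node on the allocated port, with the data LV and a fixed
+  # 128 MB metadata LV as its two children.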
+ return drbd_dev
+
+def _GenerateDiskTemplate(cfg, template_name,
+ instance_name, primary_node,
+ secondary_nodes, disk_sz, swap_sz):
+ """Generate the entire disk layout for a given template type.
"""
#TODO: compute space requirements
vgname = cfg.GetVGName()
- if template_name == "diskless":
+ if template_name == constants.DT_DISKLESS:
disks = []
- elif template_name == "plain":
+ elif template_name == constants.DT_PLAIN:
if len(secondary_nodes) != 0:
raise errors.ProgrammerError("Wrong template configuration")
names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
- sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
+ sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
logical_id=(vgname, names[0]),
iv_name = "sda")
- sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
+ sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
logical_id=(vgname, names[1]),
iv_name = "sdb")
disks = [sda_dev, sdb_dev]
- elif template_name == "local_raid1":
+ elif template_name == constants.DT_LOCAL_RAID1:
if len(secondary_nodes) != 0:
raise errors.ProgrammerError("Wrong template configuration")
names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
".sdb_m1", ".sdb_m2"])
- sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
+ sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
logical_id=(vgname, names[0]))
- sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
+ sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
logical_id=(vgname, names[1]))
- md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
+ md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
size=disk_sz,
children = [sda_dev_m1, sda_dev_m2])
- sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
+ sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
logical_id=(vgname, names[2]))
- sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
+ sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
logical_id=(vgname, names[3]))
- md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
+ md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
size=swap_sz,
children = [sdb_dev_m1, sdb_dev_m2])
disks = [md_sda_dev, md_sdb_dev]
".sdb_data", ".sdb_meta"])
drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
disk_sz, names[0:2])
- md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
+ md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
children = [drbd_sda_dev], size=disk_sz)
drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
swap_sz, names[2:4])
- md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
+ md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
children = [drbd_sdb_dev], size=swap_sz)
disks = [md_sda_dev, md_sdb_dev]
+ elif template_name == constants.DT_DRBD8:
+ if len(secondary_nodes) != 1:
+ raise errors.ProgrammerError("Wrong template configuration")
+ remote_node = secondary_nodes[0]
+ names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
+ ".sdb_data", ".sdb_meta"])
+ drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+ disk_sz, names[0:2], "sda")
+ drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+ swap_sz, names[2:4], "sdb")
+ disks = [drbd_sda_dev, drbd_sdb_dev]
else:
raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
return disks
(device.iv_name, instance.name))
#HARDCODE
for secondary_node in instance.secondary_nodes:
- if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False,
- info):
+ if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
+ device, False, info):
logger.Error("failed to create volume %s (%s) on secondary node %s!" %
(device.iv_name, device, secondary_node))
return False
#HARDCODE
- if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info):
+ if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
+ instance, device, info):
logger.Error("failed to create volume %s on primary!" %
device.iv_name)
return False
This abstracts away some work from `AddInstance()` and
`RemoveInstance()`. Note that in case some of the devices couldn't
- be remove, the removal will continue with the other ones (compare
+ be removed, the removal will continue with the other ones (compare
with `_CreateDisks()`).
Args:
return result
+def _ComputeDiskSize(disk_template, disk_size, swap_size):
+ """Compute disk size requirements in the volume group
+
+ This is currently hard-coded for the two-drive layout.
+
+ """
+ # Required free disk space as a function of disk and swap space
+ req_size_dict = {
+ constants.DT_DISKLESS: None,
+ constants.DT_PLAIN: disk_size + swap_size,
+ constants.DT_LOCAL_RAID1: (disk_size + swap_size) * 2,
+    # 256 MB are added for drbd metadata, 128 MB for each drbd device
+ constants.DT_REMOTE_RAID1: disk_size + swap_size + 256,
+ constants.DT_DRBD8: disk_size + swap_size + 256,
+ }
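+  # For example, a DT_DRBD8 instance with disk_size=10240 and swap_size=4096
+  # needs 10240 + 4096 + 256 = 14592 MB free in the volume group.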
+
+ if disk_template not in req_size_dict:
+ raise errors.ProgrammerError("Disk template '%s' size requirement"
+ " is unknown" % disk_template)
+
+ return req_size_dict[disk_template]
+
+
class LUCreateInstance(LogicalUnit):
"""Create an instance.
"""
HPATH = "instance-add"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
+ _OP_REQP = ["instance_name", "mem_size", "disk_size",
"disk_template", "swap_size", "mode", "start", "vcpus",
- "wait_for_sync"]
+ "wait_for_sync", "ip_check", "mac"]
+
+ def _RunAllocator(self):
+ """Run the allocator based on input opcode.
+
+ """
+ disks = [{"size": self.op.disk_size, "mode": "w"},
+ {"size": self.op.swap_size, "mode": "w"}]
+ nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
+ "bridge": self.op.bridge}]
+ ial = IAllocator(self.cfg, self.sstore,
+ name=self.op.instance_name,
+ disk_template=self.op.disk_template,
+ tags=[],
+ os=self.op.os_type,
+ vcpus=self.op.vcpus,
+ mem_size=self.op.mem_size,
+ disks=disks,
+ nics=nics,
+ mode=constants.IALLOCATOR_MODE_ALLOC)
+
+ ial.Run(self.op.iallocator)
+
+ if not ial.success:
+ raise errors.OpPrereqError("Can't compute nodes using"
+ " iallocator '%s': %s" % (self.op.iallocator,
+ ial.info))
+ req_nodes = 1
+ if self.op.disk_template in constants.DTS_NET_MIRROR:
+ req_nodes += 1
+
+ if len(ial.nodes) != req_nodes:
+ raise errors.OpPrereqError("iallocator '%s' returned invalid number"
+ " of nodes (%s), required %s" %
+                                 (self.op.iallocator, len(ial.nodes),
+                                  req_nodes))
+ self.op.pnode = ial.nodes[0]
+ logger.ToStdout("Selected nodes for the instance: %s" %
+ (", ".join(ial.nodes),))
+ logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
+ (self.op.instance_name, self.op.iallocator, ial.nodes))
+ if req_nodes == 2:
+ self.op.snode = ial.nodes[1]
def BuildHooksEnv(self):
"""Build hooks env.
os_type=self.op.os_type,
memory=self.op.mem_size,
vcpus=self.op.vcpus,
- nics=[(self.inst_ip, self.op.bridge)],
+ nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
))
nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
"""Check prerequisites.
"""
+ # set optional parameters to none if they don't exist
+ for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
+ "iallocator"]:
+ if not hasattr(self.op, attr):
+ setattr(self.op, attr, None)
+
if self.op.mode not in (constants.INSTANCE_CREATE,
constants.INSTANCE_IMPORT):
raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
if getattr(self.op, "os_type", None) is None:
raise errors.OpPrereqError("No guest OS specified")
- # check primary node
- pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
- if pnode is None:
- raise errors.OpPrereqError("Primary node '%s' is unknown" %
- self.op.pnode)
- self.op.pnode = pnode.name
- self.pnode = pnode
- self.secondaries = []
+ #### instance parameters check
+
# disk template and mirror node verification
if self.op.disk_template not in constants.DISK_TEMPLATES:
raise errors.OpPrereqError("Invalid disk template name")
- if self.op.disk_template == constants.DT_REMOTE_RAID1:
- if getattr(self.op, "snode", None) is None:
- raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
- " a mirror node")
-
- snode_name = self.cfg.ExpandNodeName(self.op.snode)
- if snode_name is None:
- raise errors.OpPrereqError("Unknown secondary node '%s'" %
- self.op.snode)
- elif snode_name == pnode.name:
- raise errors.OpPrereqError("The secondary node cannot be"
- " the primary node.")
- self.secondaries.append(snode_name)
-
- # Check lv size requirements
- nodenames = [pnode.name] + self.secondaries
- nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
-
- # Required free disk space as a function of disk and swap space
- req_size_dict = {
- constants.DT_DISKLESS: 0,
- constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
- constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
- # 256 MB are added for drbd metadata, 128MB for each drbd device
- constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
- }
-
- if self.op.disk_template not in req_size_dict:
- raise errors.ProgrammerError("Disk template '%s' size requirement"
- " is unknown" % self.op.disk_template)
-
- req_size = req_size_dict[self.op.disk_template]
-
- for node in nodenames:
- info = nodeinfo.get(node, None)
- if not info:
- raise errors.OpPrereqError("Cannot get current information"
- " from node '%s'" % nodeinfo)
- if req_size > info['vg_free']:
- raise errors.OpPrereqError("Not enough disk space on target node %s."
- " %d MB available, %d MB required" %
- (node, info['vg_free'], req_size))
-
- # os verification
- os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
- if not isinstance(os_obj, objects.OS):
- raise errors.OpPrereqError("OS '%s' not in supported os list for"
- " primary node" % self.op.os_type)
-
- # instance verification
- hostname1 = utils.LookupHostname(self.op.instance_name)
- if not hostname1:
- raise errors.OpPrereqError("Instance name '%s' not found in dns" %
- self.op.instance_name)
+ # instance name verification
+ hostname1 = utils.HostInfo(self.op.instance_name)
self.op.instance_name = instance_name = hostname1.name
instance_list = self.cfg.GetInstanceList()
raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
instance_name)
+ # ip validity checks
ip = getattr(self.op, "ip", None)
if ip is None or ip.lower() == "none":
inst_ip = None
raise errors.OpPrereqError("given IP address '%s' doesn't look"
" like a valid IP" % ip)
inst_ip = ip
- self.inst_ip = inst_ip
+ self.inst_ip = self.op.ip = inst_ip
+
+ if self.op.start and not self.op.ip_check:
+ raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
+ " adding an instance in start mode")
- command = ["fping", "-q", hostname1.ip]
- result = utils.RunCmd(command)
- if not result.failed:
- raise errors.OpPrereqError("IP %s of instance %s already in use" %
- (hostname1.ip, instance_name))
+ if self.op.ip_check:
+ if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
+ raise errors.OpPrereqError("IP %s of instance %s already in use" %
+ (hostname1.ip, instance_name))
+
+ # MAC address verification
+ if self.op.mac != "auto":
+ if not utils.IsValidMac(self.op.mac.lower()):
+ raise errors.OpPrereqError("invalid MAC address specified: %s" %
+ self.op.mac)
# bridge verification
bridge = getattr(self.op, "bridge", None)
else:
self.op.bridge = bridge
+ # boot order verification
+ if self.op.hvm_boot_order is not None:
+ if len(self.op.hvm_boot_order.strip("acdn")) != 0:
+ raise errors.OpPrereqError("invalid boot order specified,"
+ " must be one or more of [acdn]")
+ #### allocator run
+
+ if [self.op.iallocator, self.op.pnode].count(None) != 1:
+ raise errors.OpPrereqError("One and only one of iallocator and primary"
+ " node must be given")
+
+ if self.op.iallocator is not None:
+ self._RunAllocator()
+
+ #### node related checks
+
+ # check primary node
+ pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
+ if pnode is None:
+ raise errors.OpPrereqError("Primary node '%s' is unknown" %
+ self.op.pnode)
+ self.op.pnode = pnode.name
+ self.pnode = pnode
+ self.secondaries = []
+
+ # mirror node verification
+ if self.op.disk_template in constants.DTS_NET_MIRROR:
+ if getattr(self.op, "snode", None) is None:
+ raise errors.OpPrereqError("The networked disk templates need"
+ " a mirror node")
+
+ snode_name = self.cfg.ExpandNodeName(self.op.snode)
+ if snode_name is None:
+ raise errors.OpPrereqError("Unknown secondary node '%s'" %
+ self.op.snode)
+ elif snode_name == pnode.name:
+ raise errors.OpPrereqError("The secondary node cannot be"
+ " the primary node.")
+ self.secondaries.append(snode_name)
+
+ req_size = _ComputeDiskSize(self.op.disk_template,
+ self.op.disk_size, self.op.swap_size)
+
+ # Check lv size requirements
+ if req_size is not None:
+ nodenames = [pnode.name] + self.secondaries
+ nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+ for node in nodenames:
+ info = nodeinfo.get(node, None)
+ if not info:
+ raise errors.OpPrereqError("Cannot get current information"
+ " from node '%s'" % nodeinfo)
+ vg_free = info.get('vg_free', None)
+ if not isinstance(vg_free, int):
+ raise errors.OpPrereqError("Can't compute free disk space on"
+ " node %s" % node)
+ if req_size > info['vg_free']:
+ raise errors.OpPrereqError("Not enough disk space on target node %s."
+ " %d MB available, %d MB required" %
+ (node, info['vg_free'], req_size))
+
+ # os verification
+ os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
+ if not os_obj:
+ raise errors.OpPrereqError("OS '%s' not in supported os list for"
+ " primary node" % self.op.os_type)
+
+ if self.op.kernel_path == constants.VALUE_NONE:
+ raise errors.OpPrereqError("Can't set instance kernel to none")
+
+
+ # bridge check on primary node
if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
raise errors.OpPrereqError("target bridge '%s' does not exist on"
" destination node '%s'" %
instance = self.op.instance_name
pnode_name = self.pnode.name
- nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
+ if self.op.mac == "auto":
+ mac_address = self.cfg.GenerateMAC()
+ else:
+ mac_address = self.op.mac
+
+ nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
if self.inst_ip is not None:
nic.ip = self.inst_ip
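+ # hypervisors listed in HTS_REQ_PORT need a cluster-allocated network
+ # port (stored in the instance as network_port); the others get None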
+ ht_kind = self.sstore.GetHypervisorType()
+ if ht_kind in constants.HTS_REQ_PORT:
+ network_port = self.cfg.AllocatePort()
+ else:
+ network_port = None
+
disks = _GenerateDiskTemplate(self.cfg,
self.op.disk_template,
instance, pnode_name,
nics=[nic], disks=disks,
disk_template=self.op.disk_template,
status=self.instance_status,
+ network_port=network_port,
+ kernel_path=self.op.kernel_path,
+ initrd_path=self.op.initrd_path,
+ hvm_boot_order=self.op.hvm_boot_order,
)
feedback_fn("* creating instance disks...")
self.cfg.AddInstance(iobj)
if self.op.wait_for_sync:
- disk_abort = not _WaitForSync(self.cfg, iobj)
- elif iobj.disk_template == constants.DT_REMOTE_RAID1:
+ disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
+ elif iobj.disk_template in constants.DTS_NET_MIRROR:
# make sure the disks are not degraded (still sync-ing is ok)
time.sleep(15)
feedback_fn("* checking mirrors status")
- disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
+ disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
else:
disk_abort = False
logger.Debug("connecting to console of %s on %s" % (instance.name, node))
hyper = hypervisor.GetHypervisor()
- console_cmd = hyper.GetShellCommandForConsole(instance.name)
+ console_cmd = hyper.GetShellCommandForConsole(instance)
# build ssh cmdline
argv = ["ssh", "-q", "-t"]
argv.extend(ssh.KNOWN_HOSTS_OPTS)
raise errors.OpPrereqError("Can't find this device ('%s') in the"
" instance." % self.op.disk_name)
if len(disk.children) > 1:
- raise errors.OpPrereqError("The device already has two slave"
- " devices.\n"
- "This would create a 3-disk raid1"
- " which we don't allow.")
+ raise errors.OpPrereqError("The device already has two slave devices."
+ " This would create a 3-disk raid1 which we"
+ " don't allow.")
self.disk = disk
def Exec(self, feedback_fn):
logger.Info("adding new mirror component on secondary")
#HARDCODE
- if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False,
+ if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
+ new_drbd, False,
_GetInstanceInfoText(instance)):
raise errors.OpExecError("Failed to create new component on secondary"
" node %s" % remote_node)
logger.Info("adding new mirror component on primary")
#HARDCODE
- if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd,
+ if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
+ instance, new_drbd,
_GetInstanceInfoText(instance)):
# remove secondary dev
self.cfg.SetDiskID(new_drbd, remote_node)
# the device exists now
# call the primary node to add the mirror to md
logger.Info("adding new mirror component to md")
- if not rpc.call_blockdev_addchild(instance.primary_node,
- disk, new_drbd):
+ if not rpc.call_blockdev_addchildren(instance.primary_node,
+ disk, [new_drbd]):
logger.Error("Can't add mirror compoment to md!")
self.cfg.SetDiskID(new_drbd, remote_node)
if not rpc.call_blockdev_remove(remote_node, new_drbd):
self.cfg.AddInstance(instance)
- _WaitForSync(self.cfg, instance)
+ _WaitForSync(self.cfg, instance, self.proc)
return 0
raise errors.OpPrereqError("Can't find this device ('%s') in the"
" instance." % self.op.disk_name)
for child in disk.children:
- if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
+ if (child.dev_type == constants.LD_DRBD7 and
+ child.logical_id[2] == self.op.disk_id):
break
else:
raise errors.OpPrereqError("Can't find the device with this port.")
child = self.child
logger.Info("remove mirror component")
self.cfg.SetDiskID(disk, instance.primary_node)
- if not rpc.call_blockdev_removechild(instance.primary_node,
- disk, child):
+ if not rpc.call_blockdev_removechildren(instance.primary_node,
+ disk, [child]):
raise errors.OpExecError("Can't remove child from mirror.")
for node in child.logical_id[:2]:
"""
HPATH = "mirrors-replace"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = ["instance_name"]
+ _OP_REQP = ["instance_name", "mode", "disks"]
def BuildHooksEnv(self):
"""Build hooks env.
"""
env = {
+ "MODE": self.op.mode,
"NEW_SECONDARY": self.op.remote_node,
"OLD_SECONDARY": self.instance.secondary_nodes[0],
}
env.update(_BuildInstanceHookEnvByObject(self.instance))
- nl = [self.sstore.GetMasterNode(),
- self.instance.primary_node] + list(self.instance.secondary_nodes)
+ nl = [
+ self.sstore.GetMasterNode(),
+ self.instance.primary_node,
+ ]
+ if self.op.remote_node is not None:
+ nl.append(self.op.remote_node)
return env, nl, nl
def CheckPrereq(self):
raise errors.OpPrereqError("Instance '%s' not known" %
self.op.instance_name)
self.instance = instance
+ self.op.instance_name = instance.name
- if instance.disk_template != constants.DT_REMOTE_RAID1:
+ if instance.disk_template not in constants.DTS_NET_MIRROR:
raise errors.OpPrereqError("Instance's disk layout is not"
- " remote_raid1.")
+ " network mirrored.")
if len(instance.secondary_nodes) != 1:
raise errors.OpPrereqError("The instance has a strange layout,"
" expected one secondary but found %d" %
len(instance.secondary_nodes))
+ self.sec_node = instance.secondary_nodes[0]
+
remote_node = getattr(self.op, "remote_node", None)
- if remote_node is None:
- remote_node = instance.secondary_nodes[0]
- else:
+ if remote_node is not None:
remote_node = self.cfg.ExpandNodeName(remote_node)
if remote_node is None:
raise errors.OpPrereqError("Node '%s' not known" %
self.op.remote_node)
+ self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
+ else:
+ self.remote_node_info = None
if remote_node == instance.primary_node:
raise errors.OpPrereqError("The specified node is the primary node of"
" the instance.")
+ elif remote_node == self.sec_node:
+ if self.op.mode == constants.REPLACE_DISK_SEC:
+ # this is for DRBD8, where we can't execute the same mode of
+ # replacement as for drbd7 (no different port allocated)
+ raise errors.OpPrereqError("Same secondary given, cannot execute"
+ " replacement")
+ # the user gave the current secondary, switch to
+ # 'no-replace-secondary' mode for drbd7
+ remote_node = None
+ if (instance.disk_template == constants.DT_REMOTE_RAID1 and
+ self.op.mode != constants.REPLACE_DISK_ALL):
+ raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
+ " disks replacement, not individual ones")
+ if instance.disk_template == constants.DT_DRBD8:
+ if (self.op.mode == constants.REPLACE_DISK_ALL and
+ remote_node is not None):
+ # switch to replace secondary mode
+ self.op.mode = constants.REPLACE_DISK_SEC
+
+ if self.op.mode == constants.REPLACE_DISK_ALL:
+ raise errors.OpPrereqError("Template 'drbd' only allows primary or"
+ " secondary disk replacement, not"
+ " both at once")
+ elif self.op.mode == constants.REPLACE_DISK_PRI:
+ if remote_node is not None:
+ raise errors.OpPrereqError("Template 'drbd' does not allow changing"
+ " the secondary while doing a primary"
+ " node disk replacement")
+ self.tgt_node = instance.primary_node
+ self.oth_node = instance.secondary_nodes[0]
+ elif self.op.mode == constants.REPLACE_DISK_SEC:
+ self.new_node = remote_node # this can be None, in which case
+ # we don't change the secondary
+ self.tgt_node = instance.secondary_nodes[0]
+ self.oth_node = instance.primary_node
+ else:
+ raise errors.ProgrammerError("Unhandled disk replace mode")
+
+ for name in self.op.disks:
+ if instance.FindDisk(name) is None:
+ raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
+ (name, instance.name))
self.op.remote_node = remote_node
- def Exec(self, feedback_fn):
+ def _ExecRR1(self, feedback_fn):
"""Replace the disks of an instance.
"""
instance = self.instance
iv_names = {}
# start of work
- remote_node = self.op.remote_node
+ if self.op.remote_node is None:
+ remote_node = self.sec_node
+ else:
+ remote_node = self.op.remote_node
cfg = self.cfg
for dev in instance.disks:
size = dev.size
logger.Info("adding new mirror component on secondary for %s" %
dev.iv_name)
#HARDCODE
- if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False,
+ if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
+ new_drbd, False,
_GetInstanceInfoText(instance)):
- raise errors.OpExecError("Failed to create new component on"
- " secondary node %s\n"
- "Full abort, cleanup manually!" %
+ raise errors.OpExecError("Failed to create new component on secondary"
+ " node %s. Full abort, cleanup manually!" %
remote_node)
logger.Info("adding new mirror component on primary")
#HARDCODE
- if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd,
+ if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
+ instance, new_drbd,
_GetInstanceInfoText(instance)):
# remove secondary dev
cfg.SetDiskID(new_drbd, remote_node)
rpc.call_blockdev_remove(remote_node, new_drbd)
- raise errors.OpExecError("Failed to create volume on primary!\n"
- "Full abort, cleanup manually!!")
+ raise errors.OpExecError("Failed to create volume on primary!"
+ " Full abort, cleanup manually!!")
# the device exists now
# call the primary node to add the mirror to md
logger.Info("adding new mirror component to md")
- if not rpc.call_blockdev_addchild(instance.primary_node, dev,
- new_drbd):
+ if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
+ [new_drbd]):
logger.Error("Can't add mirror compoment to md!")
cfg.SetDiskID(new_drbd, remote_node)
if not rpc.call_blockdev_remove(remote_node, new_drbd):
# this can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its
# return value
- _WaitForSync(cfg, instance, unlock=True)
+ _WaitForSync(cfg, instance, self.proc, unlock=True)
# so check manually all the devices
for name in iv_names:
dev, child, new_drbd = iv_names[name]
logger.Info("remove mirror %s component" % name)
cfg.SetDiskID(dev, instance.primary_node)
- if not rpc.call_blockdev_removechild(instance.primary_node,
- dev, child):
+ if not rpc.call_blockdev_removechildren(instance.primary_node,
+ dev, [child]):
logger.Error("Can't remove child from mirror, aborting"
" *this device cleanup*.\nYou need to cleanup manually!!")
continue
cfg.AddInstance(instance)
+ def _ExecD8DiskOnly(self, feedback_fn):
+ """Replace a disk on the primary or secondary for dbrd8.
+
+ The algorithm for replace is quite complicated:
+ - for each disk to be replaced:
+ - create new LVs on the target node with unique names
+ - detach old LVs from the drbd device
+ - rename old LVs to name_replaced.<time_t>
+ - rename new LVs to old LVs
+ - attach the new LVs (with the old names now) to the drbd device
+ - wait for sync across all devices
+ - for each modified disk:
+ - remove old LVs (which have the name name_replaced.<time_t>)
+
+ Failures are not very well handled.
+
+ """
+ steps_total = 6
+ warning, info = (self.proc.LogWarning, self.proc.LogInfo)
+ instance = self.instance
+ iv_names = {}
+ vgname = self.cfg.GetVGName()
+ # start of work
+ cfg = self.cfg
+ tgt_node = self.tgt_node
+ oth_node = self.oth_node
+
+ # Step: check device activation
+ self.proc.LogStep(1, steps_total, "check device existence")
+ info("checking volume groups")
+ my_vg = cfg.GetVGName()
+ results = rpc.call_vg_list([oth_node, tgt_node])
+ if not results:
+ raise errors.OpExecError("Can't list volume groups on the nodes")
+ for node in oth_node, tgt_node:
+ res = results.get(node, False)
+ if not res or my_vg not in res:
+ raise errors.OpExecError("Volume group '%s' not found on %s" %
+ (my_vg, node))
+ for dev in instance.disks:
+ if dev.iv_name not in self.op.disks:
+ continue
+ for node in tgt_node, oth_node:
+ info("checking %s on %s" % (dev.iv_name, node))
+ cfg.SetDiskID(dev, node)
+ if not rpc.call_blockdev_find(node, dev):
+ raise errors.OpExecError("Can't find device %s on node %s" %
+ (dev.iv_name, node))
+
+ # Step: check other node consistency
+ self.proc.LogStep(2, steps_total, "check peer consistency")
+ for dev in instance.disks:
+ if dev.iv_name not in self.op.disks:
+ continue
+ info("checking %s consistency on %s" % (dev.iv_name, oth_node))
+ if not _CheckDiskConsistency(self.cfg, dev, oth_node,
+ oth_node == instance.primary_node):
+ raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
+ " to replace disks on this node (%s)" %
+ (oth_node, tgt_node))
+
+ # Step: create new storage
+ self.proc.LogStep(3, steps_total, "allocate new storage")
+ for dev in instance.disks:
+ if not dev.iv_name in self.op.disks:
+ continue
+ size = dev.size
+ cfg.SetDiskID(dev, tgt_node)
+ lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
+ names = _GenerateUniqueNames(cfg, lv_names)
+ lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
+ logical_id=(vgname, names[0]))
+ lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
+ logical_id=(vgname, names[1]))
+ new_lvs = [lv_data, lv_meta]
+ old_lvs = dev.children
+ iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
+ info("creating new local storage on %s for %s" %
+ (tgt_node, dev.iv_name))
+ # since we *always* want to create this LV, we use the
+ # _Create...OnPrimary (which forces the creation), even if we
+ # are talking about the secondary node
+ for new_lv in new_lvs:
+ if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
+ _GetInstanceInfoText(instance)):
+ raise errors.OpExecError("Failed to create new LV named '%s' on"
+ " node '%s'" %
+ (new_lv.logical_id[1], tgt_node))
+
+ # Step: for each lv, detach+rename*2+attach
+ self.proc.LogStep(4, steps_total, "change drbd configuration")
+ for dev, old_lvs, new_lvs in iv_names.itervalues():
+ info("detaching %s drbd from local storage" % dev.iv_name)
+ if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
+ raise errors.OpExecError("Can't detach drbd from local storage on node"
+ " %s for device %s" % (tgt_node, dev.iv_name))
+ #dev.children = []
+ #cfg.Update(instance)
+
+ # ok, we created the new LVs, so now we know we have the needed
+ # storage; as such, we proceed on the target node to rename
+ # old_lv to _old, and new_lv to old_lv; note that we rename LVs
+ # using the assumption that logical_id == physical_id (which in
+ # turn is the unique_id on that node)
+
+ # FIXME(iustin): use a better name for the replaced LVs
+ temp_suffix = int(time.time())
+ ren_fn = lambda d, suff: (d.physical_id[0],
+ d.physical_id[1] + "_replaced-%s" % suff)
+ # build the rename list based on what LVs exist on the node
+ rlist = []
+ for to_ren in old_lvs:
+ find_res = rpc.call_blockdev_find(tgt_node, to_ren)
+ if find_res is not None: # device exists
+ rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
+
+ info("renaming the old LVs on the target node")
+ if not rpc.call_blockdev_rename(tgt_node, rlist):
+ raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
+ # now we rename the new LVs to the old LVs
+ info("renaming the new LVs on the target node")
+ rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
+ if not rpc.call_blockdev_rename(tgt_node, rlist):
+ raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
+
+ for old, new in zip(old_lvs, new_lvs):
+ new.logical_id = old.logical_id
+ cfg.SetDiskID(new, tgt_node)
+
+ for disk in old_lvs:
+ disk.logical_id = ren_fn(disk, temp_suffix)
+ cfg.SetDiskID(disk, tgt_node)
+
+ # now that the new lvs have the old name, we can add them to the device
+ info("adding new mirror component on %s" % tgt_node)
+ if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
+ for new_lv in new_lvs:
+ if not rpc.call_blockdev_remove(tgt_node, new_lv):
+ warning("Can't rollback device %s", hint="manually cleanup unused"
+ " logical volumes")
+ raise errors.OpExecError("Can't add local storage to drbd")
+
+ dev.children = new_lvs
+ cfg.Update(instance)
+
+ # Step: wait for sync
+
+ # this can fail as the old devices are degraded and _WaitForSync
+ # does a combined result over all disks, so we don't check its
+ # return value
+ self.proc.LogStep(5, steps_total, "sync devices")
+ _WaitForSync(cfg, instance, self.proc, unlock=True)
+
+ # so check manually all the devices
+ for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+ cfg.SetDiskID(dev, instance.primary_node)
+ is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
+ if is_degr:
+ raise errors.OpExecError("DRBD device %s is degraded!" % name)
+
+ # Step: remove old storage
+ self.proc.LogStep(6, steps_total, "removing old storage")
+ for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+ info("remove logical volumes for %s" % name)
+ for lv in old_lvs:
+ cfg.SetDiskID(lv, tgt_node)
+ if not rpc.call_blockdev_remove(tgt_node, lv):
+ warning("Can't remove old LV", hint="manually remove unused LVs")
+ continue
+
+ def _ExecD8Secondary(self, feedback_fn):
+ """Replace the secondary node for drbd8.
+
+ The algorithm for replace is quite complicated:
+ - for all disks of the instance:
+ - create new LVs on the new node with the same names
+ - shutdown the drbd device on the old secondary
+ - disconnect the drbd network on the primary
+ - create the drbd device on the new secondary
+ - network attach the drbd on the primary, using an artifice:
+ the drbd code for Attach() will connect to the network if it
+ finds a device which is connected to the good local disks but
+ not network enabled
+ - wait for sync across all devices
+ - remove all disks from the old secondary
+
+ Failures are not very well handled.
+
+ """
+ steps_total = 6
+ warning, info = (self.proc.LogWarning, self.proc.LogInfo)
+ instance = self.instance
+ iv_names = {}
+ vgname = self.cfg.GetVGName()
+ # start of work
+ cfg = self.cfg
+ old_node = self.tgt_node
+ new_node = self.new_node
+ pri_node = instance.primary_node
+
+ # Step: check device activation
+ self.proc.LogStep(1, steps_total, "check device existence")
+ info("checking volume groups")
+ my_vg = cfg.GetVGName()
+ results = rpc.call_vg_list([pri_node, new_node])
+ if not results:
+ raise errors.OpExecError("Can't list volume groups on the nodes")
+ for node in pri_node, new_node:
+ res = results.get(node, False)
+ if not res or my_vg not in res:
+ raise errors.OpExecError("Volume group '%s' not found on %s" %
+ (my_vg, node))
+ for dev in instance.disks:
+ if dev.iv_name not in self.op.disks:
+ continue
+ info("checking %s on %s" % (dev.iv_name, pri_node))
+ cfg.SetDiskID(dev, pri_node)
+ if not rpc.call_blockdev_find(pri_node, dev):
+ raise errors.OpExecError("Can't find device %s on node %s" %
+ (dev.iv_name, pri_node))
+
+ # Step: check other node consistency
+ self.proc.LogStep(2, steps_total, "check peer consistency")
+ for dev in instance.disks:
+ if dev.iv_name not in self.op.disks:
+ continue
+ info("checking %s consistency on %s" % (dev.iv_name, pri_node))
+ if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
+ raise errors.OpExecError("Primary node (%s) has degraded storage,"
+ " unsafe to replace the secondary" %
+ pri_node)
+
+ # Step: create new storage
+ self.proc.LogStep(3, steps_total, "allocate new storage")
+ for dev in instance.disks:
+ size = dev.size
+ info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
+ # since we *always* want to create this LV, we use the
+ # _Create...OnPrimary (which forces the creation), even if we
+ # are talking about the secondary node
+ for new_lv in dev.children:
+ if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
+ _GetInstanceInfoText(instance)):
+ raise errors.OpExecError("Failed to create new LV named '%s' on"
+ " node '%s'" %
+ (new_lv.logical_id[1], new_node))
+
+ iv_names[dev.iv_name] = (dev, dev.children)
+
+ self.proc.LogStep(4, steps_total, "changing drbd configuration")
+ for dev in instance.disks:
+ size = dev.size
+ info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
+ # create new devices on new_node
+ new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
+ logical_id=(pri_node, new_node,
+ dev.logical_id[2]),
+ children=dev.children)
+ if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
+ new_drbd, False,
+ _GetInstanceInfoText(instance)):
+ raise errors.OpExecError("Failed to create new DRBD on"
+ " node '%s'" % new_node)
+
+ for dev in instance.disks:
+ # we have new devices, shutdown the drbd on the old secondary
+ info("shutting down drbd for %s on old node" % dev.iv_name)
+ cfg.SetDiskID(dev, old_node)
+ if not rpc.call_blockdev_shutdown(old_node, dev):
+ warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
+ hint="Please cleanup this device manually as soon as possible")
+
+ info("detaching primary drbds from the network (=> standalone)")
+ done = 0
+ for dev in instance.disks:
+ cfg.SetDiskID(dev, pri_node)
+ # set the physical (unique in bdev terms) id to None, meaning
+ # detach from network
+ dev.physical_id = (None,) * len(dev.physical_id)
+ # and 'find' the device, which will 'fix' it to match the
+ # standalone state
+ if rpc.call_blockdev_find(pri_node, dev):
+ done += 1
+ else:
+ warning("Failed to detach drbd %s from network, unusual case" %
+ dev.iv_name)
+
+ if not done:
+ # no detaches succeeded (very unlikely)
+ raise errors.OpExecError("Can't detach at least one DRBD from old node")
+
+ # if we managed to detach at least one, we update all the disks of
+ # the instance to point to the new secondary
+ info("updating instance configuration")
+ for dev in instance.disks:
+ dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
+ cfg.SetDiskID(dev, pri_node)
+ cfg.Update(instance)
+
+ # and now perform the drbd attach
+ info("attaching primary drbds to new secondary (standalone => connected)")
+ failures = []
+ for dev in instance.disks:
+ info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
+ # since the attach is smart, it's enough to 'find' the device,
+ # it will automatically activate the network, if the physical_id
+ # is correct
+ cfg.SetDiskID(dev, pri_node)
+ if not rpc.call_blockdev_find(pri_node, dev):
+ warning("can't attach drbd %s to new secondary!" % dev.iv_name,
+ "please do a gnt-instance info to see the status of disks")
+
+ # this can fail as the old devices are degraded and _WaitForSync
+ # does a combined result over all disks, so we don't check its
+ # return value
+ self.proc.LogStep(5, steps_total, "sync devices")
+ _WaitForSync(cfg, instance, self.proc, unlock=True)
+
+ # so check manually all the devices
+ for name, (dev, old_lvs) in iv_names.iteritems():
+ cfg.SetDiskID(dev, pri_node)
+ is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
+ if is_degr:
+ raise errors.OpExecError("DRBD device %s is degraded!" % name)
+
+ self.proc.LogStep(6, steps_total, "removing old storage")
+ for name, (dev, old_lvs) in iv_names.iteritems():
+ info("remove logical volumes for %s" % name)
+ for lv in old_lvs:
+ cfg.SetDiskID(lv, old_node)
+ if not rpc.call_blockdev_remove(old_node, lv):
+ warning("Can't remove LV on old secondary",
+ hint="Cleanup stale volumes by hand")
+
+ def Exec(self, feedback_fn):
+ """Execute disk replacement.
+
+ This dispatches the disk replacement to the appropriate handler.
+
+ """
+ instance = self.instance
+ if instance.disk_template == constants.DT_REMOTE_RAID1:
+ fn = self._ExecRR1
+ elif instance.disk_template == constants.DT_DRBD8:
+ if self.op.remote_node is None:
+ fn = self._ExecD8DiskOnly
+ else:
+ fn = self._ExecD8Secondary
+ else:
+ raise errors.ProgrammerError("Unhandled disk replacement case")
+ return fn(feedback_fn)
+
class LUQueryInstanceData(NoHooksLU):
"""Query runtime instance data.
instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
if instance is None:
raise errors.OpPrereqError("No such instance name '%s'" % name)
- self.wanted_instances.append(instance)
+ self.wanted_instances.append(instance)
else:
self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
in self.cfg.GetInstanceList()]
"""
self.cfg.SetDiskID(dev, instance.primary_node)
dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
- if dev.dev_type == "drbd":
+ if dev.dev_type in constants.LDS_DRBD:
# we change the snode then (otherwise we use the one passed in)
if dev.logical_id[0] == instance.primary_node:
snode = dev.logical_id[1]
"memory": instance.memory,
"nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
"disks": disks,
+ "network_port": instance.network_port,
+ "vcpus": instance.vcpus,
+ "kernel_path": instance.kernel_path,
+ "initrd_path": instance.initrd_path,
+ "hvm_boot_order": instance.hvm_boot_order,
}
result[instance.name] = idict
args['memory'] = self.mem
if self.vcpus:
args['vcpus'] = self.vcpus
- if self.do_ip or self.do_bridge:
+ if self.do_ip or self.do_bridge or self.mac:
if self.do_ip:
ip = self.ip
else:
bridge = self.bridge
else:
bridge = self.instance.nics[0].bridge
- args['nics'] = [(ip, bridge)]
+ if self.mac:
+ mac = self.mac
+ else:
+ mac = self.instance.nics[0].mac
+ args['nics'] = [(ip, bridge, mac)]
env = _BuildInstanceHookEnvByObject(self.instance, override=args)
nl = [self.sstore.GetMasterNode(),
self.instance.primary_node] + list(self.instance.secondary_nodes)
self.mem = getattr(self.op, "mem", None)
self.vcpus = getattr(self.op, "vcpus", None)
self.ip = getattr(self.op, "ip", None)
+ self.mac = getattr(self.op, "mac", None)
self.bridge = getattr(self.op, "bridge", None)
- if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
+ self.kernel_path = getattr(self.op, "kernel_path", None)
+ self.initrd_path = getattr(self.op, "initrd_path", None)
+ self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
+ all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
+ self.kernel_path, self.initrd_path, self.hvm_boot_order]
+ if all_parms.count(None) == len(all_parms):
raise errors.OpPrereqError("No changes submitted")
if self.mem is not None:
try:
else:
self.do_ip = False
self.do_bridge = (self.bridge is not None)
+ if self.mac is not None:
+ if self.cfg.IsMacInUse(self.mac):
+ raise errors.OpPrereqError("MAC address %s already in use in cluster" %
+ self.mac)
+ if not utils.IsValidMac(self.mac):
+ raise errors.OpPrereqError("Invalid MAC address %s" % self.mac)
+
+ if self.kernel_path is not None:
+ self.do_kernel_path = True
+ if self.kernel_path == constants.VALUE_NONE:
+ raise errors.OpPrereqError("Can't set instance to no kernel")
+
+ if self.kernel_path != constants.VALUE_DEFAULT:
+ if not os.path.isabs(self.kernel_path):
+ raise errors.OpPrereqError("The kernel path must be an absolute"
+ " filename")
+ else:
+ self.do_kernel_path = False
+
+ if self.initrd_path is not None:
+ self.do_initrd_path = True
+ if self.initrd_path not in (constants.VALUE_NONE,
+ constants.VALUE_DEFAULT):
+ if not os.path.isabs(self.initrd_path):
+ raise errors.OpPrereqError("The initrd path must be an absolute"
+ " filename")
+ else:
+ self.do_initrd_path = False
+
+ # boot order verification
+ if self.hvm_boot_order is not None:
+ if self.hvm_boot_order != constants.VALUE_DEFAULT:
+ if len(self.hvm_boot_order.strip("acdn")) != 0:
+ raise errors.OpPrereqError("invalid boot order specified,"
+ " must be one or more of [acdn]"
+ " or 'default'")
instance = self.cfg.GetInstanceInfo(
self.cfg.ExpandInstanceName(self.op.instance_name))
if self.bridge:
instance.nics[0].bridge = self.bridge
result.append(("bridge", self.bridge))
+ if self.mac:
+ instance.nics[0].mac = self.mac
+ result.append(("mac", self.mac))
+ if self.do_kernel_path:
+ instance.kernel_path = self.kernel_path
+ result.append(("kernel_path", self.kernel_path))
+ if self.do_initrd_path:
+ instance.initrd_path = self.initrd_path
+ result.append(("initrd_path", self.initrd_path))
+ if self.hvm_boot_order:
+ if self.hvm_boot_order == constants.VALUE_DEFAULT:
+ instance.hvm_boot_order = None
+ else:
+ instance.hvm_boot_order = self.hvm_boot_order
+ result.append(("hvm_boot_order", self.hvm_boot_order))
self.cfg.AddInstance(instance)
instance = self.instance
dst_node = self.dst_node
src_node = instance.primary_node
- # shutdown the instance, unless requested not to do so
if self.op.shutdown:
- op = opcodes.OpShutdownInstance(instance_name=instance.name)
- self.processor.ChainOpCode(op, feedback_fn)
+ # shutdown the instance, but not the disks
+ if not rpc.call_instance_shutdown(src_node, instance):
+ raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+ (instance.name, src_node))
vgname = self.cfg.GetVGName()
logger.Error("could not snapshot block device %s on node %s" %
(disk.logical_id[1], src_node))
else:
- new_dev = objects.Disk(dev_type="lvm", size=disk.size,
+ new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
logical_id=(vgname, new_dev_name),
physical_id=(vgname, new_dev_name),
iv_name=disk.iv_name)
snap_disks.append(new_dev)
finally:
- if self.op.shutdown:
- op = opcodes.OpStartupInstance(instance_name=instance.name,
- force=False)
- self.processor.ChainOpCode(op, feedback_fn)
+ if self.op.shutdown and instance.status == "up":
+ if not rpc.call_instance_start(src_node, instance, None):
+ _ShutdownInstanceDisks(instance, self.cfg)
+ raise errors.OpExecError("Could not start instance")
# TODO: check for size
# substitutes an empty list with the full cluster node list.
if nodelist:
op = opcodes.OpQueryExports(nodes=nodelist)
- exportlist = self.processor.ChainOpCode(op, feedback_fn)
+ exportlist = self.proc.ChainOpCode(op)
for node in exportlist:
if instance.name in exportlist[node]:
if not rpc.call_export_remove(node, instance.name):
self.op.name = name
self.target = self.cfg.GetNodeInfo(name)
elif self.op.kind == constants.TAG_INSTANCE:
- name = self.cfg.ExpandInstanceName(name)
+ name = self.cfg.ExpandInstanceName(self.op.name)
if name is None:
raise errors.OpPrereqError("Invalid instance name (%s)" %
(self.op.name,))
return self.target.GetTags()
-class LUAddTag(TagsLU):
+class LUSearchTags(NoHooksLU):
+ """Searches the tags for a given pattern.
+
+ """
+ _OP_REQP = ["pattern"]
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This checks the pattern passed for validity by compiling it.
+
+ """
+ try:
+ self.re = re.compile(self.op.pattern)
+ except re.error, err:
+ raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
+ (self.op.pattern, err))
+
+ def Exec(self, feedback_fn):
+ """Returns the tag list.
+
+ """
+ cfg = self.cfg
+ tgts = [("/cluster", cfg.GetClusterInfo())]
+ ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
+ tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
+ nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
+ tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
+ results = []
+ for path, target in tgts:
+ for tag in target.GetTags():
+ if self.re.search(tag):
+ results.append((path, tag))
+ return results
+
+
+class LUAddTags(TagsLU):
"""Sets a tag on a given object.
"""
- _OP_REQP = ["kind", "name", "tag"]
+ _OP_REQP = ["kind", "name", "tags"]
def CheckPrereq(self):
"""Check prerequisites.
"""
TagsLU.CheckPrereq(self)
- objects.TaggableObject.ValidateTag(self.op.tag)
+ for tag in self.op.tags:
+ objects.TaggableObject.ValidateTag(tag)
def Exec(self, feedback_fn):
"""Sets the tag.
"""
try:
- self.target.AddTag(self.op.tag)
+ for tag in self.op.tags:
+ self.target.AddTag(tag)
except errors.TagError, err:
raise errors.OpExecError("Error while setting tag: %s" % str(err))
try:
" aborted. Please retry.")
-class LUDelTag(TagsLU):
- """Delete a tag from a given object.
+class LUDelTags(TagsLU):
+ """Delete a list of tags from a given object.
"""
- _OP_REQP = ["kind", "name", "tag"]
+ _OP_REQP = ["kind", "name", "tags"]
def CheckPrereq(self):
"""Check prerequisites.
"""
TagsLU.CheckPrereq(self)
- objects.TaggableObject.ValidateTag(self.op.tag)
- if self.op.tag not in self.target.GetTags():
- raise errors.OpPrereqError("Tag not found")
+ for tag in self.op.tags:
+ objects.TaggableObject.ValidateTag(tag)
+ del_tags = frozenset(self.op.tags)
+ cur_tags = self.target.GetTags()
+ if not del_tags <= cur_tags:
+ diff_tags = del_tags - cur_tags
+ diff_names = ["'%s'" % tag for tag in diff_tags]
+ diff_names.sort()
+ raise errors.OpPrereqError("Tag(s) %s not found" %
+ (",".join(diff_names)))
def Exec(self, feedback_fn):
"""Remove the tag from the object.
"""
- self.target.RemoveTag(self.op.tag)
+ for tag in self.op.tags:
+ self.target.RemoveTag(tag)
try:
self.cfg.Update(self.target)
except errors.ConfigurationError:
raise errors.OpRetryError("There has been a modification to the"
" config file and the operation has been"
" aborted. Please retry.")
+
+class LUTestDelay(NoHooksLU):
+ """Sleep for a specified amount of time.
+
+ This LU sleeps on the master and/or nodes for a specified amount of
+ time.
+
+ """
+ _OP_REQP = ["duration", "on_master", "on_nodes"]
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This checks that we have a good list of nodes and/or the duration
+ is valid.
+
+ """
+
+ if self.op.on_nodes:
+ self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
+
+ def Exec(self, feedback_fn):
+ """Do the actual sleep.
+
+ """
+ if self.op.on_master:
+ if not utils.TestDelay(self.op.duration):
+ raise errors.OpExecError("Error during master delay test")
+ if self.op.on_nodes:
+ result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
+ if not result:
+ raise errors.OpExecError("Complete failure from rpc call")
+ for node, node_result in result.items():
+ if not node_result:
+ raise errors.OpExecError("Failure during rpc call to node %s,"
+ " result: %s" % (node, node_result))
+
+
+class IAllocator(object):
+ """IAllocator framework.
+
+ An IAllocator instance has several sets of attributes:
+ - cfg/sstore that are needed to query the cluster
+ - input data (all members of the _KEYS class attribute are required)
+ - four buffer attributes (in_text, out_text, in_data, out_data) that
+ represent the input (to the external script) in text and data
+ structure format, and the output from it, again in both formats
+ - the result variables from the script (success, info, nodes) for
+ easy usage
+
+ """
+ _KEYS = [
+ "mode", "name",
+ "mem_size", "disks", "disk_template",
+ "os", "tags", "nics", "vcpus",
+ ]
+
+ def __init__(self, cfg, sstore, **kwargs):
+ self.cfg = cfg
+ self.sstore = sstore
+ # init buffer variables
+ self.in_text = self.out_text = self.in_data = self.out_data = None
+ # init all input fields so that pylint is happy
+ self.mode = self.name = None
+ self.mem_size = self.disks = self.disk_template = None
+ self.os = self.tags = self.nics = self.vcpus = None
+ # init result fields
+ self.success = self.info = self.nodes = None
+ for key in kwargs:
+ if key not in self._KEYS:
+ raise errors.ProgrammerError("Invalid input parameter '%s' to"
+ " IAllocator" % key)
+ setattr(self, key, kwargs[key])
+ for key in self._KEYS:
+ if key not in kwargs:
+ raise errors.ProgrammerError("Missing input parameter '%s' to"
+ " IAllocator" % key)
+ self._BuildInputData()
+
+ def _ComputeClusterData(self):
+ """Compute the generic allocator input data.
+
+ This is the data that is independent of the actual operation.
+
+ """
+ cfg = self.cfg
+ # cluster data
+ data = {
+ "version": 1,
+ "cluster_name": self.sstore.GetClusterName(),
+ "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
+ # we don't have job IDs
+ }
+
+ # node data
+ node_results = {}
+ node_list = cfg.GetNodeList()
+ node_data = rpc.call_node_info(node_list, cfg.GetVGName())
+ for nname in node_list:
+ ninfo = cfg.GetNodeInfo(nname)
+ if nname not in node_data or not isinstance(node_data[nname], dict):
+ raise errors.OpExecError("Can't get data for node %s" % nname)
+ remote_info = node_data[nname]
+ for attr in ['memory_total', 'memory_free',
+ 'vg_size', 'vg_free']:
+ if attr not in remote_info:
+ raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
+ (nname, attr))
+ try:
+ int(remote_info[attr])
+ except ValueError, err:
+ raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
+ " %s" % (nname, attr, str(err)))
+ pnr = {
+ "tags": list(ninfo.GetTags()),
+ "total_memory": utils.TryConvert(int, remote_info['memory_total']),
+ "free_memory": utils.TryConvert(int, remote_info['memory_free']),
+ "total_disk": utils.TryConvert(int, remote_info['vg_size']),
+ "free_disk": utils.TryConvert(int, remote_info['vg_free']),
+ "primary_ip": ninfo.primary_ip,
+ "secondary_ip": ninfo.secondary_ip,
+ }
+ node_results[nname] = pnr
+ data["nodes"] = node_results
+
+ # instance data
+ instance_data = {}
+ i_list = cfg.GetInstanceList()
+ for iname in i_list:
+ iinfo = cfg.GetInstanceInfo(iname)
+ nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
+ for n in iinfo.nics]
+ pir = {
+ "tags": list(iinfo.GetTags()),
+ "should_run": iinfo.status == "up",
+ "vcpus": iinfo.vcpus,
+ "memory": iinfo.memory,
+ "os": iinfo.os,
+ "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
+ "nics": nic_data,
+ "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
+ "disk_template": iinfo.disk_template,
+ }
+ instance_data[iname] = pir
+
+ data["instances"] = instance_data
+
+ self.in_data = data
+
+ def _AddNewInstance(self):
+ """Add new instance data to allocator structure.
+
+ This in combination with _ComputeClusterData will create the
+ correct structure needed as input for the allocator.
+
+ The checks for the completeness of the opcode must have already been
+ done.
+
+ """
+ data = self.in_data
+ if len(self.disks) != 2:
+ raise errors.OpExecError("Only two-disk configurations supported")
+
+ disk_space = _ComputeDiskSize(self.disk_template,
+ self.disks[0]["size"], self.disks[1]["size"])
+
+ request = {
+ "type": "allocate",
+ "name": self.name,
+ "disk_template": self.disk_template,
+ "tags": self.tags,
+ "os": self.os,
+ "vcpus": self.vcpus,
+ "memory": self.mem_size,
+ "disks": self.disks,
+ "disk_space_total": disk_space,
+ "nics": self.nics,
+ }
+ data["request"] = request
+
+ def _AddRelocateInstance(self):
+ """Add relocate instance data to allocator structure.
+
+ This in combination with _ComputeClusterData will create the
+ correct structure needed as input for the allocator.
+
+ The checks for the completeness of the opcode must have already been
+ done.
+
+ """
+ data = self.in_data
+ request = {
+ "type": "replace_secondary",
+ "name": self.name,
+ }
+ data["request"] = request
+
+ def _BuildInputData(self):
+ """Build input data structures.
+
+ """
+ self._ComputeClusterData()
+
+ if self.mode == constants.IALLOCATOR_MODE_ALLOC:
+ self._AddNewInstance()
+ else:
+ self._AddRelocateInstance()
+
+ self.in_text = serializer.Dump(self.in_data)
+
+ def Run(self, name, validate=True):
+ """Run an instance allocator and return the results.
+
+ """
+ data = self.in_text
+
+ alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
+ os.path.isfile)
+ if alloc_script is None:
+ raise errors.OpExecError("Can't find allocator '%s'" % name)
+
+ fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
+ try:
+ os.write(fd, data)
+ os.close(fd)
+ result = utils.RunCmd([alloc_script, fin_name])
+ if result.failed:
+ raise errors.OpExecError("Instance allocator call failed: %s,"
+ " output: %s" %
+ (result.fail_reason, result.stdout))
+ finally:
+ os.unlink(fin_name)
+ self.out_text = result.stdout
+ if validate:
+ self._ValidateResult()
+
+ def _ValidateResult(self):
+ """Process the allocator results.
+
+ This will process and if successful save the result in
+ self.out_data and the other parameters.
+
+ """
+ try:
+ rdict = serializer.Load(self.out_text)
+ except Exception, err:
+ raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
+
+ if not isinstance(rdict, dict):
+ raise errors.OpExecError("Can't parse iallocator results: not a dict")
+
+ for key in "success", "info", "nodes":
+ if key not in rdict:
+ raise errors.OpExecError("Can't parse iallocator results:"
+ " missing key '%s'" % key)
+ setattr(self, key, rdict[key])
+
+ if not isinstance(rdict["nodes"], list):
+ raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
+ " is not a list")
+ self.out_data = rdict
+
+
+class LUTestAllocator(NoHooksLU):
+ """Run allocator tests.
+
+ This LU runs the allocator tests.
+
+ """
+ _OP_REQP = ["direction", "mode", "name"]
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This checks the opcode parameters depending on the direction and mode of the test.
+
+ """
+ if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
+ for attr in ["name", "mem_size", "disks", "disk_template",
+ "os", "tags", "nics", "vcpus"]:
+ if not hasattr(self.op, attr):
+ raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
+ attr)
+ iname = self.cfg.ExpandInstanceName(self.op.name)
+ if iname is not None:
+ raise errors.OpPrereqError("Instance '%s' already in the cluster" %
+ iname)
+ if not isinstance(self.op.nics, list):
+ raise errors.OpPrereqError("Invalid parameter 'nics'")
+ for row in self.op.nics:
+ if (not isinstance(row, dict) or
+ "mac" not in row or
+ "ip" not in row or
+ "bridge" not in row):
+ raise errors.OpPrereqError("Invalid contents of the"
+ " 'nics' parameter")
+ if not isinstance(self.op.disks, list):
+ raise errors.OpPrereqError("Invalid parameter 'disks'")
+ if len(self.op.disks) != 2:
+ raise errors.OpPrereqError("Only two-disk configurations supported")
+ for row in self.op.disks:
+ if (not isinstance(row, dict) or
+ "size" not in row or
+ not isinstance(row["size"], int) or
+ "mode" not in row or
+ row["mode"] not in ['r', 'w']):
+ raise errors.OpPrereqError("Invalid contents of the"
+ " 'disks' parameter")
+ elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
+ if not hasattr(self.op, "name"):
+ raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
+ fname = self.cfg.ExpandInstanceName(self.op.name)
+ if fname is None:
+ raise errors.OpPrereqError("Instance '%s' not found for relocation" %
+ self.op.name)
+ self.op.name = fname
+ else:
+ raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
+ self.op.mode)
+
+ if self.op.direction == constants.IALLOCATOR_DIR_OUT:
+ if not hasattr(self.op, "allocator") or self.op.allocator is None:
+ raise errors.OpPrereqError("Missing allocator name")
+ elif self.op.direction != constants.IALLOCATOR_DIR_IN:
+ raise errors.OpPrereqError("Wrong allocator test '%s'" %
+ self.op.direction)
+
+ def Exec(self, feedback_fn):
+ """Run the allocator test.
+
+ """
+ ial = IAllocator(self.cfg, self.sstore,
+ mode=self.op.mode,
+ name=self.op.name,
+ mem_size=self.op.mem_size,
+ disks=self.op.disks,
+ disk_template=self.op.disk_template,
+ os=self.op.os,
+ tags=self.op.tags,
+ nics=self.op.nics,
+ vcpus=self.op.vcpus,
+ )
+
+ if self.op.direction == constants.IALLOCATOR_DIR_IN:
+ result = ial.in_text
+ else:
+ ial.Run(self.op.allocator, validate=False)
+ result = ial.out_text
+ return result