diff --git a/lib/cmdlib.py b/lib/cmdlib.py index 8f5216f..cdac3d0 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -1,7 +1,7 @@ -#!/usr/bin/python +# # -# Copyright (C) 2006, 2007 Google Inc. +# Copyright (C) 2006, 2007, 2008 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -26,7 +26,6 @@ import os import os.path import sha -import socket import time import tempfile import re @@ -43,6 +42,8 @@ from ganeti import constants from ganeti import objects from ganeti import opcodes from ganeti import ssconf +from ganeti import serializer + class LogicalUnit(object): """Logical Unit base class. @@ -70,24 +71,36 @@ class LogicalUnit(object): validity. """ - self.processor = processor + self.proc = processor self.op = op self.cfg = cfg self.sstore = sstore + self.__ssh = None + for attr_name in self._OP_REQP: attr_val = getattr(op, attr_name, None) if attr_val is None: - raise errors.OpPrereqError, ("Required parameter '%s' missing" % - attr_name) + raise errors.OpPrereqError("Required parameter '%s' missing" % + attr_name) if self.REQ_CLUSTER: if not cfg.IsCluster(): - raise errors.OpPrereqError, ("Cluster not initialized yet," - " use 'gnt-cluster init' first.") + raise errors.OpPrereqError("Cluster not initialized yet," + " use 'gnt-cluster init' first.") if self.REQ_MASTER: master = sstore.GetMasterNode() - if master != socket.gethostname(): - raise errors.OpPrereqError, ("Commands must be run on the master" - " node %s" % master) + if master != utils.HostInfo().name: + raise errors.OpPrereqError("Commands must be run on the master" + " node %s" % master) + + def __GetSSH(self): + """Returns the SshRunner object + + """ + if not self.__ssh: + self.__ssh = ssh.SshRunner(self.sstore) + return self.__ssh + + ssh = property(fget=__GetSSH) def CheckPrereq(self): """Check prerequisites for this LU. @@ -161,31 +174,72 @@ class NoHooksLU(LogicalUnit): This is a no-op, since we don't run hooks. """ - return + return {}, [], [] + + +def _AddHostToEtcHosts(hostname): + """Wrapper around utils.SetEtcHostsEntry. + + """ + hi = utils.HostInfo(name=hostname) + utils.SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()]) + + +def _RemoveHostFromEtcHosts(hostname): + """Wrapper around utils.RemoveEtcHostsEntry. + + """ + hi = utils.HostInfo(name=hostname) + utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name) + utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName()) def _GetWantedNodes(lu, nodes): - """Returns list of checked and expanded nodes. + """Returns list of checked and expanded node names.
Args: nodes: List of nodes (strings) or None for all """ - if nodes is not None and not isinstance(nodes, list): - raise errors.OpPrereqError, "Invalid argument type 'nodes'" + if not isinstance(nodes, list): + raise errors.OpPrereqError("Invalid argument type 'nodes'") if nodes: - wanted_nodes = [] + wanted = [] for name in nodes: - node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name)) + node = lu.cfg.ExpandNodeName(name) if node is None: - raise errors.OpPrereqError, ("No such node name '%s'" % name) - wanted_nodes.append(node) + raise errors.OpPrereqError("No such node name '%s'" % name) + wanted.append(node) - return wanted_nodes else: - return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()] + wanted = lu.cfg.GetNodeList() + return utils.NiceSort(wanted) + + +def _GetWantedInstances(lu, instances): + """Returns list of checked and expanded instance names. + + Args: + instances: List of instances (strings) or None for all + + """ + if not isinstance(instances, list): + raise errors.OpPrereqError("Invalid argument type 'instances'") + + if instances: + wanted = [] + + for name in instances: + instance = lu.cfg.ExpandInstanceName(name) + if instance is None: + raise errors.OpPrereqError("No such instance name '%s'" % name) + wanted.append(instance) + + else: + wanted = lu.cfg.GetInstanceList() + return utils.NiceSort(wanted) def _CheckOutputFields(static, dynamic, selected): @@ -202,9 +256,9 @@ def _CheckOutputFields(static, dynamic, selected): all_fields = static_fields | dynamic_fields if not all_fields.issuperset(selected): - raise errors.OpPrereqError, ("Unknown output fields selected: %s" - % ",".join(frozenset(selected). - difference(all_fields))) + raise errors.OpPrereqError("Unknown output fields selected: %s" + % ",".join(frozenset(selected). + difference(all_fields))) def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, @@ -215,6 +269,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, secondary_nodes: List of secondary nodes as strings """ env = { + "OP_TARGET": name, "INSTANCE_NAME": name, "INSTANCE_PRIMARY": primary_node, "INSTANCE_SECONDARIES": " ".join(secondary_nodes), @@ -226,11 +281,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, if nics: nic_count = len(nics) - for idx, (ip, bridge) in enumerate(nics): + for idx, (ip, bridge, mac) in enumerate(nics): if ip is None: ip = "" env["INSTANCE_NIC%d_IP" % idx] = ip env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge + env["INSTANCE_NIC%d_HWADDR" % idx] = mac else: nic_count = 0 @@ -254,173 +310,13 @@ def _BuildInstanceHookEnvByObject(instance, override=None): 'status': instance.os, 'memory': instance.memory, 'vcpus': instance.vcpus, - 'nics': [(nic.ip, nic.bridge) for nic in instance.nics], + 'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics], } if override: args.update(override) return _BuildInstanceHookEnv(**args) -def _UpdateEtcHosts(fullnode, ip): - """Ensure a node has a correct entry in /etc/hosts. - - Args: - fullnode - Fully qualified domain name of host. 
(str) - ip - IPv4 address of host (str) - - """ - node = fullnode.split(".", 1)[0] - - f = open('/etc/hosts', 'r+') - - inthere = False - - save_lines = [] - add_lines = [] - removed = False - - while True: - rawline = f.readline() - - if not rawline: - # End of file - break - - line = rawline.split('\n')[0] - - # Strip off comments - line = line.split('#')[0] - - if not line: - # Entire line was comment, skip - save_lines.append(rawline) - continue - - fields = line.split() - - haveall = True - havesome = False - for spec in [ ip, fullnode, node ]: - if spec not in fields: - haveall = False - if spec in fields: - havesome = True - - if haveall: - inthere = True - save_lines.append(rawline) - continue - - if havesome and not haveall: - # Line (old, or manual?) which is missing some. Remove. - removed = True - continue - - save_lines.append(rawline) - - if not inthere: - add_lines.append('%s\t%s %s\n' % (ip, fullnode, node)) - - if removed: - if add_lines: - save_lines = save_lines + add_lines - - # We removed a line, write a new file and replace old. - fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc') - newfile = os.fdopen(fd, 'w') - newfile.write(''.join(save_lines)) - newfile.close() - os.rename(tmpname, '/etc/hosts') - - elif add_lines: - # Simply appending a new line will do the trick. - f.seek(0, 2) - for add in add_lines: - f.write(add) - - f.close() - - -def _UpdateKnownHosts(fullnode, ip, pubkey): - """Ensure a node has a correct known_hosts entry. - - Args: - fullnode - Fully qualified domain name of host. (str) - ip - IPv4 address of host (str) - pubkey - the public key of the cluster - - """ - if os.path.exists('/etc/ssh/ssh_known_hosts'): - f = open('/etc/ssh/ssh_known_hosts', 'r+') - else: - f = open('/etc/ssh/ssh_known_hosts', 'w+') - - inthere = False - - save_lines = [] - add_lines = [] - removed = False - - while True: - rawline = f.readline() - logger.Debug('read %s' % (repr(rawline),)) - - if not rawline: - # End of file - break - - line = rawline.split('\n')[0] - - parts = line.split(' ') - fields = parts[0].split(',') - key = parts[2] - - haveall = True - havesome = False - for spec in [ ip, fullnode ]: - if spec not in fields: - haveall = False - if spec in fields: - havesome = True - - logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),)) - if haveall and key == pubkey: - inthere = True - save_lines.append(rawline) - logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),)) - continue - - if havesome and (not haveall or key != pubkey): - removed = True - logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),)) - continue - - save_lines.append(rawline) - - if not inthere: - add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey)) - logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),)) - - if removed: - save_lines = save_lines + add_lines - - # Write a new file and replace old. - fd, tmpname = tempfile.mkstemp('tmp', 'ssh_known_hosts_', '/etc/ssh') - newfile = os.fdopen(fd, 'w') - newfile.write(''.join(save_lines)) - newfile.close() - logger.Debug("Wrote new known_hosts.") - os.rename(tmpname, '/etc/ssh/ssh_known_hosts') - - elif add_lines: - # Simply appending a new line will do the trick. - f.seek(0, 2) - for add in add_lines: - f.write(add) - - f.close() - - def _HasValidVG(vglist, vgname): """Checks if the volume group list is valid. 
@@ -448,26 +344,23 @@ def _InitSSHSetup(node): node: the name of this host as a fqdn """ - utils.RemoveFile('/root/.ssh/known_hosts') + priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS) - if os.path.exists('/root/.ssh/id_dsa'): - utils.CreateBackup('/root/.ssh/id_dsa') - if os.path.exists('/root/.ssh/id_dsa.pub'): - utils.CreateBackup('/root/.ssh/id_dsa.pub') - - utils.RemoveFile('/root/.ssh/id_dsa') - utils.RemoveFile('/root/.ssh/id_dsa.pub') + for name in priv_key, pub_key: + if os.path.exists(name): + utils.CreateBackup(name) + utils.RemoveFile(name) result = utils.RunCmd(["ssh-keygen", "-t", "dsa", - "-f", "/root/.ssh/id_dsa", + "-f", priv_key, "-q", "-N", ""]) if result.failed: - raise errors.OpExecError, ("could not generate ssh keypair, error %s" % - result.output) + raise errors.OpExecError("Could not generate ssh keypair, error %s" % + result.output) - f = open('/root/.ssh/id_dsa.pub', 'r') + f = open(pub_key, 'r') try: - utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192)) + utils.AddAuthorizedKey(auth_keys, f.read(8192)) finally: f.close() @@ -489,18 +382,30 @@ def _InitGanetiServerSetup(ss): "-keyout", constants.SSL_CERT_FILE, "-out", constants.SSL_CERT_FILE, "-batch"]) if result.failed: - raise errors.OpExecError, ("could not generate server ssl cert, command" - " %s had exitcode %s and error message %s" % - (result.cmd, result.exit_code, result.output)) + raise errors.OpExecError("could not generate server ssl cert, command" + " %s had exitcode %s and error message %s" % + (result.cmd, result.exit_code, result.output)) os.chmod(constants.SSL_CERT_FILE, 0400) result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"]) if result.failed: - raise errors.OpExecError, ("could not start the node daemon, command %s" - " had exitcode %s and error %s" % - (result.cmd, result.exit_code, result.output)) + raise errors.OpExecError("Could not start the node daemon, command %s" + " had exitcode %s and error %s" % + (result.cmd, result.exit_code, result.output)) + + +def _CheckInstanceBridgesExist(instance): + """Check that the brigdes needed by an instance exist. + + """ + # check bridges existance + brlist = [nic.bridge for nic in instance.nics] + if not rpc.call_bridges_exist(instance.primary_node, brlist): + raise errors.OpPrereqError("one or more target bridges %s does not" + " exist on destination node '%s'" % + (brlist, instance.primary_node)) class LUInitCluster(LogicalUnit): @@ -509,8 +414,8 @@ class LUInitCluster(LogicalUnit): """ HPATH = "cluster-init" HTYPE = constants.HTYPE_CLUSTER - _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix", - "def_bridge", "master_netdev"] + _OP_REQP = ["cluster_name", "hypervisor_type", "mac_prefix", + "def_bridge", "master_netdev", "file_storage_dir"] REQ_CLUSTER = False def BuildHooksEnv(self): @@ -520,67 +425,100 @@ class LUInitCluster(LogicalUnit): ourselves in the post-run node list. """ - env = { - "CLUSTER": self.op.cluster_name, - "MASTER": self.hostname['hostname_full'], - } - return env, [], [self.hostname['hostname_full']] + env = {"OP_TARGET": self.op.cluster_name} + return env, [], [self.hostname.name] def CheckPrereq(self): """Verify that the passed name is a valid one. 
""" if config.ConfigWriter.IsCluster(): - raise errors.OpPrereqError, ("Cluster is already initialised") + raise errors.OpPrereqError("Cluster is already initialised") - hostname_local = socket.gethostname() - self.hostname = hostname = utils.LookupHostname(hostname_local) - if not hostname: - raise errors.OpPrereqError, ("Cannot resolve my own hostname ('%s')" % - hostname_local) + if self.op.hypervisor_type == constants.HT_XEN_HVM31: + if not os.path.exists(constants.VNC_PASSWORD_FILE): + raise errors.OpPrereqError("Please prepare the cluster VNC" + "password file %s" % + constants.VNC_PASSWORD_FILE) - self.clustername = clustername = utils.LookupHostname(self.op.cluster_name) - if not clustername: - raise errors.OpPrereqError, ("Cannot resolve given cluster name ('%s')" - % self.op.cluster_name) + self.hostname = hostname = utils.HostInfo() - result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']]) - if result.failed: - raise errors.OpPrereqError, ("Inconsistency: this host's name resolves" - " to %s,\nbut this ip address does not" - " belong to this host." - " Aborting." % hostname['ip']) + if hostname.ip.startswith("127."): + raise errors.OpPrereqError("This host's IP resolves to the private" + " range (%s). Please fix DNS or %s." % + (hostname.ip, constants.ETC_HOSTS)) + + if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT, + source=constants.LOCALHOST_IP_ADDRESS): + raise errors.OpPrereqError("Inconsistency: this host's name resolves" + " to %s,\nbut this ip address does not" + " belong to this host." + " Aborting." % hostname.ip) + + self.clustername = clustername = utils.HostInfo(self.op.cluster_name) + + if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, + timeout=5): + raise errors.OpPrereqError("Cluster IP already active. Aborting.") secondary_ip = getattr(self.op, "secondary_ip", None) if secondary_ip and not utils.IsValidIP(secondary_ip): - raise errors.OpPrereqError, ("Invalid secondary ip given") - if secondary_ip and secondary_ip != hostname['ip']: - result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip]) - if result.failed: - raise errors.OpPrereqError, ("You gave %s as secondary IP,\n" - "but it does not belong to this host." % - secondary_ip) + raise errors.OpPrereqError("Invalid secondary ip given") + if (secondary_ip and + secondary_ip != hostname.ip and + (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT, + source=constants.LOCALHOST_IP_ADDRESS))): + raise errors.OpPrereqError("You gave %s as secondary IP," + " but it does not belong to this host." 
% + secondary_ip) self.secondary_ip = secondary_ip - # checks presence of the volume group given - vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name) + if not hasattr(self.op, "vg_name"): + self.op.vg_name = None + # if vg_name not None, checks if volume group is valid + if self.op.vg_name: + vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name) + if vgstatus: + raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if" + " you are not using lvm" % vgstatus) + + self.op.file_storage_dir = os.path.normpath(self.op.file_storage_dir) - if vgstatus: - raise errors.OpPrereqError, ("Error: %s" % vgstatus) + if not os.path.isabs(self.op.file_storage_dir): + raise errors.OpPrereqError("The file storage directory you have is" + " not an absolute path.") + + if not os.path.exists(self.op.file_storage_dir): + try: + os.makedirs(self.op.file_storage_dir, 0750) + except OSError, err: + raise errors.OpPrereqError("Cannot create file storage directory" + " '%s': %s" % + (self.op.file_storage_dir, err)) + + if not os.path.isdir(self.op.file_storage_dir): + raise errors.OpPrereqError("The file storage directory '%s' is not" + " a directory." % self.op.file_storage_dir) if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", self.op.mac_prefix): - raise errors.OpPrereqError, ("Invalid mac prefix given '%s'" % - self.op.mac_prefix) + raise errors.OpPrereqError("Invalid mac prefix given '%s'" % + self.op.mac_prefix) - if self.op.hypervisor_type not in hypervisor.VALID_HTYPES: - raise errors.OpPrereqError, ("Invalid hypervisor type given '%s'" % - self.op.hypervisor_type) + if self.op.hypervisor_type not in constants.HYPER_TYPES: + raise errors.OpPrereqError("Invalid hypervisor type given '%s'" % + self.op.hypervisor_type) result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev]) if result.failed: - raise errors.OpPrereqError, ("Invalid master netdev given (%s): '%s'" % - (self.op.master_netdev, result.output)) + raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" % + (self.op.master_netdev, + result.output.strip())) + + if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and + os.access(constants.NODE_INITD_SCRIPT, os.X_OK)): + raise errors.OpPrereqError("Init.d script '%s' missing or not" + " executable." % constants.NODE_INITD_SCRIPT) def Exec(self, feedback_fn): """Initialize the cluster. 
@@ -590,44 +528,39 @@ class LUInitCluster(LogicalUnit): hostname = self.hostname # set up the simple store - ss = ssconf.SimpleStore() + self.sstore = ss = ssconf.SimpleStore() ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type) - ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full']) - ss.SetKey(ss.SS_MASTER_IP, clustername['ip']) + ss.SetKey(ss.SS_MASTER_NODE, hostname.name) + ss.SetKey(ss.SS_MASTER_IP, clustername.ip) ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev) - ss.SetKey(ss.SS_CLUSTER_NAME, clustername['hostname']) + ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name) + ss.SetKey(ss.SS_FILE_STORAGE_DIR, self.op.file_storage_dir) # set up the inter-node password and certificate _InitGanetiServerSetup(ss) # start the master ip - rpc.call_node_start_master(hostname['hostname_full']) + rpc.call_node_start_master(hostname.name) # set up ssh config and /etc/hosts - f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r') + f = open(constants.SSH_HOST_RSA_PUB, 'r') try: sshline = f.read() finally: f.close() sshkey = sshline.split(" ")[1] - _UpdateEtcHosts(hostname['hostname_full'], - hostname['ip'], - ) - - _UpdateKnownHosts(hostname['hostname_full'], - hostname['ip'], - sshkey, - ) - - _InitSSHSetup(hostname['hostname']) + _AddHostToEtcHosts(hostname.name) + _InitSSHSetup(hostname.name) # init of cluster config file - cfgw = config.ConfigWriter() - cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip, + self.cfg = cfgw = config.ConfigWriter() + cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip, sshkey, self.op.mac_prefix, self.op.vg_name, self.op.def_bridge) + ssh.WriteKnownHostsFile(cfgw, ss, constants.SSH_KNOWN_HOSTS_FILE) + class LUDestroyCluster(NoHooksLU): """Logical unit for destroying the cluster. @@ -647,27 +580,31 @@ class LUDestroyCluster(NoHooksLU): nodelist = self.cfg.GetNodeList() if len(nodelist) != 1 or nodelist[0] != master: - raise errors.OpPrereqError, ("There are still %d node(s) in " - "this cluster." % (len(nodelist) - 1)) + raise errors.OpPrereqError("There are still %d node(s) in" + " this cluster." % (len(nodelist) - 1)) instancelist = self.cfg.GetInstanceList() if instancelist: - raise errors.OpPrereqError, ("There are still %d instance(s) in " - "this cluster." % len(instancelist)) + raise errors.OpPrereqError("There are still %d instance(s) in" + " this cluster." % len(instancelist)) def Exec(self, feedback_fn): """Destroys the cluster. """ - utils.CreateBackup('/root/.ssh/id_dsa') - utils.CreateBackup('/root/.ssh/id_dsa.pub') - rpc.call_node_leave_cluster(self.sstore.GetMasterNode()) + master = self.sstore.GetMasterNode() + if not rpc.call_node_stop_master(master): + raise errors.OpExecError("Could not disable the master role") + priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) + utils.CreateBackup(priv_key) + utils.CreateBackup(pub_key) + rpc.call_node_leave_cluster(master) class LUVerifyCluster(NoHooksLU): """Verifies the cluster status. 
""" - _OP_REQP = [] + _OP_REQP = ["skip_checks"] def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result, remote_version, feedback_fn): @@ -688,7 +625,7 @@ class LUVerifyCluster(NoHooksLU): # compares ganeti version local_version = constants.PROTOCOL_VERSION if not remote_version: - feedback_fn(" - ERROR: connection to %s failed" % (node)) + feedback_fn(" - ERROR: connection to %s failed" % (node)) return True if local_version != remote_version: @@ -739,7 +676,8 @@ class LUVerifyCluster(NoHooksLU): feedback_fn(" - ERROR: hypervisor verify failure: '%s'" % hyp_result) return bad - def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn): + def _VerifyInstance(self, instance, instanceconfig, node_vol_is, + node_instance, feedback_fn): """Verify an instance. This function checks to see if the required block devices are @@ -748,13 +686,6 @@ class LUVerifyCluster(NoHooksLU): """ bad = False - instancelist = self.cfg.GetInstanceList() - if not instance in instancelist: - feedback_fn(" - ERROR: instance %s not in instance list %s" % - (instance, instancelist)) - bad = True - - instanceconfig = self.cfg.GetInstanceInfo(instance) node_current = instanceconfig.primary_node node_vol_should = {} @@ -768,7 +699,8 @@ class LUVerifyCluster(NoHooksLU): bad = True if not instanceconfig.status == 'down': - if not instance in node_instance[node_current]: + if (node_current not in node_instance or + not instance in node_instance[node_current]): feedback_fn(" - ERROR: instance %s not running on node %s" % (instance, node_current)) bad = True @@ -780,7 +712,7 @@ class LUVerifyCluster(NoHooksLU): (instance, node)) bad = True - return not bad + return bad def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn): """Verify if there are any unknown volumes in the cluster. @@ -814,13 +746,44 @@ class LUVerifyCluster(NoHooksLU): bad = True return bad + def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn): + """Verify N+1 Memory Resilience. + + Check that if one single node dies we can still start all the instances it + was primary for. + + """ + bad = False + + for node, nodeinfo in node_info.iteritems(): + # This code checks that every node which is now listed as secondary has + # enough memory to host all instances it is supposed to should a single + # other node in the cluster fail. + # FIXME: not ready for failover to an arbitrary node + # FIXME: does not support file-backed instances + # WARNING: we currently take into account down instances as well as up + # ones, considering that even if they're down someone might want to start + # them even in the event of a node failure. + for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems(): + needed_mem = 0 + for instance in instances: + needed_mem += instance_cfg[instance].memory + if nodeinfo['mfree'] < needed_mem: + feedback_fn(" - ERROR: not enough memory on node %s to accomodate" + " failovers should node %s fail" % (node, prinode)) + bad = True + return bad + def CheckPrereq(self): """Check prerequisites. - This has no prerequisites. + Transform the list of checks we're going to skip into a set and check that + all its members are valid. """ - pass + self.skip_set = frozenset(self.op.skip_checks) + if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set): + raise errors.OpPrereqError("Invalid checks to be skipped specified") def Exec(self, feedback_fn): """Verify integrity of cluster, performing various test on nodes. 
@@ -828,14 +791,17 @@ class LUVerifyCluster(NoHooksLU): """ bad = False feedback_fn("* Verifying global settings") - self.cfg.VerifyConfig() + for msg in self.cfg.VerifyConfig(): + feedback_fn(" - ERROR: %s" % msg) - master = self.sstore.GetMasterNode() vg_name = self.cfg.GetVGName() nodelist = utils.NiceSort(self.cfg.GetNodeList()) instancelist = utils.NiceSort(self.cfg.GetInstanceList()) + i_non_redundant = [] # Non redundant instances node_volume = {} node_instance = {} + node_info = {} + instance_cfg = {} # FIXME: verify OS list # do local checksums @@ -855,6 +821,7 @@ class LUVerifyCluster(NoHooksLU): } all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param) all_rversion = rpc.call_version(nodelist) + all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName()) for node in nodelist: feedback_fn("* Verifying node %s" % node) @@ -866,12 +833,17 @@ class LUVerifyCluster(NoHooksLU): # node_volume volumeinfo = all_volumeinfo[node] - if type(volumeinfo) != dict: + if isinstance(volumeinfo, basestring): + feedback_fn(" - ERROR: LVM problem on node %s: %s" % + (node, volumeinfo[-400:].encode('string_escape'))) + bad = True + node_volume[node] = {} + elif not isinstance(volumeinfo, dict): feedback_fn(" - ERROR: connection to %s failed" % (node,)) bad = True continue - - node_volume[node] = volumeinfo + else: + node_volume[node] = volumeinfo # node_instance nodeinstance = all_instanceinfo[node] @@ -882,18 +854,74 @@ class LUVerifyCluster(NoHooksLU): node_instance[node] = nodeinstance + # node_info + nodeinfo = all_ninfo[node] + if not isinstance(nodeinfo, dict): + feedback_fn(" - ERROR: connection to %s failed" % (node,)) + bad = True + continue + + try: + node_info[node] = { + "mfree": int(nodeinfo['memory_free']), + "dfree": int(nodeinfo['vg_free']), + "pinst": [], + "sinst": [], + # dictionary holding all instances this node is secondary for, + # grouped by their primary node. Each key is a cluster node, and each + # value is a list of instances which have the key as primary and the + # current node as secondary. this is handy to calculate N+1 memory + # availability if you can only failover from a primary to its + # secondary. + "sinst-by-pnode": {}, + } + except ValueError: + feedback_fn(" - ERROR: invalid value returned from node %s" % (node,)) + bad = True + continue + node_vol_should = {} for instance in instancelist: feedback_fn("* Verifying instance %s" % instance) - result = self._VerifyInstance(instance, node_volume, node_instance, - feedback_fn) - bad = bad or result - inst_config = self.cfg.GetInstanceInfo(instance) + result = self._VerifyInstance(instance, inst_config, node_volume, + node_instance, feedback_fn) + bad = bad or result inst_config.MapLVsByNode(node_vol_should) + instance_cfg[instance] = inst_config + + pnode = inst_config.primary_node + if pnode in node_info: + node_info[pnode]['pinst'].append(instance) + else: + feedback_fn(" - ERROR: instance %s, connection to primary node" + " %s failed" % (instance, pnode)) + bad = True + + # If the instance is non-redundant we cannot survive losing its primary + # node, so we are not N+1 compliant. On the other hand we have no disk + # templates with more than one secondary so that situation is not well + # supported either. 
+ # FIXME: does not support file-backed instances + if len(inst_config.secondary_nodes) == 0: + i_non_redundant.append(instance) + elif len(inst_config.secondary_nodes) > 1: + feedback_fn(" - WARNING: multiple secondaries for instance %s" + % instance) + + for snode in inst_config.secondary_nodes: + if snode in node_info: + node_info[snode]['sinst'].append(instance) + if pnode not in node_info[snode]['sinst-by-pnode']: + node_info[snode]['sinst-by-pnode'][pnode] = [] + node_info[snode]['sinst-by-pnode'][pnode].append(instance) + else: + feedback_fn(" - ERROR: instance %s, connection to secondary node" + " %s failed" % (instance, snode)) + feedback_fn("* Verifying orphan volumes") result = self._VerifyOrphanVolumes(node_vol_should, node_volume, feedback_fn) @@ -904,10 +932,244 @@ class LUVerifyCluster(NoHooksLU): feedback_fn) bad = bad or result + if constants.VERIFY_NPLUSONE_MEM not in self.skip_set: + feedback_fn("* Verifying N+1 Memory redundancy") + result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn) + bad = bad or result + + feedback_fn("* Other Notes") + if i_non_redundant: + feedback_fn(" - NOTICE: %d non-redundant instance(s) found." + % len(i_non_redundant)) + return int(bad) -def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): +class LUVerifyDisks(NoHooksLU): + """Verifies the cluster disks status. + + """ + _OP_REQP = [] + + def CheckPrereq(self): + """Check prerequisites. + + This has no prerequisites. + + """ + pass + + def Exec(self, feedback_fn): + """Verify integrity of cluster disks. + + """ + result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {} + + vg_name = self.cfg.GetVGName() + nodes = utils.NiceSort(self.cfg.GetNodeList()) + instances = [self.cfg.GetInstanceInfo(name) + for name in self.cfg.GetInstanceList()] + + nv_dict = {} + for inst in instances: + inst_lvs = {} + if (inst.status != "up" or + inst.disk_template not in constants.DTS_NET_MIRROR): + continue + inst.MapLVsByNode(inst_lvs) + # transform { iname: {node: [vol,],},} to {(node, vol): iname} + for node, vol_list in inst_lvs.iteritems(): + for vol in vol_list: + nv_dict[(node, vol)] = inst + + if not nv_dict: + return result + + node_lvs = rpc.call_volume_list(nodes, vg_name) + + to_act = set() + for node in nodes: + # node_volume + lvs = node_lvs[node] + + if isinstance(lvs, basestring): + logger.Info("error enumerating LVs on node %s: %s" % (node, lvs)) + res_nlvm[node] = lvs + elif not isinstance(lvs, dict): + logger.Info("connection to node %s failed or invalid data returned" % + (node,)) + res_nodes.append(node) + continue + + for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems(): + inst = nv_dict.pop((node, lv_name), None) + if (not lv_online and inst is not None + and inst.name not in res_instances): + res_instances.append(inst.name) + + # any leftover items in nv_dict are missing LVs, let's arrange the + # data better + for key, inst in nv_dict.iteritems(): + if inst.name not in res_missing: + res_missing[inst.name] = [] + res_missing[inst.name].append(key) + + return result + + +class LURenameCluster(LogicalUnit): + """Rename the cluster. + + """ + HPATH = "cluster-rename" + HTYPE = constants.HTYPE_CLUSTER + _OP_REQP = ["name"] + + def BuildHooksEnv(self): + """Build hooks env. + + """ + env = { + "OP_TARGET": self.sstore.GetClusterName(), + "NEW_NAME": self.op.name, + } + mn = self.sstore.GetMasterNode() + return env, [mn], [mn] + + def CheckPrereq(self): + """Verify that the passed name is a valid one. 
+ + """ + hostname = utils.HostInfo(self.op.name) + + new_name = hostname.name + self.ip = new_ip = hostname.ip + old_name = self.sstore.GetClusterName() + old_ip = self.sstore.GetMasterIP() + if new_name == old_name and new_ip == old_ip: + raise errors.OpPrereqError("Neither the name nor the IP address of the" + " cluster has changed") + if new_ip != old_ip: + result = utils.RunCmd(["fping", "-q", new_ip]) + if not result.failed: + raise errors.OpPrereqError("The given cluster IP address (%s) is" + " reachable on the network. Aborting." % + new_ip) + + self.op.name = new_name + + def Exec(self, feedback_fn): + """Rename the cluster. + + """ + clustername = self.op.name + ip = self.ip + ss = self.sstore + + # shutdown the master IP + master = ss.GetMasterNode() + if not rpc.call_node_stop_master(master): + raise errors.OpExecError("Could not disable the master role") + + try: + # modify the sstore + ss.SetKey(ss.SS_MASTER_IP, ip) + ss.SetKey(ss.SS_CLUSTER_NAME, clustername) + + # Distribute updated ss config to all nodes + myself = self.cfg.GetNodeInfo(master) + dist_nodes = self.cfg.GetNodeList() + if myself.name in dist_nodes: + dist_nodes.remove(myself.name) + + logger.Debug("Copying updated ssconf data to all nodes") + for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]: + fname = ss.KeyToFilename(keyname) + result = rpc.call_upload_file(dist_nodes, fname) + for to_node in dist_nodes: + if not result[to_node]: + logger.Error("copy of file %s to node %s failed" % + (fname, to_node)) + finally: + if not rpc.call_node_start_master(master): + logger.Error("Could not re-enable the master role on the master," + " please restart manually.") + + +def _RecursiveCheckIfLVMBased(disk): + """Check if the given disk or its children are lvm-based. + + Args: + disk: ganeti.objects.Disk object + + Returns: + boolean indicating whether a LD_LV dev_type was found or not + + """ + if disk.children: + for chdisk in disk.children: + if _RecursiveCheckIfLVMBased(chdisk): + return True + return disk.dev_type == constants.LD_LV + + +class LUSetClusterParams(LogicalUnit): + """Change the parameters of the cluster. + + """ + HPATH = "cluster-modify" + HTYPE = constants.HTYPE_CLUSTER + _OP_REQP = [] + + def BuildHooksEnv(self): + """Build hooks env. + + """ + env = { + "OP_TARGET": self.sstore.GetClusterName(), + "NEW_VG_NAME": self.op.vg_name, + } + mn = self.sstore.GetMasterNode() + return env, [mn], [mn] + + def CheckPrereq(self): + """Check prerequisites. + + This checks whether the given params don't conflict and + if the given volume group is valid. + + """ + if not self.op.vg_name: + instances = [self.cfg.GetInstanceInfo(name) + for name in self.cfg.GetInstanceList()] + for inst in instances: + for disk in inst.disks: + if _RecursiveCheckIfLVMBased(disk): + raise errors.OpPrereqError("Cannot disable lvm storage while" + " lvm-based instances exist") + + # if vg_name not None, checks given volume group on all nodes + if self.op.vg_name: + node_list = self.cfg.GetNodeList() + vglist = rpc.call_vg_list(node_list) + for node in node_list: + vgstatus = _HasValidVG(vglist[node], self.op.vg_name) + if vgstatus: + raise errors.OpPrereqError("Error on node '%s': %s" % + (node, vgstatus)) + + def Exec(self, feedback_fn): + """Change the parameters of the cluster. 
+ + """ + if self.op.vg_name != self.cfg.GetVGName(): + self.cfg.SetVGName(self.op.vg_name) + else: + feedback_fn("Cluster LVM configuration already in desired" + " state, not changing") + + +def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False): """Sleep and poll for an instance's disk to sync. """ @@ -915,7 +1177,7 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): return True if not oneshot: - logger.ToStdout("Waiting for instance %s to sync disks." % instance.name) + proc.LogInfo("Waiting for instance %s to sync disks." % instance.name) node = instance.primary_node @@ -929,21 +1191,22 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): cumul_degraded = False rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks) if not rstats: - logger.ToStderr("Can't get any data from node %s" % node) + proc.LogWarning("Can't get any data from node %s" % node) retries += 1 if retries >= 10: - raise errors.RemoteError, ("Can't contact node %s for mirror data," - " aborting." % node) + raise errors.RemoteError("Can't contact node %s for mirror data," + " aborting." % node) time.sleep(6) continue retries = 0 for i in range(len(rstats)): mstat = rstats[i] if mstat is None: - logger.ToStderr("Can't compute data for node %s/%s" % + proc.LogWarning("Can't compute data for node %s/%s" % (node, instance.disks[i].iv_name)) continue - perc_done, est_time, is_degraded = mstat + # we ignore the ldisk parameter + perc_done, est_time, is_degraded, _ = mstat cumul_degraded = cumul_degraded or (is_degraded and perc_done is None) if perc_done is not None: done = False @@ -952,8 +1215,8 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): max_time = est_time else: rem_time = "no time estimate" - logger.ToStdout("- device %s: %5.2f%% done, %s" % - (instance.disks[i].iv_name, perc_done, rem_time)) + proc.LogInfo("- device %s: %5.2f%% done, %s" % + (instance.disks[i].iv_name, perc_done, rem_time)) if done or oneshot: break @@ -966,24 +1229,32 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): utils.Lock('cmd') if done: - logger.ToStdout("Instance %s's disks are in sync." % instance.name) + proc.LogInfo("Instance %s's disks are in sync." % instance.name) return not cumul_degraded -def _CheckDiskConsistency(cfgw, dev, node, on_primary): +def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False): """Check that mirrors are not degraded. + The ldisk parameter, if True, will change the test from the + is_degraded attribute (which represents overall non-ok status for + the device(s)) to the ldisk (representing the local storage status). + """ cfgw.SetDiskID(dev, node) + if ldisk: + idx = 6 + else: + idx = 5 result = True if on_primary or dev.AssembleOnSecondary(): rstats = rpc.call_blockdev_find(node, dev) if not rstats: - logger.ToStderr("Can't get any data from node %s" % node) + logger.ToStderr("Node %s: Disk degraded, not found or node down" % node) result = False else: - result = result and (not rstats[5]) + result = result and (not rstats[idx]) if dev.children: for child in dev.children: result = result and _CheckDiskConsistency(cfgw, child, node, on_primary) @@ -995,7 +1266,7 @@ class LUDiagnoseOS(NoHooksLU): """Logical unit for OS diagnose/query. """ - _OP_REQP = [] + _OP_REQP = ["output_fields", "names"] def CheckPrereq(self): """Check prerequisites. @@ -1003,7 +1274,44 @@ class LUDiagnoseOS(NoHooksLU): This always succeeds, since this is a pure query LU. 
""" - return + if self.op.names: + raise errors.OpPrereqError("Selective OS query not supported") + + self.dynamic_fields = frozenset(["name", "valid", "node_status"]) + _CheckOutputFields(static=[], + dynamic=self.dynamic_fields, + selected=self.op.output_fields) + + @staticmethod + def _DiagnoseByOS(node_list, rlist): + """Remaps a per-node return list into an a per-os per-node dictionary + + Args: + node_list: a list with the names of all nodes + rlist: a map with node names as keys and OS objects as values + + Returns: + map: a map with osnames as keys and as value another map, with + nodes as + keys and list of OS objects as values + e.g. {"debian-etch": {"node1": [,...], + "node2": [,]} + } + + """ + all_os = {} + for node_name, nr in rlist.iteritems(): + if not nr: + continue + for os in nr: + if os.name not in all_os: + # build a list of nodes for this os containing empty lists + # for each node in node_list + all_os[os.name] = {} + for nname in node_list: + all_os[os.name][nname] = [] + all_os[os.name][node_name].append(os) + return all_os def Exec(self, feedback_fn): """Compute the list of OSes. @@ -1012,8 +1320,26 @@ class LUDiagnoseOS(NoHooksLU): node_list = self.cfg.GetNodeList() node_data = rpc.call_os_diagnose(node_list) if node_data == False: - raise errors.OpExecError, "Can't gather the list of OSes" - return node_data + raise errors.OpExecError("Can't gather the list of OSes") + pol = self._DiagnoseByOS(node_list, node_data) + output = [] + for os_name, os_data in pol.iteritems(): + row = [] + for field in self.op.output_fields: + if field == "name": + val = os_name + elif field == "valid": + val = utils.all([osl and osl[0] for osl in os_data.values()]) + elif field == "node_status": + val = {} + for node_name, nos_list in os_data.iteritems(): + val[node_name] = [(v.status, v.path) for v in nos_list] + else: + raise errors.ParameterError(field) + row.append(val) + output.append(row) + + return output class LURemoveNode(LogicalUnit): @@ -1032,6 +1358,7 @@ class LURemoveNode(LogicalUnit): """ env = { + "OP_TARGET": self.op.node_name, "NODE_NAME": self.op.node_name, } all_nodes = self.cfg.GetNodeList() @@ -1051,24 +1378,23 @@ class LURemoveNode(LogicalUnit): """ node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name)) if node is None: - logger.Error("Error: Node '%s' is unknown." % self.op.node_name) - return 1 + raise errors.OpPrereqError, ("Node '%s' is unknown." % self.op.node_name) instance_list = self.cfg.GetInstanceList() masternode = self.sstore.GetMasterNode() if node.name == masternode: - raise errors.OpPrereqError, ("Node is the master node," - " you need to failover first.") + raise errors.OpPrereqError("Node is the master node," + " you need to failover first.") for instance_name in instance_list: instance = self.cfg.GetInstanceInfo(instance_name) if node.name == instance.primary_node: - raise errors.OpPrereqError, ("Instance %s still running on the node," - " please remove first." % instance_name) + raise errors.OpPrereqError("Instance %s still running on the node," + " please remove first." % instance_name) if node.name in instance.secondary_nodes: - raise errors.OpPrereqError, ("Instance %s has node as a secondary," - " please remove first." % instance_name) + raise errors.OpPrereqError("Instance %s has node as a secondary," + " please remove first." 
% instance_name) self.op.node_name = node.name self.node = node @@ -1082,18 +1408,20 @@ class LURemoveNode(LogicalUnit): rpc.call_node_leave_cluster(node.name) - ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT) + self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT) logger.Info("Removing node %s from config" % node.name) self.cfg.RemoveNode(node.name) + _RemoveHostFromEtcHosts(node.name) + class LUQueryNodes(NoHooksLU): """Logical unit for querying nodes. """ - _OP_REQP = ["output_fields"] + _OP_REQP = ["output_fields", "names"] def CheckPrereq(self): """Check prerequisites. @@ -1102,21 +1430,24 @@ class LUQueryNodes(NoHooksLU): """ self.dynamic_fields = frozenset(["dtotal", "dfree", - "mtotal", "mnode", "mfree"]) + "mtotal", "mnode", "mfree", + "bootid"]) - _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"], + _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt", + "pinst_list", "sinst_list", + "pip", "sip"], dynamic=self.dynamic_fields, selected=self.op.output_fields) + self.wanted = _GetWantedNodes(self, self.op.names) def Exec(self, feedback_fn): """Computes the list of nodes and their attributes. """ - nodenames = utils.NiceSort(self.cfg.GetNodeList()) + nodenames = self.wanted nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames] - # begin data gathering if self.dynamic_fields.intersection(self.op.output_fields): @@ -1131,23 +1462,28 @@ class LUQueryNodes(NoHooksLU): "mfree": utils.TryConvert(int, nodeinfo['memory_free']), "dtotal": utils.TryConvert(int, nodeinfo['vg_size']), "dfree": utils.TryConvert(int, nodeinfo['vg_free']), + "bootid": nodeinfo['bootid'], } else: live_data[name] = {} else: live_data = dict.fromkeys(nodenames, {}) - node_to_primary = dict.fromkeys(nodenames, 0) - node_to_secondary = dict.fromkeys(nodenames, 0) + node_to_primary = dict([(name, set()) for name in nodenames]) + node_to_secondary = dict([(name, set()) for name in nodenames]) - if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields: + inst_fields = frozenset(("pinst_cnt", "pinst_list", + "sinst_cnt", "sinst_list")) + if inst_fields & frozenset(self.op.output_fields): instancelist = self.cfg.GetInstanceList() - for instance in instancelist: - instanceinfo = self.cfg.GetInstanceInfo(instance) - node_to_primary[instanceinfo.primary_node] += 1 - for secnode in instanceinfo.secondary_nodes: - node_to_secondary[secnode] += 1 + for instance_name in instancelist: + inst = self.cfg.GetInstanceInfo(instance_name) + if inst.primary_node in node_to_primary: + node_to_primary[inst.primary_node].add(inst.name) + for secnode in inst.secondary_nodes: + if secnode in node_to_secondary: + node_to_secondary[secnode].add(inst.name) # end data gathering @@ -1157,19 +1493,22 @@ class LUQueryNodes(NoHooksLU): for field in self.op.output_fields: if field == "name": val = node.name - elif field == "pinst": - val = node_to_primary[node.name] - elif field == "sinst": - val = node_to_secondary[node.name] + elif field == "pinst_list": + val = list(node_to_primary[node.name]) + elif field == "sinst_list": + val = list(node_to_secondary[node.name]) + elif field == "pinst_cnt": + val = len(node_to_primary[node.name]) + elif field == "sinst_cnt": + val = len(node_to_secondary[node.name]) elif field == "pip": val = node.primary_ip elif field == "sip": val = node.secondary_ip elif field in self.dynamic_fields: - val = live_data[node.name].get(field, "?") + val = live_data[node.name].get(field, None) else: - raise errors.ParameterError, field - val 
= str(val) + raise errors.ParameterError(field) node_output.append(val) output.append(node_output) @@ -1199,7 +1538,7 @@ class LUQueryNodeVolumes(NoHooksLU): """Computes the list of nodes and their attributes. """ - nodenames = utils.NiceSort([node.name for node in self.nodes]) + nodenames = self.nodes volumes = rpc.call_node_volumes(nodenames) ilist = [self.cfg.GetInstanceInfo(iname) for iname @@ -1209,6 +1548,9 @@ class LUQueryNodeVolumes(NoHooksLU): output = [] for node in nodenames: + if node not in volumes or not volumes[node]: + continue + node_vols = volumes[node][:] node_vols.sort(key=lambda vol: vol['dev']) @@ -1235,7 +1577,7 @@ class LUQueryNodeVolumes(NoHooksLU): else: val = '-' else: - raise errors.ParameterError, field + raise errors.ParameterError(field) node_output.append(str(val)) output.append(node_output) @@ -1258,6 +1600,7 @@ class LUAddNode(LogicalUnit): """ env = { + "OP_TARGET": self.op.node_name, "NODE_NAME": self.op.node_name, "NODE_PIP": self.op.primary_ip, "NODE_SIP": self.op.secondary_ip, @@ -1280,31 +1623,40 @@ class LUAddNode(LogicalUnit): node_name = self.op.node_name cfg = self.cfg - dns_data = utils.LookupHostname(node_name) - if not dns_data: - raise errors.OpPrereqError, ("Node %s is not resolvable" % node_name) + dns_data = utils.HostInfo(node_name) - node = dns_data['hostname'] - primary_ip = self.op.primary_ip = dns_data['ip'] + node = dns_data.name + primary_ip = self.op.primary_ip = dns_data.ip secondary_ip = getattr(self.op, "secondary_ip", None) if secondary_ip is None: secondary_ip = primary_ip if not utils.IsValidIP(secondary_ip): - raise errors.OpPrereqError, ("Invalid secondary IP given") + raise errors.OpPrereqError("Invalid secondary IP given") self.op.secondary_ip = secondary_ip + node_list = cfg.GetNodeList() - if node in node_list: - raise errors.OpPrereqError, ("Node %s is already in the configuration" - % node) + if not self.op.readd and node in node_list: + raise errors.OpPrereqError("Node %s is already in the configuration" % + node) + elif self.op.readd and node not in node_list: + raise errors.OpPrereqError("Node %s is not in the configuration" % node) for existing_node_name in node_list: existing_node = cfg.GetNodeInfo(existing_node_name) + + if self.op.readd and node == existing_node_name: + if (existing_node.primary_ip != primary_ip or + existing_node.secondary_ip != secondary_ip): + raise errors.OpPrereqError("Readded node doesn't have the same IP" + " address configuration as before") + continue + if (existing_node.primary_ip == primary_ip or existing_node.secondary_ip == primary_ip or existing_node.primary_ip == secondary_ip or existing_node.secondary_ip == secondary_ip): - raise errors.OpPrereqError, ("New node ip address(es) conflict with" - " existing node %s" % existing_node.name) + raise errors.OpPrereqError("New node ip address(es) conflict with" + " existing node %s" % existing_node.name) # check that the type of the node (single versus dual homed) is the # same as for the master @@ -1313,29 +1665,32 @@ class LUAddNode(LogicalUnit): newbie_singlehomed = secondary_ip == primary_ip if master_singlehomed != newbie_singlehomed: if master_singlehomed: - raise errors.OpPrereqError, ("The master has no private ip but the" - " new node has one") + raise errors.OpPrereqError("The master has no private ip but the" + " new node has one") else: - raise errors.OpPrereqError ("The master has a private ip but the" - " new node doesn't have one") + raise errors.OpPrereqError("The master has a private ip but the" + " new node doesn't have 
one") # checks reachablity - command = ["fping", "-q", primary_ip] - result = utils.RunCmd(command) - if result.failed: - raise errors.OpPrereqError, ("Node not reachable by ping") + if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT): + raise errors.OpPrereqError("Node not reachable by ping") if not newbie_singlehomed: # check reachability from my secondary ip to newbie's secondary ip - command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip] - result = utils.RunCmd(command) - if result.failed: - raise errors.OpPrereqError, ("Node secondary ip not reachable by ping") + if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT, + source=myself.secondary_ip): + raise errors.OpPrereqError("Node secondary ip not reachable by TCP" + " based ping to noded port") self.new_node = objects.Node(name=node, primary_ip=primary_ip, secondary_ip=secondary_ip) + if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31: + if not os.path.exists(constants.VNC_PASSWORD_FILE): + raise errors.OpPrereqError("Cluster VNC password file %s missing" % + constants.VNC_PASSWORD_FILE) + def Exec(self, feedback_fn): """Adds the new node to the cluster. @@ -1346,7 +1701,7 @@ class LUAddNode(LogicalUnit): # set up inter-node password and certificate and restarts the node daemon gntpass = self.sstore.GetNodeDaemonPassword() if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass): - raise errors.OpExecError, ("ganeti password corruption detected") + raise errors.OpExecError("ganeti password corruption detected") f = open(constants.SSL_CERT_FILE) try: gntpem = f.read(8192) @@ -1357,13 +1712,11 @@ class LUAddNode(LogicalUnit): # cert doesn't contain this, the here-document will be correctly # parsed by the shell sequence below if re.search('^!EOF\.', gntpem, re.MULTILINE): - raise errors.OpExecError, ("invalid PEM encoding in the SSL certificate") + raise errors.OpExecError("invalid PEM encoding in the SSL certificate") if not gntpem.endswith("\n"): - raise errors.OpExecError, ("PEM must end with newline") + raise errors.OpExecError("PEM must end with newline") logger.Info("copy cluster pass to %s and starting the node daemon" % node) - # remove first the root's known_hosts file - utils.RemoveFile("/root/.ssh/known_hosts") # and then connect with ssh to set password and start ganeti-noded # note that all the below variables are sanitized at this point, # either by being constants or by the checks above @@ -1376,11 +1729,11 @@ class LUAddNode(LogicalUnit): constants.SSL_CERT_FILE, gntpem, constants.NODE_INITD_SCRIPT)) - result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True) + result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True) if result.failed: - raise errors.OpExecError, ("Remote command on node %s, error: %s," - " output: %s" % - (node, result.fail_reason, result.output)) + raise errors.OpExecError("Remote command on node %s, error: %s," + " output: %s" % + (node, result.fail_reason, result.output)) # check connectivity time.sleep(4) @@ -1391,18 +1744,19 @@ class LUAddNode(LogicalUnit): logger.Info("communication to node %s fine, sw version %s match" % (node, result)) else: - raise errors.OpExecError, ("Version mismatch master version %s," - " node version %s" % - (constants.PROTOCOL_VERSION, result)) + raise errors.OpExecError("Version mismatch master version %s," + " node version %s" % + (constants.PROTOCOL_VERSION, result)) else: - raise errors.OpExecError, ("Cannot get version from the new node") + raise errors.OpExecError("Cannot get version from the 
new node") # setup ssh on node logger.Info("copy ssh key to node %s" % node) + priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) keyarray = [] - keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub", - "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub", - "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"] + keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB, + constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB, + priv_key, pub_key] for i in keyfiles: f = open(i, 'r') @@ -1415,21 +1769,27 @@ class LUAddNode(LogicalUnit): keyarray[3], keyarray[4], keyarray[5]) if not result: - raise errors.OpExecError, ("Cannot transfer ssh keys to the new node") + raise errors.OpExecError("Cannot transfer ssh keys to the new node") # Add node to our /etc/hosts, and add key to known_hosts - _UpdateEtcHosts(new_node.name, new_node.primary_ip) - _UpdateKnownHosts(new_node.name, new_node.primary_ip, - self.cfg.GetHostKey()) + _AddHostToEtcHosts(new_node.name) if new_node.secondary_ip != new_node.primary_ip: - result = ssh.SSHCall(node, "root", - "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip) - if result.failed: - raise errors.OpExecError, ("Node claims it doesn't have the" - " secondary ip you gave (%s).\n" - "Please fix and re-run this command." % - new_node.secondary_ip) + if not rpc.call_node_tcp_ping(new_node.name, + constants.LOCALHOST_IP_ADDRESS, + new_node.secondary_ip, + constants.DEFAULT_NODED_PORT, + 10, False): + raise errors.OpExecError("Node claims it doesn't have the secondary ip" + " you gave (%s). Please fix and re-run this" + " command." % new_node.secondary_ip) + + success, msg = self.ssh.VerifyNodeHostname(node) + if not success: + raise errors.OpExecError("Node '%s' claims it has a different hostname" + " than the one the resolver gives: %s." + " Please fix and re-run this command." % + (node, msg)) # Distribute updated /etc/hosts and known_hosts to all nodes, # including the node just added @@ -1439,7 +1799,7 @@ class LUAddNode(LogicalUnit): dist_nodes.remove(myself.name) logger.Debug("Copying hosts and known_hosts to all nodes") - for fname in ("/etc/hosts", "/etc/ssh/ssh_known_hosts"): + for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE): result = rpc.call_upload_file(dist_nodes, fname) for to_node in dist_nodes: if not result[to_node]: @@ -1447,12 +1807,15 @@ class LUAddNode(LogicalUnit): (fname, to_node)) to_copy = ss.GetFileList() + if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31: + to_copy.append(constants.VNC_PASSWORD_FILE) for fname in to_copy: - if not ssh.CopyFileToNode(node, fname): + if not self.ssh.CopyFileToNode(node, fname): logger.Error("could not copy file %s to node %s" % (fname, node)) - logger.Info("adding node %s to cluster.conf" % node) - self.cfg.AddNode(new_node) + if not self.op.readd: + logger.Info("adding node %s to cluster.conf" % node) + self.cfg.AddNode(new_node) class LUMasterFailover(LogicalUnit): @@ -1474,6 +1837,7 @@ class LUMasterFailover(LogicalUnit): """ env = { + "OP_TARGET": self.new_master, "NEW_MASTER": self.new_master, "OLD_MASTER": self.old_master, } @@ -1485,15 +1849,14 @@ class LUMasterFailover(LogicalUnit): This checks that we are not already the master. 
""" - self.new_master = socket.gethostname() - + self.new_master = utils.HostInfo().name self.old_master = self.sstore.GetMasterNode() if self.old_master == self.new_master: - raise errors.OpPrereqError, ("This commands must be run on the node" - " where you want the new master to be.\n" - "%s is already the master" % - self.old_master) + raise errors.OpPrereqError("This commands must be run on the node" + " where you want the new master to be." + " %s is already the master" % + self.old_master) def Exec(self, feedback_fn): """Failover the master node. @@ -1521,8 +1884,8 @@ class LUMasterFailover(LogicalUnit): if not rpc.call_node_start_master(self.new_master): logger.Error("could not start the master role on the new master" " %s, please check" % self.new_master) - feedback_fn("Error in activating the master IP on the new master,\n" - "please fix manually.") + feedback_fn("Error in activating the master IP on the new master," + " please fix manually.") @@ -1587,12 +1950,12 @@ class LUClusterCopyFile(NoHooksLU): """ filename = self.op.filename - myname = socket.gethostname() + myname = utils.HostInfo().name for node in self.nodes: if node == myname: continue - if not ssh.CopyFileToNode(node, filename): + if not self.ssh.CopyFileToNode(node, filename): logger.Error("Copy of file %s to node %s failed" % (filename, node)) @@ -1633,10 +1996,16 @@ class LURunClusterCommand(NoHooksLU): """Run a command on some nodes. """ + # put the master at the end of the nodes list + master_node = self.sstore.GetMasterNode() + if master_node in self.nodes: + self.nodes.remove(master_node) + self.nodes.append(master_node) + data = [] for node in self.nodes: - result = utils.RunCmd(["ssh", node.name, self.op.command]) - data.append((node.name, result.cmd, result.output, result.exit_code)) + result = self.ssh.Run(node, "root", self.op.command) + data.append((node, result.output, result.exit_code)) return data @@ -1656,8 +2025,8 @@ class LUActivateInstanceDisks(NoHooksLU): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance @@ -1667,7 +2036,7 @@ class LUActivateInstanceDisks(NoHooksLU): """ disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg) if not disks_ok: - raise errors.OpExecError, ("Cannot activate block devices") + raise errors.OpExecError("Cannot activate block devices") return disks_info @@ -1689,26 +2058,55 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False): """ device_info = [] disks_ok = True + iname = instance.name + # With the two passes mechanism we try to reduce the window of + # opportunity for the race condition of switching DRBD to primary + # before handshaking occured, but we do not eliminate it + + # The proper fix would be to wait (with some limits) until the + # connection has been made and drbd transitions from WFConnection + # into any other network-connected state (Connected, SyncTarget, + # SyncSource, etc.) 
+ + # 1st pass, assemble on all nodes in secondary mode for inst_disk in instance.disks: - master_result = None for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): cfg.SetDiskID(node_disk, node) - is_primary = node == instance.primary_node - result = rpc.call_blockdev_assemble(node, node_disk, is_primary) + result = rpc.call_blockdev_assemble(node, node_disk, iname, False) if not result: - logger.Error("could not prepare block device %s on node %s (is_pri" - "mary=%s)" % (inst_disk.iv_name, node, is_primary)) - if is_primary or not ignore_secondaries: + logger.Error("could not prepare block device %s on node %s" + " (is_primary=False, pass=1)" % (inst_disk.iv_name, node)) + if not ignore_secondaries: disks_ok = False - if is_primary: - master_result = result - device_info.append((instance.primary_node, inst_disk.iv_name, - master_result)) + + # FIXME: race condition on drbd migration to primary + + # 2nd pass, do only the primary node + for inst_disk in instance.disks: + for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): + if node != instance.primary_node: + continue + cfg.SetDiskID(node_disk, node) + result = rpc.call_blockdev_assemble(node, node_disk, iname, True) + if not result: + logger.Error("could not prepare block device %s on node %s" + " (is_primary=True, pass=2)" % (inst_disk.iv_name, node)) + disks_ok = False + device_info.append((instance.primary_node, inst_disk.iv_name, result)) + + # leave the disks configured for the primary node + # this is a workaround that would be fixed better by + # improving the logical/physical id handling + for disk in instance.disks: + cfg.SetDiskID(disk, instance.primary_node) return disks_ok, device_info def _StartInstanceDisks(cfg, instance, force): + """Start the disks of an instance. + + """ disks_ok, dummy = _AssembleInstanceDisks(instance, cfg, ignore_secondaries=force) if not disks_ok: @@ -1716,7 +2114,7 @@ def _StartInstanceDisks(cfg, instance, force): if force is not None and not force: logger.Error("If the message above refers to a secondary node," " you can retry the operation using '--force'.") - raise errors.OpExecError, ("Disk consistency error") + raise errors.OpExecError("Disk consistency error") class LUDeactivateInstanceDisks(NoHooksLU): @@ -1734,8 +2132,8 @@ class LUDeactivateInstanceDisks(NoHooksLU): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance def Exec(self, feedback_fn): @@ -1746,12 +2144,12 @@ class LUDeactivateInstanceDisks(NoHooksLU): ins_l = rpc.call_instance_list([instance.primary_node]) ins_l = ins_l[instance.primary_node] if not type(ins_l) is list: - raise errors.OpExecError, ("Can't contact node '%s'" % - instance.primary_node) + raise errors.OpExecError("Can't contact node '%s'" % + instance.primary_node) if self.instance.name in ins_l: - raise errors.OpExecError, ("Instance is running, can't shutdown" - " block devices.") + raise errors.OpExecError("Instance is running, can't shutdown" + " block devices.") _ShutdownInstanceDisks(instance, self.cfg) @@ -1777,6 +2175,36 @@ def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False): return result +def _CheckNodeFreeMemory(cfg, node, reason, requested): + """Checks if a node has enough free memory. 
+ + This function check if a given node has the needed amount of free + memory. In case the node has less memory or we cannot get the + information from the node, this function raise an OpPrereqError + exception. + + Args: + - cfg: a ConfigWriter instance + - node: the node name + - reason: string to use in the error message + - requested: the amount of memory in MiB + + """ + nodeinfo = rpc.call_node_info([node], cfg.GetVGName()) + if not nodeinfo or not isinstance(nodeinfo, dict): + raise errors.OpPrereqError("Could not contact node %s for resource" + " information" % (node,)) + + free_mem = nodeinfo[node].get('memory_free') + if not isinstance(free_mem, int): + raise errors.OpPrereqError("Can't compute free memory on node %s, result" + " was '%s'" % (node, free_mem)) + if requested > free_mem: + raise errors.OpPrereqError("Not enough memory on node %s for %s:" + " needed %s MiB, available %s MiB" % + (node, reason, requested, free_mem)) + + class LUStartupInstance(LogicalUnit): """Starts an instance. @@ -1808,15 +2236,15 @@ class LUStartupInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) # check bridges existance - brlist = [nic.bridge for nic in instance.nics] - if not rpc.call_bridges_exist(instance.primary_node, brlist): - raise errors.OpPrereqError, ("one or more target bridges %s does not" - " exist on destination node '%s'" % - (brlist, instance.primary_node)) + _CheckInstanceBridgesExist(instance) + + _CheckNodeFreeMemory(self.cfg, instance.primary_node, + "starting instance %s" % instance.name, + instance.memory) self.instance = instance self.op.instance_name = instance.name @@ -1829,27 +2257,89 @@ class LUStartupInstance(LogicalUnit): force = self.op.force extra_args = getattr(self.op, "extra_args", "") - node_current = instance.primary_node - - nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName()) - if not nodeinfo: - raise errors.OpExecError, ("Could not contact node %s for infos" % - (node_current)) + self.cfg.MarkInstanceUp(instance.name) - freememory = nodeinfo[node_current]['memory_free'] - memory = instance.memory - if memory > freememory: - raise errors.OpExecError, ("Not enough memory to start instance" - " %s on node %s" - " needed %s MiB, available %s MiB" % - (instance.name, node_current, memory, - freememory)) + node_current = instance.primary_node _StartInstanceDisks(self.cfg, instance, force) if not rpc.call_instance_start(node_current, instance, extra_args): _ShutdownInstanceDisks(instance, self.cfg) - raise errors.OpExecError, ("Could not start instance") + raise errors.OpExecError("Could not start instance") + + +class LURebootInstance(LogicalUnit): + """Reboot an instance. + + """ + HPATH = "instance-reboot" + HTYPE = constants.HTYPE_INSTANCE + _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"] + + def BuildHooksEnv(self): + """Build hooks env. + + This runs on master, primary and secondary nodes of the instance. + + """ + env = { + "IGNORE_SECONDARIES": self.op.ignore_secondaries, + } + env.update(_BuildInstanceHookEnvByObject(self.instance)) + nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + + list(self.instance.secondary_nodes)) + return env, nl, nl + + def CheckPrereq(self): + """Check prerequisites. + + This checks that the instance is in the cluster. 
+ + """ + instance = self.cfg.GetInstanceInfo( + self.cfg.ExpandInstanceName(self.op.instance_name)) + if instance is None: + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + + # check bridges existance + _CheckInstanceBridgesExist(instance) + + self.instance = instance + self.op.instance_name = instance.name + + def Exec(self, feedback_fn): + """Reboot the instance. + + """ + instance = self.instance + ignore_secondaries = self.op.ignore_secondaries + reboot_type = self.op.reboot_type + extra_args = getattr(self.op, "extra_args", "") + + node_current = instance.primary_node + + if reboot_type not in [constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD, + constants.INSTANCE_REBOOT_FULL]: + raise errors.ParameterError("reboot type not in [%s, %s, %s]" % + (constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD, + constants.INSTANCE_REBOOT_FULL)) + + if reboot_type in [constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD]: + if not rpc.call_instance_reboot(node_current, instance, + reboot_type, extra_args): + raise errors.OpExecError("Could not reboot instance") + else: + if not rpc.call_instance_shutdown(node_current, instance): + raise errors.OpExecError("could not shutdown instance for full reboot") + _ShutdownInstanceDisks(instance, self.cfg) + _StartInstanceDisks(self.cfg, instance, ignore_secondaries) + if not rpc.call_instance_start(node_current, instance, extra_args): + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Could not start instance for full reboot") self.cfg.MarkInstanceUp(instance.name) @@ -1882,8 +2372,8 @@ class LUShutdownInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance def Exec(self, feedback_fn): @@ -1892,10 +2382,10 @@ class LUShutdownInstance(LogicalUnit): """ instance = self.instance node_current = instance.primary_node + self.cfg.MarkInstanceDown(instance.name) if not rpc.call_instance_shutdown(node_current, instance): logger.Error("could not shutdown instance") - self.cfg.MarkInstanceDown(instance.name) _ShutdownInstanceDisks(instance, self.cfg) @@ -1927,19 +2417,19 @@ class LUReinstallInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) if instance.disk_template == constants.DT_DISKLESS: - raise errors.OpPrereqError, ("Instance '%s' has no disks" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' has no disks" % + self.op.instance_name) if instance.status != "down": - raise errors.OpPrereqError, ("Instance '%s' is marked to be up" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' is marked to be up" % + self.op.instance_name) remote_info = rpc.call_instance_info(instance.primary_node, instance.name) if remote_info: - raise errors.OpPrereqError, ("Instance '%s' is running on the node %s" % - (self.op.instance_name, - instance.primary_node)) + raise errors.OpPrereqError("Instance '%s' is running on the node %s" % + (self.op.instance_name, + instance.primary_node)) self.op.os_type = getattr(self.op, "os_type", None) if 
self.op.os_type is not None: @@ -1947,12 +2437,12 @@ class LUReinstallInstance(LogicalUnit): pnode = self.cfg.GetNodeInfo( self.cfg.ExpandNodeName(instance.primary_node)) if pnode is None: - raise errors.OpPrereqError, ("Primary node '%s' is unknown" % - self.op.pnode) - os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name] - if not isinstance(os_obj, objects.OS): - raise errors.OpPrereqError, ("OS '%s' not in supported OS list for" - " primary node" % self.op.os_type) + raise errors.OpPrereqError("Primary node '%s' is unknown" % + self.op.pnode) + os_obj = rpc.call_os_get(pnode.name, self.op.os_type) + if not os_obj: + raise errors.OpPrereqError("OS '%s' not in supported OS list for" + " primary node" % self.op.os_type) self.instance = instance @@ -1971,9 +2461,113 @@ class LUReinstallInstance(LogicalUnit): try: feedback_fn("Running the instance OS create scripts...") if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"): - raise errors.OpExecError, ("Could not install OS for instance %s " - "on node %s" % - (inst.name, inst.primary_node)) + raise errors.OpExecError("Could not install OS for instance %s" + " on node %s" % + (inst.name, inst.primary_node)) + finally: + _ShutdownInstanceDisks(inst, self.cfg) + + +class LURenameInstance(LogicalUnit): + """Rename an instance. + + """ + HPATH = "instance-rename" + HTYPE = constants.HTYPE_INSTANCE + _OP_REQP = ["instance_name", "new_name"] + + def BuildHooksEnv(self): + """Build hooks env. + + This runs on master, primary and secondary nodes of the instance. + + """ + env = _BuildInstanceHookEnvByObject(self.instance) + env["INSTANCE_NEW_NAME"] = self.op.new_name + nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + + list(self.instance.secondary_nodes)) + return env, nl, nl + + def CheckPrereq(self): + """Check prerequisites. + + This checks that the instance is in the cluster and is not running. + + """ + instance = self.cfg.GetInstanceInfo( + self.cfg.ExpandInstanceName(self.op.instance_name)) + if instance is None: + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + if instance.status != "down": + raise errors.OpPrereqError("Instance '%s' is marked to be up" % + self.op.instance_name) + remote_info = rpc.call_instance_info(instance.primary_node, instance.name) + if remote_info: + raise errors.OpPrereqError("Instance '%s' is running on the node %s" % + (self.op.instance_name, + instance.primary_node)) + self.instance = instance + + # new name verification + name_info = utils.HostInfo(self.op.new_name) + + self.op.new_name = new_name = name_info.name + instance_list = self.cfg.GetInstanceList() + if new_name in instance_list: + raise errors.OpPrereqError("Instance '%s' is already in the cluster" % + new_name) + + if not getattr(self.op, "ignore_ip", False): + command = ["fping", "-q", name_info.ip] + result = utils.RunCmd(command) + if not result.failed: + raise errors.OpPrereqError("IP %s of instance %s already in use" % + (name_info.ip, new_name)) + + + def Exec(self, feedback_fn): + """Reinstall the instance. 
+ + """ + inst = self.instance + old_name = inst.name + + if inst.disk_template == constants.DT_FILE: + old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1]) + + self.cfg.RenameInstance(inst.name, self.op.new_name) + + # re-read the instance from the configuration after rename + inst = self.cfg.GetInstanceInfo(self.op.new_name) + + if inst.disk_template == constants.DT_FILE: + new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1]) + result = rpc.call_file_storage_dir_rename(inst.primary_node, + old_file_storage_dir, + new_file_storage_dir) + + if not result: + raise errors.OpExecError("Could not connect to node '%s' to rename" + " directory '%s' to '%s' (but the instance" + " has been renamed in Ganeti)" % ( + inst.primary_node, old_file_storage_dir, + new_file_storage_dir)) + + if not result[0]: + raise errors.OpExecError("Could not rename directory '%s' to '%s'" + " (but the instance has been renamed in" + " Ganeti)" % (old_file_storage_dir, + new_file_storage_dir)) + + _StartInstanceDisks(self.cfg, inst, None) + try: + if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name, + "sda", "sdb"): + msg = ("Could run OS rename script for instance %s on node %s (but the" + " instance has been renamed in Ganeti)" % + (inst.name, inst.primary_node)) + logger.Error(msg) finally: _ShutdownInstanceDisks(inst, self.cfg) @@ -1993,8 +2587,7 @@ class LURemoveInstance(LogicalUnit): """ env = _BuildInstanceHookEnvByObject(self.instance) - nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + - list(self.instance.secondary_nodes)) + nl = [self.sstore.GetMasterNode()] return env, nl, nl def CheckPrereq(self): @@ -2006,8 +2599,8 @@ class LURemoveInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance def Exec(self, feedback_fn): @@ -2019,12 +2612,19 @@ class LURemoveInstance(LogicalUnit): (instance.name, instance.primary_node)) if not rpc.call_instance_shutdown(instance.primary_node, instance): - raise errors.OpExecError, ("Could not shutdown instance %s on node %s" % + if self.op.ignore_failures: + feedback_fn("Warning: can't shutdown instance") + else: + raise errors.OpExecError("Could not shutdown instance %s on node %s" % (instance.name, instance.primary_node)) logger.Info("removing block devices for instance %s" % instance.name) - _RemoveDisks(instance, self.cfg) + if not _RemoveDisks(instance, self.cfg): + if self.op.ignore_failures: + feedback_fn("Warning: can't remove instance's disks") + else: + raise errors.OpExecError("Can't remove instance's disks") logger.Info("removing instance %s out of cluster config" % instance.name) @@ -2035,7 +2635,7 @@ class LUQueryInstances(NoHooksLU): """Logical unit for querying instances. """ - _OP_REQP = ["output_fields"] + _OP_REQP = ["output_fields", "names"] def CheckPrereq(self): """Check prerequisites. @@ -2043,18 +2643,21 @@ class LUQueryInstances(NoHooksLU): This checks that the fields required are valid output fields. 
""" - self.dynamic_fields = frozenset(["oper_state", "oper_ram"]) + self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"]) _CheckOutputFields(static=["name", "os", "pnode", "snodes", "admin_state", "admin_ram", - "disk_template", "ip", "mac", "bridge"], + "disk_template", "ip", "mac", "bridge", + "sda_size", "sdb_size", "vcpus"], dynamic=self.dynamic_fields, selected=self.op.output_fields) + self.wanted = _GetWantedInstances(self, self.op.names) + def Exec(self, feedback_fn): """Computes the list of nodes and their attributes. """ - instance_names = utils.NiceSort(self.cfg.GetInstanceList()) + instance_names = self.wanted instance_list = [self.cfg.GetInstanceInfo(iname) for iname in instance_names] @@ -2089,25 +2692,34 @@ class LUQueryInstances(NoHooksLU): elif field == "pnode": val = instance.primary_node elif field == "snodes": - val = ",".join(instance.secondary_nodes) or "-" + val = list(instance.secondary_nodes) elif field == "admin_state": - if instance.status == "down": - val = "no" - else: - val = "yes" + val = (instance.status != "down") elif field == "oper_state": if instance.primary_node in bad_nodes: - val = "(node down)" + val = None + else: + val = bool(live_data.get(instance.name)) + elif field == "status": + if instance.primary_node in bad_nodes: + val = "ERROR_nodedown" else: - if live_data.get(instance.name): - val = "running" + running = bool(live_data.get(instance.name)) + if running: + if instance.status != "down": + val = "running" + else: + val = "ERROR_up" else: - val = "stopped" + if instance.status != "down": + val = "ERROR_down" + else: + val = "ADMIN_down" elif field == "admin_ram": val = instance.memory elif field == "oper_ram": if instance.primary_node in bad_nodes: - val = "(node down)" + val = None elif instance.name in live_data: val = live_data[instance.name].get("memory", "?") else: @@ -2120,9 +2732,16 @@ class LUQueryInstances(NoHooksLU): val = instance.nics[0].bridge elif field == "mac": val = instance.nics[0].mac + elif field == "sda_size" or field == "sdb_size": + disk = instance.FindDisk(field[:3]) + if disk is None: + val = None + else: + val = disk.size + elif field == "vcpus": + val = instance.vcpus else: - raise errors.ParameterError, field - val = str(val) + raise errors.ParameterError(field) iout.append(val) output.append(iout) @@ -2159,28 +2778,29 @@ class LUFailoverInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + + if instance.disk_template not in constants.DTS_NET_MIRROR: + raise errors.OpPrereqError("Instance's disk layout is not" + " network mirrored, cannot failover.") + + secondary_nodes = instance.secondary_nodes + if not secondary_nodes: + raise errors.ProgrammerError("no secondary node but using " + "DT_REMOTE_RAID1 template") + target_node = secondary_nodes[0] # check memory requirements on the secondary node - target_node = instance.secondary_nodes[0] - nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName()) - info = nodeinfo.get(target_node, None) - if not info: - raise errors.OpPrereqError, ("Cannot get current information" - " from node '%s'" % nodeinfo) - if instance.memory > info['memory_free']: - raise errors.OpPrereqError, ("Not enough memory on target node %s." 
- " %d MB available, %d MB required" % - (target_node, info['memory_free'], - instance.memory)) + _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" % + instance.name, instance.memory) # check bridge existance brlist = [nic.bridge for nic in instance.nics] - if not rpc.call_bridges_exist(instance.primary_node, brlist): - raise errors.OpPrereqError, ("one or more target bridges %s does not" - " exist on destination node '%s'" % - (brlist, instance.primary_node)) + if not rpc.call_bridges_exist(target_node, brlist): + raise errors.OpPrereqError("One or more target bridges %s does not" + " exist on destination node '%s'" % + (brlist, target_node)) self.instance = instance @@ -2200,60 +2820,51 @@ class LUFailoverInstance(LogicalUnit): for dev in instance.disks: # for remote_raid1, these are md over drbd if not _CheckDiskConsistency(self.cfg, dev, target_node, False): - if not self.op.ignore_consistency: - raise errors.OpExecError, ("Disk %s is degraded on target node," - " aborting failover." % dev.iv_name) - - feedback_fn("* checking target node resource availability") - nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName()) - - if not nodeinfo: - raise errors.OpExecError, ("Could not contact target node %s." % - target_node) - - free_memory = int(nodeinfo[target_node]['memory_free']) - memory = instance.memory - if memory > free_memory: - raise errors.OpExecError, ("Not enough memory to create instance %s on" - " node %s. needed %s MiB, available %s MiB" % - (instance.name, target_node, memory, - free_memory)) + if instance.status == "up" and not self.op.ignore_consistency: + raise errors.OpExecError("Disk %s is degraded on target node," + " aborting failover." % dev.iv_name) feedback_fn("* shutting down instance on source node") logger.Info("Shutting down instance %s on node %s" % (instance.name, source_node)) if not rpc.call_instance_shutdown(source_node, instance): - logger.Error("Could not shutdown instance %s on node %s. Proceeding" - " anyway. Please make sure node %s is down" % - (instance.name, source_node, source_node)) + if self.op.ignore_consistency: + logger.Error("Could not shutdown instance %s on node %s. Proceeding" + " anyway. 
Please make sure node %s is down" % + (instance.name, source_node, source_node)) + else: + raise errors.OpExecError("Could not shutdown instance %s on node %s" % + (instance.name, source_node)) feedback_fn("* deactivating the instance's disks on source node") if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True): - raise errors.OpExecError, ("Can't shut down the instance's disks.") + raise errors.OpExecError("Can't shut down the instance's disks.") instance.primary_node = target_node # distribute new instance config to the other nodes self.cfg.AddInstance(instance) - feedback_fn("* activating the instance's disks on target node") - logger.Info("Starting instance %s on node %s" % - (instance.name, target_node)) + # Only start the instance if it's marked as up + if instance.status == "up": + feedback_fn("* activating the instance's disks on target node") + logger.Info("Starting instance %s on node %s" % + (instance.name, target_node)) - disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg, - ignore_secondaries=True) - if not disks_ok: - _ShutdownInstanceDisks(instance, self.cfg) - raise errors.OpExecError, ("Can't activate the instance's disks") + disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg, + ignore_secondaries=True) + if not disks_ok: + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Can't activate the instance's disks") - feedback_fn("* starting the instance on the target node") - if not rpc.call_instance_start(target_node, instance, None): - _ShutdownInstanceDisks(instance, self.cfg) - raise errors.OpExecError("Could not start instance %s on node %s." % - (instance.name, target_node)) + feedback_fn("* starting the instance on the target node") + if not rpc.call_instance_start(target_node, instance, None): + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Could not start instance %s on node %s." % + (instance.name, target_node)) -def _CreateBlockDevOnPrimary(cfg, node, device, info): +def _CreateBlockDevOnPrimary(cfg, node, instance, device, info): """Create a tree of block devices on the primary node. This always creates all devices. @@ -2261,11 +2872,12 @@ def _CreateBlockDevOnPrimary(cfg, node, device, info): """ if device.children: for child in device.children: - if not _CreateBlockDevOnPrimary(cfg, node, child, info): + if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info): return False cfg.SetDiskID(device, node) - new_id = rpc.call_blockdev_create(node, device, device.size, True, info) + new_id = rpc.call_blockdev_create(node, device, device.size, + instance.name, True, info) if not new_id: return False if device.physical_id is None: @@ -2273,7 +2885,7 @@ def _CreateBlockDevOnPrimary(cfg, node, device, info): return True -def _CreateBlockDevOnSecondary(cfg, node, device, force, info): +def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info): """Create a tree of block devices on a secondary node. 
If this device type has to be created on secondaries, create it and @@ -2286,13 +2898,15 @@ def _CreateBlockDevOnSecondary(cfg, node, device, force, info): force = True if device.children: for child in device.children: - if not _CreateBlockDevOnSecondary(cfg, node, child, force, info): + if not _CreateBlockDevOnSecondary(cfg, node, instance, + child, force, info): return False if not force: return True cfg.SetDiskID(device, node) - new_id = rpc.call_blockdev_create(node, device, device.size, False, info) + new_id = rpc.call_blockdev_create(node, device, device.size, + instance.name, False, info) if not new_id: return False if device.physical_id is None: @@ -2319,82 +2933,88 @@ def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names): """ port = cfg.AllocatePort() vgname = cfg.GetVGName() - dev_data = objects.Disk(dev_type="lvm", size=size, + dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, logical_id=(vgname, names[0])) - dev_meta = objects.Disk(dev_type="lvm", size=128, + dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128, logical_id=(vgname, names[1])) - drbd_dev = objects.Disk(dev_type="drbd", size=size, + drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size, logical_id = (primary, secondary, port), children = [dev_data, dev_meta]) return drbd_dev +def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name): + """Generate a drbd8 device complete with its children. + + """ + port = cfg.AllocatePort() + vgname = cfg.GetVGName() + dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, + logical_id=(vgname, names[0])) + dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128, + logical_id=(vgname, names[1])) + drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size, + logical_id = (primary, secondary, port), + children = [dev_data, dev_meta], + iv_name=iv_name) + return drbd_dev + + def _GenerateDiskTemplate(cfg, template_name, instance_name, primary_node, - secondary_nodes, disk_sz, swap_sz): + secondary_nodes, disk_sz, swap_sz, + file_storage_dir, file_driver): """Generate the entire disk layout for a given template type. 
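# Editor's note: for orientation, the device tree returned by the
# _GenerateDRBD8Branch helper above (and used by the drbd8 template below)
# looks roughly like this; the LV names are illustrative placeholders for
# the unique names produced by _GenerateUniqueNames:
#
#   objects.Disk(dev_type=constants.LD_DRBD8, size=disk_sz, iv_name="sda",
#                logical_id=(primary_node, secondary_node, port),
#                children=[
#                  objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
#                               logical_id=(vgname, "<uuid>.sda_data")),
#                  objects.Disk(dev_type=constants.LD_LV, size=128,
#                               logical_id=(vgname, "<uuid>.sda_meta")),
#                ])
#
# i.e. one DRBD8 device per instance disk, backed on each of the two nodes
# named in logical_id by a data LV and a 128 MB metadata LV.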
""" #TODO: compute space requirements vgname = cfg.GetVGName() - if template_name == "diskless": + if template_name == constants.DT_DISKLESS: disks = [] - elif template_name == "plain": + elif template_name == constants.DT_PLAIN: if len(secondary_nodes) != 0: raise errors.ProgrammerError("Wrong template configuration") names = _GenerateUniqueNames(cfg, [".sda", ".sdb"]) - sda_dev = objects.Disk(dev_type="lvm", size=disk_sz, + sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz, logical_id=(vgname, names[0]), iv_name = "sda") - sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz, + sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz, logical_id=(vgname, names[1]), iv_name = "sdb") disks = [sda_dev, sdb_dev] - elif template_name == "local_raid1": - if len(secondary_nodes) != 0: - raise errors.ProgrammerError("Wrong template configuration") - - - names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2", - ".sdb_m1", ".sdb_m2"]) - sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz, - logical_id=(vgname, names[0])) - sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz, - logical_id=(vgname, names[1])) - md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda", - size=disk_sz, - children = [sda_dev_m1, sda_dev_m2]) - sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz, - logical_id=(vgname, names[2])) - sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz, - logical_id=(vgname, names[3])) - md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb", - size=swap_sz, - children = [sdb_dev_m1, sdb_dev_m2]) - disks = [md_sda_dev, md_sdb_dev] - elif template_name == "remote_raid1": + elif template_name == constants.DT_DRBD8: if len(secondary_nodes) != 1: raise errors.ProgrammerError("Wrong template configuration") remote_node = secondary_nodes[0] names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta", ".sdb_data", ".sdb_meta"]) - drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node, - disk_sz, names[0:2]) - md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda", - children = [drbd_sda_dev], size=disk_sz) - drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node, - swap_sz, names[2:4]) - md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb", - children = [drbd_sdb_dev], size=swap_sz) - disks = [md_sda_dev, md_sdb_dev] + drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node, + disk_sz, names[0:2], "sda") + drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node, + swap_sz, names[2:4], "sdb") + disks = [drbd_sda_dev, drbd_sdb_dev] + elif template_name == constants.DT_FILE: + if len(secondary_nodes) != 0: + raise errors.ProgrammerError("Wrong template configuration") + + file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz, + iv_name="sda", logical_id=(file_driver, + "%s/sda" % file_storage_dir)) + file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz, + iv_name="sdb", logical_id=(file_driver, + "%s/sdb" % file_storage_dir)) + disks = [file_sda_dev, file_sdb_dev] else: raise errors.ProgrammerError("Invalid disk template '%s'" % template_name) return disks def _GetInstanceInfoText(instance): + """Compute that text that should be added to the disk's metadata. 
+ + """ return "originstname+%s" % instance.name @@ -2412,21 +3032,36 @@ def _CreateDisks(cfg, instance): """ info = _GetInstanceInfoText(instance) + if instance.disk_template == constants.DT_FILE: + file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) + result = rpc.call_file_storage_dir_create(instance.primary_node, + file_storage_dir) + + if not result: + logger.Error("Could not connect to node '%s'" % instance.primary_node) + return False + + if not result[0]: + logger.Error("failed to create directory '%s'" % file_storage_dir) + return False + for device in instance.disks: logger.Info("creating volume %s for instance %s" % - (device.iv_name, instance.name)) + (device.iv_name, instance.name)) #HARDCODE for secondary_node in instance.secondary_nodes: - if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False, - info): + if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance, + device, False, info): logger.Error("failed to create volume %s (%s) on secondary node %s!" % (device.iv_name, device, secondary_node)) return False #HARDCODE - if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info): + if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, + instance, device, info): logger.Error("failed to create volume %s on primary!" % device.iv_name) return False + return True @@ -2435,7 +3070,7 @@ def _RemoveDisks(instance, cfg): This abstracts away some work from `AddInstance()` and `RemoveInstance()`. Note that in case some of the devices couldn't - be remove, the removal will continue with the other ones (compare + be removed, the removal will continue with the other ones (compare with `_CreateDisks()`). Args: @@ -2456,18 +3091,85 @@ def _RemoveDisks(instance, cfg): " continuing anyway" % (device.iv_name, node)) result = False + + if instance.disk_template == constants.DT_FILE: + file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) + if not rpc.call_file_storage_dir_remove(instance.primary_node, + file_storage_dir): + logger.Error("could not remove directory '%s'" % file_storage_dir) + result = False + return result +def _ComputeDiskSize(disk_template, disk_size, swap_size): + """Compute disk size requirements in the volume group + + This is currently hard-coded for the two-drive layout. + + """ + # Required free disk space as a function of disk and swap space + req_size_dict = { + constants.DT_DISKLESS: None, + constants.DT_PLAIN: disk_size + swap_size, + # 256 MB are added for drbd metadata, 128MB for each drbd device + constants.DT_DRBD8: disk_size + swap_size + 256, + constants.DT_FILE: None, + } + + if disk_template not in req_size_dict: + raise errors.ProgrammerError("Disk template '%s' size requirement" + " is unknown" % disk_template) + + return req_size_dict[disk_template] + + class LUCreateInstance(LogicalUnit): """Create an instance. """ HPATH = "instance-add" HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode", + _OP_REQP = ["instance_name", "mem_size", "disk_size", "disk_template", "swap_size", "mode", "start", "vcpus", - "wait_for_sync"] + "wait_for_sync", "ip_check", "mac"] + + def _RunAllocator(self): + """Run the allocator based on input opcode. 
+ + """ + disks = [{"size": self.op.disk_size, "mode": "w"}, + {"size": self.op.swap_size, "mode": "w"}] + nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None), + "bridge": self.op.bridge}] + ial = IAllocator(self.cfg, self.sstore, + name=self.op.instance_name, + disk_template=self.op.disk_template, + tags=[], + os=self.op.os_type, + vcpus=self.op.vcpus, + mem_size=self.op.mem_size, + disks=disks, + nics=nics, + mode=constants.IALLOCATOR_MODE_ALLOC) + + ial.Run(self.op.iallocator) + + if not ial.success: + raise errors.OpPrereqError("Can't compute nodes using" + " iallocator '%s': %s" % (self.op.iallocator, + ial.info)) + if len(ial.nodes) != ial.required_nodes: + raise errors.OpPrereqError("iallocator '%s' returned invalid number" + " of nodes (%s), required %s" % + (len(ial.nodes), ial.required_nodes)) + self.op.pnode = ial.nodes[0] + logger.ToStdout("Selected nodes for the instance: %s" % + (", ".join(ial.nodes),)) + logger.Info("Selected nodes for instance %s via iallocator %s: %s" % + (self.op.instance_name, self.op.iallocator, ial.nodes)) + if ial.required_nodes == 2: + self.op.snode = ial.nodes[1] def BuildHooksEnv(self): """Build hooks env. @@ -2493,7 +3195,7 @@ class LUCreateInstance(LogicalUnit): os_type=self.op.os_type, memory=self.op.mem_size, vcpus=self.op.vcpus, - nics=[(self.inst_ip, self.op.bridge)], + nics=[(self.inst_ip, self.op.bridge, self.op.mac)], )) nl = ([self.sstore.GetMasterNode(), self.op.pnode] + @@ -2505,41 +3207,52 @@ class LUCreateInstance(LogicalUnit): """Check prerequisites. """ + # set optional parameters to none if they don't exist + for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode", + "iallocator"]: + if not hasattr(self.op, attr): + setattr(self.op, attr, None) + if self.op.mode not in (constants.INSTANCE_CREATE, constants.INSTANCE_IMPORT): - raise errors.OpPrereqError, ("Invalid instance creation mode '%s'" % - self.op.mode) + raise errors.OpPrereqError("Invalid instance creation mode '%s'" % + self.op.mode) + + if (not self.cfg.GetVGName() and + self.op.disk_template not in constants.DTS_NOT_LVM): + raise errors.OpPrereqError("Cluster does not support lvm-based" + " instances") if self.op.mode == constants.INSTANCE_IMPORT: src_node = getattr(self.op, "src_node", None) src_path = getattr(self.op, "src_path", None) if src_node is None or src_path is None: - raise errors.OpPrereqError, ("Importing an instance requires source" - " node and path options") + raise errors.OpPrereqError("Importing an instance requires source" + " node and path options") src_node_full = self.cfg.ExpandNodeName(src_node) if src_node_full is None: - raise errors.OpPrereqError, ("Unknown source node '%s'" % src_node) + raise errors.OpPrereqError("Unknown source node '%s'" % src_node) self.op.src_node = src_node = src_node_full if not os.path.isabs(src_path): - raise errors.OpPrereqError, ("The source path must be absolute") + raise errors.OpPrereqError("The source path must be absolute") export_info = rpc.call_export_info(src_node, src_path) if not export_info: - raise errors.OpPrereqError, ("No export found in dir %s" % src_path) + raise errors.OpPrereqError("No export found in dir %s" % src_path) if not export_info.has_section(constants.INISECT_EXP): - raise errors.ProgrammerError, ("Corrupted export config") + raise errors.ProgrammerError("Corrupted export config") ei_version = export_info.get(constants.INISECT_EXP, 'version') if (int(ei_version) != constants.EXPORT_VERSION): - raise errors.OpPrereqError, ("Wrong export version %s (wanted %d)" % - 
(ei_version, constants.EXPORT_VERSION)) + raise errors.OpPrereqError("Wrong export version %s (wanted %d)" % + (ei_version, constants.EXPORT_VERSION)) if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1: - raise errors.OpPrereqError, ("Can't import instance with more than" - " one data disk") + raise errors.OpPrereqError("Can't import instance with more than" + " one data disk") # FIXME: are the old os-es, disk sizes, etc. useful? self.op.os_type = export_info.get(constants.INISECT_EXP, 'os') @@ -2548,98 +3261,50 @@ class LUCreateInstance(LogicalUnit): self.src_image = diskimage else: # INSTANCE_CREATE if getattr(self.op, "os_type", None) is None: - raise errors.OpPrereqError, ("No guest OS specified") + raise errors.OpPrereqError("No guest OS specified") + + #### instance parameters check - # check primary node - pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode)) - if pnode is None: - raise errors.OpPrereqError, ("Primary node '%s' is unknown" % - self.op.pnode) - self.op.pnode = pnode.name - self.pnode = pnode - self.secondaries = [] # disk template and mirror node verification if self.op.disk_template not in constants.DISK_TEMPLATES: - raise errors.OpPrereqError, ("Invalid disk template name") - - if self.op.disk_template == constants.DT_REMOTE_RAID1: - if getattr(self.op, "snode", None) is None: - raise errors.OpPrereqError, ("The 'remote_raid1' disk template needs" - " a mirror node") - - snode_name = self.cfg.ExpandNodeName(self.op.snode) - if snode_name is None: - raise errors.OpPrereqError, ("Unknown secondary node '%s'" % - self.op.snode) - elif snode_name == pnode.name: - raise errors.OpPrereqError, ("The secondary node cannot be" - " the primary node.") - self.secondaries.append(snode_name) - - # Check lv size requirements - nodenames = [pnode.name] + self.secondaries - nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName()) - - # Required free disk space as a function of disk and swap space - req_size_dict = { - constants.DT_DISKLESS: 0, - constants.DT_PLAIN: self.op.disk_size + self.op.swap_size, - constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2, - # 256 MB are added for drbd metadata, 128MB for each drbd device - constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256, - } - - if self.op.disk_template not in req_size_dict: - raise errors.ProgrammerError, ("Disk template '%s' size requirement" - " is unknown" % self.op.disk_template) - - req_size = req_size_dict[self.op.disk_template] - - for node in nodenames: - info = nodeinfo.get(node, None) - if not info: - raise errors.OpPrereqError, ("Cannot get current information" - " from node '%s'" % nodeinfo) - if req_size > info['vg_free']: - raise errors.OpPrereqError, ("Not enough disk space on target node %s." 
- " %d MB available, %d MB required" % - (node, info['vg_free'], req_size)) - - # os verification - os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name] - if not isinstance(os_obj, objects.OS): - raise errors.OpPrereqError, ("OS '%s' not in supported os list for" - " primary node" % self.op.os_type) + raise errors.OpPrereqError("Invalid disk template name") - # instance verification - hostname1 = utils.LookupHostname(self.op.instance_name) - if not hostname1: - raise errors.OpPrereqError, ("Instance name '%s' not found in dns" % - self.op.instance_name) + # instance name verification + hostname1 = utils.HostInfo(self.op.instance_name) - self.op.instance_name = instance_name = hostname1['hostname'] + self.op.instance_name = instance_name = hostname1.name instance_list = self.cfg.GetInstanceList() if instance_name in instance_list: - raise errors.OpPrereqError, ("Instance '%s' is already in the cluster" % - instance_name) + raise errors.OpPrereqError("Instance '%s' is already in the cluster" % + instance_name) + # ip validity checks ip = getattr(self.op, "ip", None) if ip is None or ip.lower() == "none": inst_ip = None elif ip.lower() == "auto": - inst_ip = hostname1['ip'] + inst_ip = hostname1.ip else: if not utils.IsValidIP(ip): - raise errors.OpPrereqError, ("given IP address '%s' doesn't look" - " like a valid IP" % ip) + raise errors.OpPrereqError("given IP address '%s' doesn't look" + " like a valid IP" % ip) inst_ip = ip - self.inst_ip = inst_ip + self.inst_ip = self.op.ip = inst_ip + + if self.op.start and not self.op.ip_check: + raise errors.OpPrereqError("Cannot ignore IP address conflicts when" + " adding an instance in start mode") + + if self.op.ip_check: + if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT): + raise errors.OpPrereqError("IP %s of instance %s already in use" % + (hostname1.ip, instance_name)) - command = ["fping", "-q", hostname1['ip']] - result = utils.RunCmd(command) - if not result.failed: - raise errors.OpPrereqError, ("IP %s of instance %s already in use" % - (hostname1['ip'], instance_name)) + # MAC address verification + if self.op.mac != "auto": + if not utils.IsValidMac(self.op.mac.lower()): + raise errors.OpPrereqError("invalid MAC address specified: %s" % + self.op.mac) # bridge verification bridge = getattr(self.op, "bridge", None) @@ -2648,10 +3313,91 @@ class LUCreateInstance(LogicalUnit): else: self.op.bridge = bridge + # boot order verification + if self.op.hvm_boot_order is not None: + if len(self.op.hvm_boot_order.strip("acdn")) != 0: + raise errors.OpPrereqError("invalid boot order specified," + " must be one or more of [acdn]") + # file storage checks + if (self.op.file_driver and + not self.op.file_driver in constants.FILE_DRIVER): + raise errors.OpPrereqError("Invalid file driver name '%s'" % + self.op.file_driver) + + if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir): + raise errors.OpPrereqError("File storage directory not a relative" + " path") + #### allocator run + + if [self.op.iallocator, self.op.pnode].count(None) != 1: + raise errors.OpPrereqError("One and only one of iallocator and primary" + " node must be given") + + if self.op.iallocator is not None: + self._RunAllocator() + + #### node related checks + + # check primary node + pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode)) + if pnode is None: + raise errors.OpPrereqError("Primary node '%s' is unknown" % + self.op.pnode) + self.op.pnode = pnode.name + self.pnode = pnode + self.secondaries = [] + + # mirror node 
verification + if self.op.disk_template in constants.DTS_NET_MIRROR: + if getattr(self.op, "snode", None) is None: + raise errors.OpPrereqError("The networked disk templates need" + " a mirror node") + + snode_name = self.cfg.ExpandNodeName(self.op.snode) + if snode_name is None: + raise errors.OpPrereqError("Unknown secondary node '%s'" % + self.op.snode) + elif snode_name == pnode.name: + raise errors.OpPrereqError("The secondary node cannot be" + " the primary node.") + self.secondaries.append(snode_name) + + req_size = _ComputeDiskSize(self.op.disk_template, + self.op.disk_size, self.op.swap_size) + + # Check lv size requirements + if req_size is not None: + nodenames = [pnode.name] + self.secondaries + nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName()) + for node in nodenames: + info = nodeinfo.get(node, None) + if not info: + raise errors.OpPrereqError("Cannot get current information" + " from node '%s'" % nodeinfo) + vg_free = info.get('vg_free', None) + if not isinstance(vg_free, int): + raise errors.OpPrereqError("Can't compute free disk space on" + " node %s" % node) + if req_size > info['vg_free']: + raise errors.OpPrereqError("Not enough disk space on target node %s." + " %d MB available, %d MB required" % + (node, info['vg_free'], req_size)) + + # os verification + os_obj = rpc.call_os_get(pnode.name, self.op.os_type) + if not os_obj: + raise errors.OpPrereqError("OS '%s' not in supported os list for" + " primary node" % self.op.os_type) + + if self.op.kernel_path == constants.VALUE_NONE: + raise errors.OpPrereqError("Can't set instance kernel to none") + + + # bridge check on primary node if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]): - raise errors.OpPrereqError, ("target bridge '%s' does not exist on" - " destination node '%s'" % - (self.op.bridge, pnode.name)) + raise errors.OpPrereqError("target bridge '%s' does not exist on" + " destination node '%s'" % + (self.op.bridge, pnode.name)) if self.op.start: self.instance_status = 'up' @@ -2665,15 +3411,40 @@ class LUCreateInstance(LogicalUnit): instance = self.op.instance_name pnode_name = self.pnode.name - nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC()) + if self.op.mac == "auto": + mac_address = self.cfg.GenerateMAC() + else: + mac_address = self.op.mac + + nic = objects.NIC(bridge=self.op.bridge, mac=mac_address) if self.inst_ip is not None: nic.ip = self.inst_ip - disks = _GenerateDiskTemplate(self.cfg, + ht_kind = self.sstore.GetHypervisorType() + if ht_kind in constants.HTS_REQ_PORT: + network_port = self.cfg.AllocatePort() + else: + network_port = None + + # this is needed because os.path.join does not accept None arguments + if self.op.file_storage_dir is None: + string_file_storage_dir = "" + else: + string_file_storage_dir = self.op.file_storage_dir + + # build the full file storage dir path + file_storage_dir = os.path.normpath(os.path.join( + self.sstore.GetFileStorageDir(), + string_file_storage_dir, instance)) + + + disks = _GenerateDiskTemplate(self.cfg, self.op.disk_template, instance, pnode_name, self.secondaries, self.op.disk_size, - self.op.swap_size) + self.op.swap_size, + file_storage_dir, + self.op.file_driver) iobj = objects.Instance(name=instance, os=self.op.os_type, primary_node=pnode_name, @@ -2682,32 +3453,36 @@ class LUCreateInstance(LogicalUnit): nics=[nic], disks=disks, disk_template=self.op.disk_template, status=self.instance_status, + network_port=network_port, + kernel_path=self.op.kernel_path, + initrd_path=self.op.initrd_path, + 
hvm_boot_order=self.op.hvm_boot_order, ) feedback_fn("* creating instance disks...") if not _CreateDisks(self.cfg, iobj): _RemoveDisks(iobj, self.cfg) - raise errors.OpExecError, ("Device creation failed, reverting...") + raise errors.OpExecError("Device creation failed, reverting...") feedback_fn("adding instance %s to cluster config" % instance) self.cfg.AddInstance(iobj) if self.op.wait_for_sync: - disk_abort = not _WaitForSync(self.cfg, iobj) - elif iobj.disk_template == "remote_raid1": + disk_abort = not _WaitForSync(self.cfg, iobj, self.proc) + elif iobj.disk_template in constants.DTS_NET_MIRROR: # make sure the disks are not degraded (still sync-ing is ok) time.sleep(15) feedback_fn("* checking mirrors status") - disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True) + disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True) else: disk_abort = False if disk_abort: _RemoveDisks(iobj, self.cfg) self.cfg.RemoveInstance(iobj.name) - raise errors.OpExecError, ("There are some degraded disks for" - " this instance") + raise errors.OpExecError("There are some degraded disks for" + " this instance") feedback_fn("creating os for instance %s on node %s" % (instance, pnode_name)) @@ -2716,9 +3491,9 @@ class LUCreateInstance(LogicalUnit): if self.op.mode == constants.INSTANCE_CREATE: feedback_fn("* running the instance OS create scripts...") if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"): - raise errors.OpExecError, ("could not add os for instance %s" - " on node %s" % - (instance, pnode_name)) + raise errors.OpExecError("could not add os for instance %s" + " on node %s" % + (instance, pnode_name)) elif self.op.mode == constants.INSTANCE_IMPORT: feedback_fn("* running the instance OS import scripts...") @@ -2726,19 +3501,19 @@ class LUCreateInstance(LogicalUnit): src_image = self.src_image if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb", src_node, src_image): - raise errors.OpExecError, ("Could not import os for instance" - " %s on node %s" % - (instance, pnode_name)) + raise errors.OpExecError("Could not import os for instance" + " %s on node %s" % + (instance, pnode_name)) else: # also checked in the prereq part - raise errors.ProgrammerError, ("Unknown OS initialization mode '%s'" - % self.op.mode) + raise errors.ProgrammerError("Unknown OS initialization mode '%s'" + % self.op.mode) if self.op.start: logger.Info("starting instance %s on node %s" % (instance, pnode_name)) feedback_fn("* starting instance...") if not rpc.call_instance_start(pnode_name, iobj, None): - raise errors.OpExecError, ("Could not start instance") + raise errors.OpExecError("Could not start instance") class LUConnectConsole(NoHooksLU): @@ -2760,8 +3535,8 @@ class LUConnectConsole(NoHooksLU): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance def Exec(self, feedback_fn): @@ -2773,215 +3548,18 @@ class LUConnectConsole(NoHooksLU): node_insts = rpc.call_instance_list([node])[node] if node_insts is False: - raise errors.OpExecError, ("Can't connect to node %s." % node) + raise errors.OpExecError("Can't connect to node %s." % node) if instance.name not in node_insts: - raise errors.OpExecError, ("Instance %s is not running." % instance.name) + raise errors.OpExecError("Instance %s is not running." 
% instance.name) logger.Debug("connecting to console of %s on %s" % (instance.name, node)) hyper = hypervisor.GetHypervisor() - console_cmd = hyper.GetShellCommandForConsole(instance.name) - return node, console_cmd - - -class LUAddMDDRBDComponent(LogicalUnit): - """Adda new mirror member to an instance's disk. - - """ - HPATH = "mirror-add" - HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name", "remote_node", "disk_name"] - - def BuildHooksEnv(self): - """Build hooks env. - - This runs on the master, the primary and all the secondaries. - - """ - env = { - "NEW_SECONDARY": self.op.remote_node, - "DISK_NAME": self.op.disk_name, - } - env.update(_BuildInstanceHookEnvByObject(self.instance)) - nl = [self.sstore.GetMasterNode(), self.instance.primary_node, - self.op.remote_node,] + list(self.instance.secondary_nodes) - return env, nl, nl - - def CheckPrereq(self): - """Check prerequisites. - - This checks that the instance is in the cluster. - - """ - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.op.instance_name)) - if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) - self.instance = instance - - remote_node = self.cfg.ExpandNodeName(self.op.remote_node) - if remote_node is None: - raise errors.OpPrereqError, ("Node '%s' not known" % self.op.remote_node) - self.remote_node = remote_node - - if remote_node == instance.primary_node: - raise errors.OpPrereqError, ("The specified node is the primary node of" - " the instance.") - - if instance.disk_template != constants.DT_REMOTE_RAID1: - raise errors.OpPrereqError, ("Instance's disk layout is not" - " remote_raid1.") - for disk in instance.disks: - if disk.iv_name == self.op.disk_name: - break - else: - raise errors.OpPrereqError, ("Can't find this device ('%s') in the" - " instance." 
% self.op.disk_name) - if len(disk.children) > 1: - raise errors.OpPrereqError, ("The device already has two slave" - " devices.\n" - "This would create a 3-disk raid1" - " which we don't allow.") - self.disk = disk - - def Exec(self, feedback_fn): - """Add the mirror component - - """ - disk = self.disk - instance = self.instance - - remote_node = self.remote_node - lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]] - names = _GenerateUniqueNames(self.cfg, lv_names) - new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node, - remote_node, disk.size, names) - - logger.Info("adding new mirror component on secondary") - #HARDCODE - if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False, - _GetInstanceInfoText(instance)): - raise errors.OpExecError, ("Failed to create new component on secondary" - " node %s" % remote_node) - - logger.Info("adding new mirror component on primary") - #HARDCODE - if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd, - _GetInstanceInfoText(instance)): - # remove secondary dev - self.cfg.SetDiskID(new_drbd, remote_node) - rpc.call_blockdev_remove(remote_node, new_drbd) - raise errors.OpExecError, ("Failed to create volume on primary") - - # the device exists now - # call the primary node to add the mirror to md - logger.Info("adding new mirror component to md") - if not rpc.call_blockdev_addchild(instance.primary_node, - disk, new_drbd): - logger.Error("Can't add mirror compoment to md!") - self.cfg.SetDiskID(new_drbd, remote_node) - if not rpc.call_blockdev_remove(remote_node, new_drbd): - logger.Error("Can't rollback on secondary") - self.cfg.SetDiskID(new_drbd, instance.primary_node) - if not rpc.call_blockdev_remove(instance.primary_node, new_drbd): - logger.Error("Can't rollback on primary") - raise errors.OpExecError, "Can't add mirror component to md array" - - disk.children.append(new_drbd) - - self.cfg.AddInstance(instance) - - _WaitForSync(self.cfg, instance) - - return 0 - - -class LURemoveMDDRBDComponent(LogicalUnit): - """Remove a component from a remote_raid1 disk. - - """ - HPATH = "mirror-remove" - HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name", "disk_name", "disk_id"] - - def BuildHooksEnv(self): - """Build hooks env. + console_cmd = hyper.GetShellCommandForConsole(instance) - This runs on the master, the primary and all the secondaries. - - """ - env = { - "DISK_NAME": self.op.disk_name, - "DISK_ID": self.op.disk_id, - "OLD_SECONDARY": self.old_secondary, - } - env.update(_BuildInstanceHookEnvByObject(self.instance)) - nl = [self.sstore.GetMasterNode(), - self.instance.primary_node] + list(self.instance.secondary_nodes) - return env, nl, nl - - def CheckPrereq(self): - """Check prerequisites. - - This checks that the instance is in the cluster. - - """ - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.op.instance_name)) - if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) - self.instance = instance - - if instance.disk_template != constants.DT_REMOTE_RAID1: - raise errors.OpPrereqError, ("Instance's disk layout is not" - " remote_raid1.") - for disk in instance.disks: - if disk.iv_name == self.op.disk_name: - break - else: - raise errors.OpPrereqError, ("Can't find this device ('%s') in the" - " instance." 
% self.op.disk_name) - for child in disk.children: - if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id: - break - else: - raise errors.OpPrereqError, ("Can't find the device with this port.") - - if len(disk.children) < 2: - raise errors.OpPrereqError, ("Cannot remove the last component from" - " a mirror.") - self.disk = disk - self.child = child - if self.child.logical_id[0] == instance.primary_node: - oid = 1 - else: - oid = 0 - self.old_secondary = self.child.logical_id[oid] - - def Exec(self, feedback_fn): - """Remove the mirror component - - """ - instance = self.instance - disk = self.disk - child = self.child - logger.Info("remove mirror component") - self.cfg.SetDiskID(disk, instance.primary_node) - if not rpc.call_blockdev_removechild(instance.primary_node, - disk, child): - raise errors.OpExecError, ("Can't remove child from mirror.") - - for node in child.logical_id[:2]: - self.cfg.SetDiskID(child, node) - if not rpc.call_blockdev_remove(node, child): - logger.Error("Warning: failed to remove device from node %s," - " continuing operation." % node) - - disk.children.remove(child) - self.cfg.AddInstance(instance) + # build ssh cmdline + return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True) class LUReplaceDisks(LogicalUnit): @@ -2990,7 +3568,7 @@ class LUReplaceDisks(LogicalUnit): """ HPATH = "mirrors-replace" HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name"] + _OP_REQP = ["instance_name", "mode", "disks"] def BuildHooksEnv(self): """Build hooks env. @@ -2999,12 +3577,17 @@ class LUReplaceDisks(LogicalUnit): """ env = { + "MODE": self.op.mode, "NEW_SECONDARY": self.op.remote_node, "OLD_SECONDARY": self.instance.secondary_nodes[0], } env.update(_BuildInstanceHookEnvByObject(self.instance)) - nl = [self.sstore.GetMasterNode(), - self.instance.primary_node] + list(self.instance.secondary_nodes) + nl = [ + self.sstore.GetMasterNode(), + self.instance.primary_node, + ] + if self.op.remote_node is not None: + nl.append(self.op.remote_node) return env, nl, nl def CheckPrereq(self): @@ -3016,42 +3599,90 @@ class LUReplaceDisks(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance + self.op.instance_name = instance.name - if instance.disk_template != constants.DT_REMOTE_RAID1: - raise errors.OpPrereqError, ("Instance's disk layout is not" - " remote_raid1.") + if instance.disk_template not in constants.DTS_NET_MIRROR: + raise errors.OpPrereqError("Instance's disk layout is not" + " network mirrored.") if len(instance.secondary_nodes) != 1: - raise errors.OpPrereqError, ("The instance has a strange layout," - " expected one secondary but found %d" % - len(instance.secondary_nodes)) + raise errors.OpPrereqError("The instance has a strange layout," + " expected one secondary but found %d" % + len(instance.secondary_nodes)) + + self.sec_node = instance.secondary_nodes[0] remote_node = getattr(self.op, "remote_node", None) - if remote_node is None: - remote_node = instance.secondary_nodes[0] - else: + if remote_node is not None: remote_node = self.cfg.ExpandNodeName(remote_node) if remote_node is None: - raise errors.OpPrereqError, ("Node '%s' not known" % - self.op.remote_node) + raise errors.OpPrereqError("Node '%s' not known" % + self.op.remote_node) + self.remote_node_info = 
self.cfg.GetNodeInfo(remote_node) + else: + self.remote_node_info = None if remote_node == instance.primary_node: - raise errors.OpPrereqError, ("The specified node is the primary node of" - " the instance.") + raise errors.OpPrereqError("The specified node is the primary node of" + " the instance.") + elif remote_node == self.sec_node: + if self.op.mode == constants.REPLACE_DISK_SEC: + # this is for DRBD8, where we can't execute the same mode of + # replacement as for drbd7 (no different port allocated) + raise errors.OpPrereqError("Same secondary given, cannot execute" + " replacement") + # the user gave the current secondary, switch to + # 'no-replace-secondary' mode for drbd7 + remote_node = None + if (instance.disk_template == constants.DT_REMOTE_RAID1 and + self.op.mode != constants.REPLACE_DISK_ALL): + raise errors.OpPrereqError("Template 'remote_raid1' only allows all" + " disks replacement, not individual ones") + if instance.disk_template == constants.DT_DRBD8: + if (self.op.mode == constants.REPLACE_DISK_ALL and + remote_node is not None): + # switch to replace secondary mode + self.op.mode = constants.REPLACE_DISK_SEC + + if self.op.mode == constants.REPLACE_DISK_ALL: + raise errors.OpPrereqError("Template 'drbd' only allows primary or" + " secondary disk replacement, not" + " both at once") + elif self.op.mode == constants.REPLACE_DISK_PRI: + if remote_node is not None: + raise errors.OpPrereqError("Template 'drbd' does not allow changing" + " the secondary while doing a primary" + " node disk replacement") + self.tgt_node = instance.primary_node + self.oth_node = instance.secondary_nodes[0] + elif self.op.mode == constants.REPLACE_DISK_SEC: + self.new_node = remote_node # this can be None, in which case + # we don't change the secondary + self.tgt_node = instance.secondary_nodes[0] + self.oth_node = instance.primary_node + else: + raise errors.ProgrammerError("Unhandled disk replace mode") + + for name in self.op.disks: + if instance.FindDisk(name) is None: + raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" % + (name, instance.name)) self.op.remote_node = remote_node - def Exec(self, feedback_fn): + def _ExecRR1(self, feedback_fn): """Replace the disks of an instance. """ instance = self.instance iv_names = {} # start of work - remote_node = self.op.remote_node + if self.op.remote_node is None: + remote_node = self.sec_node + else: + remote_node = self.op.remote_node cfg = self.cfg - vgname = cfg.GetVGName() for dev in instance.disks: size = dev.size lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]] @@ -3062,28 +3693,29 @@ class LUReplaceDisks(LogicalUnit): logger.Info("adding new mirror component on secondary for %s" % dev.iv_name) #HARDCODE - if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False, + if not _CreateBlockDevOnSecondary(cfg, remote_node, instance, + new_drbd, False, _GetInstanceInfoText(instance)): - raise errors.OpExecError, ("Failed to create new component on" - " secondary node %s\n" - "Full abort, cleanup manually!" % - remote_node) + raise errors.OpExecError("Failed to create new component on secondary" + " node %s. Full abort, cleanup manually!" 
% + remote_node) logger.Info("adding new mirror component on primary") #HARDCODE - if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd, + if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, + instance, new_drbd, _GetInstanceInfoText(instance)): # remove secondary dev cfg.SetDiskID(new_drbd, remote_node) rpc.call_blockdev_remove(remote_node, new_drbd) - raise errors.OpExecError("Failed to create volume on primary!\n" - "Full abort, cleanup manually!!") + raise errors.OpExecError("Failed to create volume on primary!" + " Full abort, cleanup manually!!") # the device exists now # call the primary node to add the mirror to md logger.Info("adding new mirror component to md") - if not rpc.call_blockdev_addchild(instance.primary_node, dev, - new_drbd): + if not rpc.call_blockdev_addchildren(instance.primary_node, dev, + [new_drbd]): logger.Error("Can't add mirror compoment to md!") cfg.SetDiskID(new_drbd, remote_node) if not rpc.call_blockdev_remove(remote_node, new_drbd): @@ -3091,7 +3723,7 @@ class LUReplaceDisks(LogicalUnit): cfg.SetDiskID(new_drbd, instance.primary_node) if not rpc.call_blockdev_remove(instance.primary_node, new_drbd): logger.Error("Can't rollback on primary") - raise errors.OpExecError, ("Full abort, cleanup manually!!") + raise errors.OpExecError("Full abort, cleanup manually!!") dev.children.append(new_drbd) cfg.AddInstance(instance) @@ -3099,7 +3731,7 @@ class LUReplaceDisks(LogicalUnit): # this can fail as the old devices are degraded and _WaitForSync # does a combined result over all disks, so we don't check its # return value - _WaitForSync(cfg, instance, unlock=True) + _WaitForSync(cfg, instance, self.proc, unlock=True) # so check manually all the devices for name in iv_names: @@ -3107,18 +3739,18 @@ class LUReplaceDisks(LogicalUnit): cfg.SetDiskID(dev, instance.primary_node) is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5] if is_degr: - raise errors.OpExecError, ("MD device %s is degraded!" % name) + raise errors.OpExecError("MD device %s is degraded!" % name) cfg.SetDiskID(new_drbd, instance.primary_node) is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5] if is_degr: - raise errors.OpExecError, ("New drbd device %s is degraded!" % name) + raise errors.OpExecError("New drbd device %s is degraded!" % name) for name in iv_names: dev, child, new_drbd = iv_names[name] logger.Info("remove mirror %s component" % name) cfg.SetDiskID(dev, instance.primary_node) - if not rpc.call_blockdev_removechild(instance.primary_node, - dev, child): + if not rpc.call_blockdev_removechildren(instance.primary_node, + dev, [child]): logger.Error("Can't remove child from mirror, aborting" " *this device cleanup*.\nYou need to cleanup manually!!") continue @@ -3134,6 +3766,358 @@ class LUReplaceDisks(LogicalUnit): cfg.AddInstance(instance) + def _ExecD8DiskOnly(self, feedback_fn): + """Replace a disk on the primary or secondary for dbrd8. + + The algorithm for replace is quite complicated: + - for each disk to be replaced: + - create new LVs on the target node with unique names + - detach old LVs from the drbd device + - rename old LVs to name_replaced. + - rename new LVs to old LVs + - attach the new LVs (with the old names now) to the drbd device + - wait for sync across all devices + - for each modified disk: + - remove old LVs (which have the name name_replaces.) + + Failures are not very well handled. 
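As a concrete illustration of the rename dance (steps 3-5 of the list above), assume a hypothetical disk sda whose current backing LVs are sda_data/sda_meta and whose replacement LVs were created under unique names; the swap performed on the target node then amounts to:

    # illustrative sketch only -- the real code derives these lists from
    # dev.children and _GenerateUniqueNames, and renames (vg, lv) id tuples
    old_lvs = ["sda_data", "sda_meta"]        # currently attached to the drbd
    new_lvs = ["uuid-1.data", "uuid-1.meta"]  # freshly created, unique names
    suffix = "_replaced-1199145600"           # from int(time.time())
    # first move the old LVs out of the way ...
    renames = [(old, old + suffix) for old in old_lvs]
    # ... then give the new LVs the old names, so the drbd device can be
    # re-attached to children whose names it already knows
    renames += [(new, old) for old, new in zip(old_lvs, new_lvs)]

The old "*_replaced-*" volumes are only removed in the final step, after the resync has finished.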
+ + """ + steps_total = 6 + warning, info = (self.proc.LogWarning, self.proc.LogInfo) + instance = self.instance + iv_names = {} + vgname = self.cfg.GetVGName() + # start of work + cfg = self.cfg + tgt_node = self.tgt_node + oth_node = self.oth_node + + # Step: check device activation + self.proc.LogStep(1, steps_total, "check device existence") + info("checking volume groups") + my_vg = cfg.GetVGName() + results = rpc.call_vg_list([oth_node, tgt_node]) + if not results: + raise errors.OpExecError("Can't list volume groups on the nodes") + for node in oth_node, tgt_node: + res = results.get(node, False) + if not res or my_vg not in res: + raise errors.OpExecError("Volume group '%s' not found on %s" % + (my_vg, node)) + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + for node in tgt_node, oth_node: + info("checking %s on %s" % (dev.iv_name, node)) + cfg.SetDiskID(dev, node) + if not rpc.call_blockdev_find(node, dev): + raise errors.OpExecError("Can't find device %s on node %s" % + (dev.iv_name, node)) + + # Step: check other node consistency + self.proc.LogStep(2, steps_total, "check peer consistency") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s consistency on %s" % (dev.iv_name, oth_node)) + if not _CheckDiskConsistency(self.cfg, dev, oth_node, + oth_node==instance.primary_node): + raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe" + " to replace disks on this node (%s)" % + (oth_node, tgt_node)) + + # Step: create new storage + self.proc.LogStep(3, steps_total, "allocate new storage") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + size = dev.size + cfg.SetDiskID(dev, tgt_node) + lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]] + names = _GenerateUniqueNames(cfg, lv_names) + lv_data = objects.Disk(dev_type=constants.LD_LV, size=size, + logical_id=(vgname, names[0])) + lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128, + logical_id=(vgname, names[1])) + new_lvs = [lv_data, lv_meta] + old_lvs = dev.children + iv_names[dev.iv_name] = (dev, old_lvs, new_lvs) + info("creating new local storage on %s for %s" % + (tgt_node, dev.iv_name)) + # since we *always* want to create this LV, we use the + # _Create...OnPrimary (which forces the creation), even if we + # are talking about the secondary node + for new_lv in new_lvs: + if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new LV named '%s' on" + " node '%s'" % + (new_lv.logical_id[1], tgt_node)) + + # Step: for each lv, detach+rename*2+attach + self.proc.LogStep(4, steps_total, "change drbd configuration") + for dev, old_lvs, new_lvs in iv_names.itervalues(): + info("detaching %s drbd from local storage" % dev.iv_name) + if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs): + raise errors.OpExecError("Can't detach drbd from local storage on node" + " %s for device %s" % (tgt_node, dev.iv_name)) + #dev.children = [] + #cfg.Update(instance) + + # ok, we created the new LVs, so now we know we have the needed + # storage; as such, we proceed on the target node to rename + # old_lv to _old, and new_lv to old_lv; note that we rename LVs + # using the assumption that logical_id == physical_id (which in + # turn is the unique_id on that node) + + # FIXME(iustin): use a better name for the replaced LVs + temp_suffix = int(time.time()) + ren_fn = lambda d, suff: (d.physical_id[0], + 
d.physical_id[1] + "_replaced-%s" % suff) + # build the rename list based on what LVs exist on the node + rlist = [] + for to_ren in old_lvs: + find_res = rpc.call_blockdev_find(tgt_node, to_ren) + if find_res is not None: # device exists + rlist.append((to_ren, ren_fn(to_ren, temp_suffix))) + + info("renaming the old LVs on the target node") + if not rpc.call_blockdev_rename(tgt_node, rlist): + raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node) + # now we rename the new LVs to the old LVs + info("renaming the new LVs on the target node") + rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)] + if not rpc.call_blockdev_rename(tgt_node, rlist): + raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node) + + for old, new in zip(old_lvs, new_lvs): + new.logical_id = old.logical_id + cfg.SetDiskID(new, tgt_node) + + for disk in old_lvs: + disk.logical_id = ren_fn(disk, temp_suffix) + cfg.SetDiskID(disk, tgt_node) + + # now that the new lvs have the old name, we can add them to the device + info("adding new mirror component on %s" % tgt_node) + if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs): + for new_lv in new_lvs: + if not rpc.call_blockdev_remove(tgt_node, new_lv): + warning("Can't rollback device %s", hint="manually cleanup unused" + " logical volumes") + raise errors.OpExecError("Can't add local storage to drbd") + + dev.children = new_lvs + cfg.Update(instance) + + # Step: wait for sync + + # this can fail as the old devices are degraded and _WaitForSync + # does a combined result over all disks, so we don't check its + # return value + self.proc.LogStep(5, steps_total, "sync devices") + _WaitForSync(cfg, instance, self.proc, unlock=True) + + # so check manually all the devices + for name, (dev, old_lvs, new_lvs) in iv_names.iteritems(): + cfg.SetDiskID(dev, instance.primary_node) + is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5] + if is_degr: + raise errors.OpExecError("DRBD device %s is degraded!" % name) + + # Step: remove old storage + self.proc.LogStep(6, steps_total, "removing old storage") + for name, (dev, old_lvs, new_lvs) in iv_names.iteritems(): + info("remove logical volumes for %s" % name) + for lv in old_lvs: + cfg.SetDiskID(lv, tgt_node) + if not rpc.call_blockdev_remove(tgt_node, lv): + warning("Can't remove old LV", hint="manually remove unused LVs") + continue + + def _ExecD8Secondary(self, feedback_fn): + """Replace the secondary node for drbd8. + + The algorithm for replace is quite complicated: + - for all disks of the instance: + - create new LVs on the new node with same names + - shutdown the drbd device on the old secondary + - disconnect the drbd network on the primary + - create the drbd device on the new secondary + - network attach the drbd on the primary, using an artifice: + the drbd code for Attach() will connect to the network if it + finds a device which is connected to the good local disks but + not network enabled + - wait for sync across all devices + - remove all disks from the old secondary + + Failures are not very well handled. 
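The "artifice" mentioned above relies purely on manipulating the disk ids; roughly (node names and port are hypothetical, the real objects are objects.Disk instances):

    # sketch of the per-disk id handling performed in the code below
    pri_node, old_node, new_node = "node1", "node2", "node3"
    port = 11000
    logical_id = (pri_node, old_node, port)
    # 1. blanking physical_id -- the code uses (None,) * len(physical_id) --
    #    makes a subsequent find() on the primary fall back to the
    #    standalone (disconnected) state
    # 2. the secondary is then swapped in the configuration only, keeping
    #    the same drbd port
    logical_id = (pri_node, new_node) + logical_id[2:]
    # 3. another find() on the primary now sees healthy local storage plus
    #    the new peer in the id, and re-establishes the network connection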
+ + """ + steps_total = 6 + warning, info = (self.proc.LogWarning, self.proc.LogInfo) + instance = self.instance + iv_names = {} + vgname = self.cfg.GetVGName() + # start of work + cfg = self.cfg + old_node = self.tgt_node + new_node = self.new_node + pri_node = instance.primary_node + + # Step: check device activation + self.proc.LogStep(1, steps_total, "check device existence") + info("checking volume groups") + my_vg = cfg.GetVGName() + results = rpc.call_vg_list([pri_node, new_node]) + if not results: + raise errors.OpExecError("Can't list volume groups on the nodes") + for node in pri_node, new_node: + res = results.get(node, False) + if not res or my_vg not in res: + raise errors.OpExecError("Volume group '%s' not found on %s" % + (my_vg, node)) + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s on %s" % (dev.iv_name, pri_node)) + cfg.SetDiskID(dev, pri_node) + if not rpc.call_blockdev_find(pri_node, dev): + raise errors.OpExecError("Can't find device %s on node %s" % + (dev.iv_name, pri_node)) + + # Step: check other node consistency + self.proc.LogStep(2, steps_total, "check peer consistency") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s consistency on %s" % (dev.iv_name, pri_node)) + if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True): + raise errors.OpExecError("Primary node (%s) has degraded storage," + " unsafe to replace the secondary" % + pri_node) + + # Step: create new storage + self.proc.LogStep(3, steps_total, "allocate new storage") + for dev in instance.disks: + size = dev.size + info("adding new local storage on %s for %s" % (new_node, dev.iv_name)) + # since we *always* want to create this LV, we use the + # _Create...OnPrimary (which forces the creation), even if we + # are talking about the secondary node + for new_lv in dev.children: + if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new LV named '%s' on" + " node '%s'" % + (new_lv.logical_id[1], new_node)) + + iv_names[dev.iv_name] = (dev, dev.children) + + self.proc.LogStep(4, steps_total, "changing drbd configuration") + for dev in instance.disks: + size = dev.size + info("activating a new drbd on %s for %s" % (new_node, dev.iv_name)) + # create new devices on new_node + new_drbd = objects.Disk(dev_type=constants.LD_DRBD8, + logical_id=(pri_node, new_node, + dev.logical_id[2]), + children=dev.children) + if not _CreateBlockDevOnSecondary(cfg, new_node, instance, + new_drbd, False, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new DRBD on" + " node '%s'" % new_node) + + for dev in instance.disks: + # we have new devices, shutdown the drbd on the old secondary + info("shutting down drbd for %s on old node" % dev.iv_name) + cfg.SetDiskID(dev, old_node) + if not rpc.call_blockdev_shutdown(old_node, dev): + warning("Failed to shutdown drbd for %s on old node" % dev.iv_name, + hint="Please cleanup this device manually as soon as possible") + + info("detaching primary drbds from the network (=> standalone)") + done = 0 + for dev in instance.disks: + cfg.SetDiskID(dev, pri_node) + # set the physical (unique in bdev terms) id to None, meaning + # detach from network + dev.physical_id = (None,) * len(dev.physical_id) + # and 'find' the device, which will 'fix' it to match the + # standalone state + if rpc.call_blockdev_find(pri_node, dev): + done += 1 + else: + 
warning("Failed to detach drbd %s from network, unusual case" % + dev.iv_name) + + if not done: + # no detaches succeeded (very unlikely) + raise errors.OpExecError("Can't detach at least one DRBD from old node") + + # if we managed to detach at least one, we update all the disks of + # the instance to point to the new secondary + info("updating instance configuration") + for dev in instance.disks: + dev.logical_id = (pri_node, new_node) + dev.logical_id[2:] + cfg.SetDiskID(dev, pri_node) + cfg.Update(instance) + + # and now perform the drbd attach + info("attaching primary drbds to new secondary (standalone => connected)") + failures = [] + for dev in instance.disks: + info("attaching primary drbd for %s to new secondary node" % dev.iv_name) + # since the attach is smart, it's enough to 'find' the device, + # it will automatically activate the network, if the physical_id + # is correct + cfg.SetDiskID(dev, pri_node) + if not rpc.call_blockdev_find(pri_node, dev): + warning("can't attach drbd %s to new secondary!" % dev.iv_name, + "please do a gnt-instance info to see the status of disks") + + # this can fail as the old devices are degraded and _WaitForSync + # does a combined result over all disks, so we don't check its + # return value + self.proc.LogStep(5, steps_total, "sync devices") + _WaitForSync(cfg, instance, self.proc, unlock=True) + + # so check manually all the devices + for name, (dev, old_lvs) in iv_names.iteritems(): + cfg.SetDiskID(dev, pri_node) + is_degr = rpc.call_blockdev_find(pri_node, dev)[5] + if is_degr: + raise errors.OpExecError("DRBD device %s is degraded!" % name) + + self.proc.LogStep(6, steps_total, "removing old storage") + for name, (dev, old_lvs) in iv_names.iteritems(): + info("remove logical volumes for %s" % name) + for lv in old_lvs: + cfg.SetDiskID(lv, old_node) + if not rpc.call_blockdev_remove(old_node, lv): + warning("Can't remove LV on old secondary", + hint="Cleanup stale volumes by hand") + + def Exec(self, feedback_fn): + """Execute disk replacement. + + This dispatches the disk replacement to the appropriate handler. + + """ + instance = self.instance + if instance.disk_template == constants.DT_REMOTE_RAID1: + fn = self._ExecRR1 + elif instance.disk_template == constants.DT_DRBD8: + if self.op.remote_node is None: + fn = self._ExecD8DiskOnly + else: + fn = self._ExecD8Secondary + else: + raise errors.ProgrammerError("Unhandled disk replacement case") + return fn(feedback_fn) + class LUQueryInstanceData(NoHooksLU): """Query runtime instance data. 
@@ -3148,15 +4132,15 @@ class LUQueryInstanceData(NoHooksLU): """ if not isinstance(self.op.instances, list): - raise errors.OpPrereqError, "Invalid argument type 'instances'" + raise errors.OpPrereqError("Invalid argument type 'instances'") if self.op.instances: self.wanted_instances = [] names = self.op.instances for name in names: instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name)) if instance is None: - raise errors.OpPrereqError, ("No such instance name '%s'" % name) - self.wanted_instances.append(instance) + raise errors.OpPrereqError("No such instance name '%s'" % name) + self.wanted_instances.append(instance) else: self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name in self.cfg.GetInstanceList()] @@ -3169,7 +4153,7 @@ class LUQueryInstanceData(NoHooksLU): """ self.cfg.SetDiskID(dev, instance.primary_node) dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev) - if dev.dev_type == "drbd": + if dev.dev_type in constants.LDS_DRBD: # we change the snode then (otherwise we use the one passed in) if dev.logical_id[0] == instance.primary_node: snode = dev.logical_id[1] @@ -3228,6 +4212,11 @@ class LUQueryInstanceData(NoHooksLU): "memory": instance.memory, "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics], "disks": disks, + "network_port": instance.network_port, + "vcpus": instance.vcpus, + "kernel_path": instance.kernel_path, + "initrd_path": instance.initrd_path, + "hvm_boot_order": instance.hvm_boot_order, } result[instance.name] = idict @@ -3235,38 +4224,7 @@ class LUQueryInstanceData(NoHooksLU): return result -class LUQueryNodeData(NoHooksLU): - """Logical unit for querying node data. - - """ - _OP_REQP = ["nodes"] - - def CheckPrereq(self): - """Check prerequisites. - - This only checks the optional node list against the existing names. - - """ - self.wanted_nodes = _GetWantedNodes(self, self.op.nodes) - - def Exec(self, feedback_fn): - """Compute and return the list of nodes. - - """ - ilist = [self.cfg.GetInstanceInfo(iname) for iname - in self.cfg.GetInstanceList()] - result = [] - for node in self.wanted_nodes: - result.append((node.name, node.primary_ip, node.secondary_ip, - [inst.name for inst in ilist - if inst.primary_node == node.name], - [inst.name for inst in ilist - if node.name in inst.secondary_nodes], - )) - return result - - -class LUSetInstanceParms(LogicalUnit): +class LUSetInstanceParams(LogicalUnit): """Modifies an instances's parameters. 
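The renamed LU also grows several new tunables (mac, kernel_path, initrd_path, hvm_boot_order), validated in the hunks below; a hypothetical invocation, assuming the opcode was renamed to OpSetInstanceParams alongside the LU and carries the same attributes, could look like:

    from ganeti import constants
    from ganeti import opcodes

    op = opcodes.OpSetInstanceParams(instance_name="instance1.example.com",
                                     mac="aa:00:00:00:00:11",
                                     hvm_boot_order="cd",
                                     kernel_path=constants.VALUE_DEFAULT,
                                     initrd_path=constants.VALUE_NONE)
    # hvm_boot_order must be "default" or a combination of "acdn";
    # kernel_path may be "default" or an absolute path but never "none";
    # initrd_path additionally accepts "none"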
""" @@ -3285,7 +4243,7 @@ class LUSetInstanceParms(LogicalUnit): args['memory'] = self.mem if self.vcpus: args['vcpus'] = self.vcpus - if self.do_ip or self.do_bridge: + if self.do_ip or self.do_bridge or self.mac: if self.do_ip: ip = self.ip else: @@ -3294,7 +4252,11 @@ class LUSetInstanceParms(LogicalUnit): bridge = self.bridge else: bridge = self.instance.nics[0].bridge - args['nics'] = [(ip, bridge)] + if self.mac: + mac = self.mac + else: + mac = self.instance.nics[0].mac + args['nics'] = [(ip, bridge, mac)] env = _BuildInstanceHookEnvByObject(self.instance, override=args) nl = [self.sstore.GetMasterNode(), self.instance.primary_node] + list(self.instance.secondary_nodes) @@ -3309,35 +4271,77 @@ class LUSetInstanceParms(LogicalUnit): self.mem = getattr(self.op, "mem", None) self.vcpus = getattr(self.op, "vcpus", None) self.ip = getattr(self.op, "ip", None) + self.mac = getattr(self.op, "mac", None) self.bridge = getattr(self.op, "bridge", None) - if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4: - raise errors.OpPrereqError, ("No changes submitted") + self.kernel_path = getattr(self.op, "kernel_path", None) + self.initrd_path = getattr(self.op, "initrd_path", None) + self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None) + all_params = [self.mem, self.vcpus, self.ip, self.bridge, self.mac, + self.kernel_path, self.initrd_path, self.hvm_boot_order] + if all_params.count(None) == len(all_params): + raise errors.OpPrereqError("No changes submitted") if self.mem is not None: try: self.mem = int(self.mem) except ValueError, err: - raise errors.OpPrereqError, ("Invalid memory size: %s" % str(err)) + raise errors.OpPrereqError("Invalid memory size: %s" % str(err)) if self.vcpus is not None: try: self.vcpus = int(self.vcpus) except ValueError, err: - raise errors.OpPrereqError, ("Invalid vcpus number: %s" % str(err)) + raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err)) if self.ip is not None: self.do_ip = True if self.ip.lower() == "none": self.ip = None else: if not utils.IsValidIP(self.ip): - raise errors.OpPrereqError, ("Invalid IP address '%s'." % self.ip) + raise errors.OpPrereqError("Invalid IP address '%s'." 
% self.ip) else: self.do_ip = False self.do_bridge = (self.bridge is not None) + if self.mac is not None: + if self.cfg.IsMacInUse(self.mac): + raise errors.OpPrereqError('MAC address %s already in use in cluster' % + self.mac) + if not utils.IsValidMac(self.mac): + raise errors.OpPrereqError('Invalid MAC address %s' % self.mac) + + if self.kernel_path is not None: + self.do_kernel_path = True + if self.kernel_path == constants.VALUE_NONE: + raise errors.OpPrereqError("Can't set instance to no kernel") + + if self.kernel_path != constants.VALUE_DEFAULT: + if not os.path.isabs(self.kernel_path): + raise errors.OpPrereqError("The kernel path must be an absolute" + " filename") + else: + self.do_kernel_path = False + + if self.initrd_path is not None: + self.do_initrd_path = True + if self.initrd_path not in (constants.VALUE_NONE, + constants.VALUE_DEFAULT): + if not os.path.isabs(self.initrd_path): + raise errors.OpPrereqError("The initrd path must be an absolute" + " filename") + else: + self.do_initrd_path = False + + # boot order verification + if self.hvm_boot_order is not None: + if self.hvm_boot_order != constants.VALUE_DEFAULT: + if len(self.hvm_boot_order.strip("acdn")) != 0: + raise errors.OpPrereqError("invalid boot order specified," + " must be one or more of [acdn]" + " or 'default'") instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("No such instance name '%s'" % - self.op.instance_name) + raise errors.OpPrereqError("No such instance name '%s'" % + self.op.instance_name) self.op.instance_name = instance.name self.instance = instance return @@ -3361,6 +4365,21 @@ class LUSetInstanceParms(LogicalUnit): if self.bridge: instance.nics[0].bridge = self.bridge result.append(("bridge", self.bridge)) + if self.mac: + instance.nics[0].mac = self.mac + result.append(("mac", self.mac)) + if self.do_kernel_path: + instance.kernel_path = self.kernel_path + result.append(("kernel_path", self.kernel_path)) + if self.do_initrd_path: + instance.initrd_path = self.initrd_path + result.append(("initrd_path", self.initrd_path)) + if self.hvm_boot_order: + if self.hvm_boot_order == constants.VALUE_DEFAULT: + instance.hvm_boot_order = None + else: + instance.hvm_boot_order = self.hvm_boot_order + result.append(("hvm_boot_order", self.hvm_boot_order)) self.cfg.AddInstance(instance) @@ -3388,7 +4407,7 @@ class LUQueryExports(NoHooksLU): that node. """ - return rpc.call_export_list([node.name for node in self.nodes]) + return rpc.call_export_list(self.nodes) class LUExportInstance(LogicalUnit): @@ -3417,22 +4436,22 @@ class LUExportInstance(LogicalUnit): def CheckPrereq(self): """Check prerequisites. - This checks that the instance name is a valid one. + This checks that the instance and node names are valid. """ instance_name = self.cfg.ExpandInstanceName(self.op.instance_name) self.instance = self.cfg.GetInstanceInfo(instance_name) if self.instance is None: - raise errors.OpPrereqError, ("Instance '%s' not found" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not found" % + self.op.instance_name) # node verification dst_node_short = self.cfg.ExpandNodeName(self.op.target_node) self.dst_node = self.cfg.GetNodeInfo(dst_node_short) if self.dst_node is None: - raise errors.OpPrereqError, ("Destination node '%s' is unknown." % - self.op.target_node) + raise errors.OpPrereqError("Destination node '%s' is unknown." 
% + self.op.target_node) self.op.target_node = self.dst_node.name def Exec(self, feedback_fn): @@ -3442,10 +4461,11 @@ class LUExportInstance(LogicalUnit): instance = self.instance dst_node = self.dst_node src_node = instance.primary_node - # shutdown the instance, unless requested not to do so if self.op.shutdown: - op = opcodes.OpShutdownInstance(instance_name=instance.name) - self.processor.ChainOpCode(op, feedback_fn) + # shutdown the instance, but not the disks + if not rpc.call_instance_shutdown(src_node, instance): + raise errors.OpExecError("Could not shutdown instance %s on node %s" % + (instance.name, src_node)) vgname = self.cfg.GetVGName() @@ -3461,29 +4481,27 @@ class LUExportInstance(LogicalUnit): logger.Error("could not snapshot block device %s on node %s" % (disk.logical_id[1], src_node)) else: - new_dev = objects.Disk(dev_type="lvm", size=disk.size, + new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size, logical_id=(vgname, new_dev_name), physical_id=(vgname, new_dev_name), iv_name=disk.iv_name) snap_disks.append(new_dev) finally: - if self.op.shutdown: - op = opcodes.OpStartupInstance(instance_name=instance.name, - force=False) - self.processor.ChainOpCode(op, feedback_fn) + if self.op.shutdown and instance.status == "up": + if not rpc.call_instance_start(src_node, instance, None): + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Could not start instance") # TODO: check for size for dev in snap_disks: - if not rpc.call_snapshot_export(src_node, dev, dst_node.name, - instance): - logger.Error("could not export block device %s from node" - " %s to node %s" % - (dev.logical_id[1], src_node, dst_node.name)) + if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance): + logger.Error("could not export block device %s from node %s to node %s" + % (dev.logical_id[1], src_node, dst_node.name)) if not rpc.call_blockdev_remove(src_node, dev): - logger.Error("could not remove snapshot block device %s from" - " node %s" % (dev.logical_id[1], src_node)) + logger.Error("could not remove snapshot block device %s from node %s" % + (dev.logical_id[1], src_node)) if not rpc.call_finalize_export(dst_node.name, instance, snap_disks): logger.Error("could not finalize export for instance %s on node %s" % @@ -3497,9 +4515,574 @@ class LUExportInstance(LogicalUnit): # substitutes an empty list with the full cluster node list. if nodelist: op = opcodes.OpQueryExports(nodes=nodelist) - exportlist = self.processor.ChainOpCode(op, feedback_fn) + exportlist = self.proc.ChainOpCode(op) for node in exportlist: if instance.name in exportlist[node]: if not rpc.call_export_remove(node, instance.name): logger.Error("could not remove older export for instance %s" " on node %s" % (instance.name, node)) + + +class LURemoveExport(NoHooksLU): + """Remove exports related to the named instance. + + """ + _OP_REQP = ["instance_name"] + + def CheckPrereq(self): + """Check prerequisites. + """ + pass + + def Exec(self, feedback_fn): + """Remove any export. + + """ + instance_name = self.cfg.ExpandInstanceName(self.op.instance_name) + # If the instance was not found we'll try with the name that was passed in. + # This will only work if it was an FQDN, though. 
+ fqdn_warn = False + if not instance_name: + fqdn_warn = True + instance_name = self.op.instance_name + + op = opcodes.OpQueryExports(nodes=[]) + exportlist = self.proc.ChainOpCode(op) + found = False + for node in exportlist: + if instance_name in exportlist[node]: + found = True + if not rpc.call_export_remove(node, instance_name): + logger.Error("could not remove export for instance %s" + " on node %s" % (instance_name, node)) + + if fqdn_warn and not found: + feedback_fn("Export not found. If trying to remove an export belonging" + " to a deleted instance please use its Fully Qualified" + " Domain Name.") + + +class TagsLU(NoHooksLU): + """Generic tags LU. + + This is an abstract class which is the parent of all the other tags LUs. + + """ + def CheckPrereq(self): + """Check prerequisites. + + """ + if self.op.kind == constants.TAG_CLUSTER: + self.target = self.cfg.GetClusterInfo() + elif self.op.kind == constants.TAG_NODE: + name = self.cfg.ExpandNodeName(self.op.name) + if name is None: + raise errors.OpPrereqError("Invalid node name (%s)" % + (self.op.name,)) + self.op.name = name + self.target = self.cfg.GetNodeInfo(name) + elif self.op.kind == constants.TAG_INSTANCE: + name = self.cfg.ExpandInstanceName(self.op.name) + if name is None: + raise errors.OpPrereqError("Invalid instance name (%s)" % + (self.op.name,)) + self.op.name = name + self.target = self.cfg.GetInstanceInfo(name) + else: + raise errors.OpPrereqError("Wrong tag type requested (%s)" % + str(self.op.kind)) + + +class LUGetTags(TagsLU): + """Returns the tags of a given object. + + """ + _OP_REQP = ["kind", "name"] + + def Exec(self, feedback_fn): + """Returns the tag list. + + """ + return self.target.GetTags() + + +class LUSearchTags(NoHooksLU): + """Searches the tags for a given pattern. + + """ + _OP_REQP = ["pattern"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks the pattern passed for validity by compiling it. + + """ + try: + self.re = re.compile(self.op.pattern) + except re.error, err: + raise errors.OpPrereqError("Invalid search pattern '%s': %s" % + (self.op.pattern, err)) + + def Exec(self, feedback_fn): + """Returns the tag list. + + """ + cfg = self.cfg + tgts = [("/cluster", cfg.GetClusterInfo())] + ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()] + tgts.extend([("/instances/%s" % i.name, i) for i in ilist]) + nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()] + tgts.extend([("/nodes/%s" % n.name, n) for n in nlist]) + results = [] + for path, target in tgts: + for tag in target.GetTags(): + if self.re.search(tag): + results.append((path, tag)) + return results + + +class LUAddTags(TagsLU): + """Sets a tag on a given object. + + """ + _OP_REQP = ["kind", "name", "tags"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks the type and length of the tag name and value. + + """ + TagsLU.CheckPrereq(self) + for tag in self.op.tags: + objects.TaggableObject.ValidateTag(tag) + + def Exec(self, feedback_fn): + """Sets the tag. + + """ + try: + for tag in self.op.tags: + self.target.AddTag(tag) + except errors.TagError, err: + raise errors.OpExecError("Error while setting tag: %s" % str(err)) + try: + self.cfg.Update(self.target) + except errors.ConfigurationError: + raise errors.OpRetryError("There has been a modification to the" + " config file and the operation has been" + " aborted. Please retry.") + + +class LUDelTags(TagsLU): + """Delete a list of tags from a given object. 
+ + """ + _OP_REQP = ["kind", "name", "tags"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks that we have the given tag. + + """ + TagsLU.CheckPrereq(self) + for tag in self.op.tags: + objects.TaggableObject.ValidateTag(tag) + del_tags = frozenset(self.op.tags) + cur_tags = self.target.GetTags() + if not del_tags <= cur_tags: + diff_tags = del_tags - cur_tags + diff_names = ["'%s'" % tag for tag in diff_tags] + diff_names.sort() + raise errors.OpPrereqError("Tag(s) %s not found" % + (",".join(diff_names))) + + def Exec(self, feedback_fn): + """Remove the tag from the object. + + """ + for tag in self.op.tags: + self.target.RemoveTag(tag) + try: + self.cfg.Update(self.target) + except errors.ConfigurationError: + raise errors.OpRetryError("There has been a modification to the" + " config file and the operation has been" + " aborted. Please retry.") + +class LUTestDelay(NoHooksLU): + """Sleep for a specified amount of time. + + This LU sleeps on the master and/or nodes for a specified amoutn of + time. + + """ + _OP_REQP = ["duration", "on_master", "on_nodes"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks that we have a good list of nodes and/or the duration + is valid. + + """ + + if self.op.on_nodes: + self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes) + + def Exec(self, feedback_fn): + """Do the actual sleep. + + """ + if self.op.on_master: + if not utils.TestDelay(self.op.duration): + raise errors.OpExecError("Error during master delay test") + if self.op.on_nodes: + result = rpc.call_test_delay(self.op.on_nodes, self.op.duration) + if not result: + raise errors.OpExecError("Complete failure from rpc call") + for node, node_result in result.items(): + if not node_result: + raise errors.OpExecError("Failure during rpc call to node %s," + " result: %s" % (node, node_result)) + + +class IAllocator(object): + """IAllocator framework. + + An IAllocator instance has three sets of attributes: + - cfg/sstore that are needed to query the cluster + - input data (all members of the _KEYS class attribute are required) + - four buffer attributes (in|out_data|text), that represent the + input (to the external script) in text and data structure format, + and the output from it, again in two formats + - the result variables from the script (success, info, nodes) for + easy usage + + """ + _KEYS = [ + "mode", "name", + "mem_size", "disks", "disk_template", + "os", "tags", "nics", "vcpus", + ] + + def __init__(self, cfg, sstore, **kwargs): + self.cfg = cfg + self.sstore = sstore + # init buffer variables + self.in_text = self.out_text = self.in_data = self.out_data = None + # init all input fields so that pylint is happy + self.mode = self.name = None + self.mem_size = self.disks = self.disk_template = None + self.os = self.tags = self.nics = self.vcpus = None + # computed fields + self.required_nodes = None + # init result fields + self.success = self.info = self.nodes = None + for key in kwargs: + if key not in self._KEYS: + raise errors.ProgrammerError("Invalid input parameter '%s' to" + " IAllocator" % key) + setattr(self, key, kwargs[key]) + for key in self._KEYS: + if key not in kwargs: + raise errors.ProgrammerError("Missing input parameter '%s' to" + " IAllocator" % key) + self._BuildInputData() + + def _ComputeClusterData(self): + """Compute the generic allocator input data. + + This is the data that is independent of the actual operation. 
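For orientation, the structure assembled here and completed later by _AddNewInstance/_AddRelocateInstance is shaped roughly as follows (every value below is invented for illustration; the authoritative field list is the code itself):

    in_data = {
        "version": 1,
        "cluster_name": "cluster.example.com",
        "cluster_tags": [],
        "nodes": {
            "node1.example.com": {
                "tags": [],
                "total_memory": 4096, "free_memory": 2048,
                "total_disk": 102400, "free_disk": 51200,
                "primary_ip": "192.0.2.1", "secondary_ip": "198.51.100.1",
            },
        },
        "instances": {
            "instance1.example.com": {
                "tags": [], "should_run": True,
                "vcpus": 1, "memory": 512, "os": "debian-etch",
                "nodes": ["node1.example.com", "node2.example.com"],
                "nics": [{"mac": "aa:00:00:00:00:01", "ip": None,
                          "bridge": "xen-br0"}],
                "disks": [{"size": 10240, "mode": "w"}],
                "disk_template": "drbd",
            },
        },
        # "request" is added afterwards by _AddNewInstance or
        # _AddRelocateInstance and then serialized into in_text
    }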
+ + """ + cfg = self.cfg + # cluster data + data = { + "version": 1, + "cluster_name": self.sstore.GetClusterName(), + "cluster_tags": list(cfg.GetClusterInfo().GetTags()), + # we don't have job IDs + } + + # node data + node_results = {} + node_list = cfg.GetNodeList() + node_data = rpc.call_node_info(node_list, cfg.GetVGName()) + for nname in node_list: + ninfo = cfg.GetNodeInfo(nname) + if nname not in node_data or not isinstance(node_data[nname], dict): + raise errors.OpExecError("Can't get data for node %s" % nname) + remote_info = node_data[nname] + for attr in ['memory_total', 'memory_free', + 'vg_size', 'vg_free']: + if attr not in remote_info: + raise errors.OpExecError("Node '%s' didn't return attribute '%s'" % + (nname, attr)) + try: + int(remote_info[attr]) + except ValueError, err: + raise errors.OpExecError("Node '%s' returned invalid value for '%s':" + " %s" % (nname, attr, str(err))) + pnr = { + "tags": list(ninfo.GetTags()), + "total_memory": utils.TryConvert(int, remote_info['memory_total']), + "free_memory": utils.TryConvert(int, remote_info['memory_free']), + "total_disk": utils.TryConvert(int, remote_info['vg_size']), + "free_disk": utils.TryConvert(int, remote_info['vg_free']), + "primary_ip": ninfo.primary_ip, + "secondary_ip": ninfo.secondary_ip, + } + node_results[nname] = pnr + data["nodes"] = node_results + + # instance data + instance_data = {} + i_list = cfg.GetInstanceList() + for iname in i_list: + iinfo = cfg.GetInstanceInfo(iname) + nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge} + for n in iinfo.nics] + pir = { + "tags": list(iinfo.GetTags()), + "should_run": iinfo.status == "up", + "vcpus": iinfo.vcpus, + "memory": iinfo.memory, + "os": iinfo.os, + "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes), + "nics": nic_data, + "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks], + "disk_template": iinfo.disk_template, + } + instance_data[iname] = pir + + data["instances"] = instance_data + + self.in_data = data + + def _AddNewInstance(self): + """Add new instance data to allocator structure. + + This in combination with _AllocatorGetClusterData will create the + correct structure needed as input for the allocator. + + The checks for the completeness of the opcode must have already been + done. + + """ + data = self.in_data + if len(self.disks) != 2: + raise errors.OpExecError("Only two-disk configurations supported") + + disk_space = _ComputeDiskSize(self.disk_template, + self.disks[0]["size"], self.disks[1]["size"]) + + if self.disk_template in constants.DTS_NET_MIRROR: + self.required_nodes = 2 + else: + self.required_nodes = 1 + request = { + "type": "allocate", + "name": self.name, + "disk_template": self.disk_template, + "tags": self.tags, + "os": self.os, + "vcpus": self.vcpus, + "memory": self.mem_size, + "disks": self.disks, + "disk_space_total": disk_space, + "nics": self.nics, + "required_nodes": self.required_nodes, + } + data["request"] = request + + def _AddRelocateInstance(self): + """Add relocate instance data to allocator structure. + + This in combination with _IAllocatorGetClusterData will create the + correct structure needed as input for the allocator. + + The checks for the completeness of the opcode must have already been + done. 
+ + """ + instance = self.cfg.GetInstanceInfo(self.name) + if instance is None: + raise errors.ProgrammerError("Unknown instance '%s' passed to" + " IAllocator" % self.name) + + if instance.disk_template not in constants.DTS_NET_MIRROR: + raise errors.OpPrereqError("Can't relocate non-mirrored instances") + + if len(instance.secondary_nodes) != 1: + raise errors.OpPrereqError("Instance has not exactly one secondary node") + + self.required_nodes = 1 + + disk_space = _ComputeDiskSize(instance.disk_template, + instance.disks[0].size, + instance.disks[1].size) + + request = { + "type": "relocate", + "name": self.name, + "disk_space_total": disk_space, + "required_nodes": self.required_nodes, + "nodes": list(instance.secondary_nodes), + } + self.in_data["request"] = request + + def _BuildInputData(self): + """Build input data structures. + + """ + self._ComputeClusterData() + + if self.mode == constants.IALLOCATOR_MODE_ALLOC: + self._AddNewInstance() + else: + self._AddRelocateInstance() + + self.in_text = serializer.Dump(self.in_data) + + def Run(self, name, validate=True): + """Run an instance allocator and return the results. + + """ + data = self.in_text + + alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH, + os.path.isfile) + if alloc_script is None: + raise errors.OpExecError("Can't find allocator '%s'" % name) + + fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.") + try: + os.write(fd, data) + os.close(fd) + result = utils.RunCmd([alloc_script, fin_name]) + if result.failed: + raise errors.OpExecError("Instance allocator call failed: %s," + " output: %s" % + (result.fail_reason, result.output)) + finally: + os.unlink(fin_name) + self.out_text = result.stdout + if validate: + self._ValidateResult() + + def _ValidateResult(self): + """Process the allocator results. + + This will process and if successful save the result in + self.out_data and the other parameters. + + """ + try: + rdict = serializer.Load(self.out_text) + except Exception, err: + raise errors.OpExecError("Can't parse iallocator results: %s" % str(err)) + + if not isinstance(rdict, dict): + raise errors.OpExecError("Can't parse iallocator results: not a dict") + + for key in "success", "info", "nodes": + if key not in rdict: + raise errors.OpExecError("Can't parse iallocator results:" + " missing key '%s'" % key) + setattr(self, key, rdict[key]) + + if not isinstance(rdict["nodes"], list): + raise errors.OpExecError("Can't parse iallocator results: 'nodes' key" + " is not a list") + self.out_data = rdict + + +class LUTestAllocator(NoHooksLU): + """Run allocator tests. + + This LU runs the allocator tests + + """ + _OP_REQP = ["direction", "mode", "name"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks the opcode parameters depending on the director and mode test. 
+ + """ + if self.op.mode == constants.IALLOCATOR_MODE_ALLOC: + for attr in ["name", "mem_size", "disks", "disk_template", + "os", "tags", "nics", "vcpus"]: + if not hasattr(self.op, attr): + raise errors.OpPrereqError("Missing attribute '%s' on opcode input" % + attr) + iname = self.cfg.ExpandInstanceName(self.op.name) + if iname is not None: + raise errors.OpPrereqError("Instance '%s' already in the cluster" % + iname) + if not isinstance(self.op.nics, list): + raise errors.OpPrereqError("Invalid parameter 'nics'") + for row in self.op.nics: + if (not isinstance(row, dict) or + "mac" not in row or + "ip" not in row or + "bridge" not in row): + raise errors.OpPrereqError("Invalid contents of the" + " 'nics' parameter") + if not isinstance(self.op.disks, list): + raise errors.OpPrereqError("Invalid parameter 'disks'") + if len(self.op.disks) != 2: + raise errors.OpPrereqError("Only two-disk configurations supported") + for row in self.op.disks: + if (not isinstance(row, dict) or + "size" not in row or + not isinstance(row["size"], int) or + "mode" not in row or + row["mode"] not in ['r', 'w']): + raise errors.OpPrereqError("Invalid contents of the" + " 'disks' parameter") + elif self.op.mode == constants.IALLOCATOR_MODE_RELOC: + if not hasattr(self.op, "name"): + raise errors.OpPrereqError("Missing attribute 'name' on opcode input") + fname = self.cfg.ExpandInstanceName(self.op.name) + if fname is None: + raise errors.OpPrereqError("Instance '%s' not found for relocation" % + self.op.name) + self.op.name = fname + else: + raise errors.OpPrereqError("Invalid test allocator mode '%s'" % + self.op.mode) + + if self.op.direction == constants.IALLOCATOR_DIR_OUT: + if not hasattr(self.op, "allocator") or self.op.allocator is None: + raise errors.OpPrereqError("Missing allocator name") + elif self.op.direction != constants.IALLOCATOR_DIR_IN: + raise errors.OpPrereqError("Wrong allocator test '%s'" % + self.op.direction) + + def Exec(self, feedback_fn): + """Run the allocator test. + + """ + ial = IAllocator(self.cfg, self.sstore, + mode=self.op.mode, + name=self.op.name, + mem_size=self.op.mem_size, + disks=self.op.disks, + disk_template=self.op.disk_template, + os=self.op.os, + tags=self.op.tags, + nics=self.op.nics, + vcpus=self.op.vcpus, + ) + + if self.op.direction == constants.IALLOCATOR_DIR_IN: + result = ial.in_text + else: + ial.Run(self.op.allocator, validate=False) + result = ial.out_text + return result
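Finally, the contract that _ValidateResult enforces on the allocator's answer is small; a minimal well-formed reply (values invented) that an external allocator script could print on its stdout would be:

    # the script is invoked as "<allocator> <input-file>" via utils.RunCmd and
    # must emit a serialized dictionary with at least these three keys
    reply = {
        "success": True,
        "info": "allocation succeeded",
        "nodes": ["node2.example.com", "node3.example.com"],
    }
    # Run() stores the raw stdout in out_text, serializer.Load() turns it
    # back into out_data, and "nodes" is additionally required to be a list

LUTestAllocator returns in_text for direction 'in' and the raw out_text for direction 'out', so it can be used to inspect both sides of this exchange.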