X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/cb91d46e65af89484bf1f017e39730594fed836e..fc490dbe206e4c89b75d0f3e96ff4418242bac34:/lib/cmdlib.py diff --git a/lib/cmdlib.py b/lib/cmdlib.py index b9cf898..2e26cfb 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +# # # Copyright (C) 2006, 2007 Google Inc. @@ -26,7 +26,6 @@ import os import os.path import sha -import socket import time import tempfile import re @@ -45,7 +44,7 @@ from ganeti import opcodes from ganeti import ssconf class LogicalUnit(object): - """Logical Unit base class.. + """Logical Unit base class. Subclasses must follow these rules: - implement CheckPrereq which also fills in the opcode instance @@ -70,24 +69,24 @@ class LogicalUnit(object): validity. """ - self.processor = processor + self.proc = processor self.op = op self.cfg = cfg self.sstore = sstore for attr_name in self._OP_REQP: attr_val = getattr(op, attr_name, None) if attr_val is None: - raise errors.OpPrereqError, ("Required parameter '%s' missing" % - attr_name) + raise errors.OpPrereqError("Required parameter '%s' missing" % + attr_name) if self.REQ_CLUSTER: if not cfg.IsCluster(): - raise errors.OpPrereqError, ("Cluster not initialized yet," - " use 'gnt-cluster init' first.") + raise errors.OpPrereqError("Cluster not initialized yet," + " use 'gnt-cluster init' first.") if self.REQ_MASTER: master = sstore.GetMasterNode() - if master != socket.gethostname(): - raise errors.OpPrereqError, ("Commands must be run on the master" - " node %s" % master) + if master != utils.HostInfo().name: + raise errors.OpPrereqError("Commands must be run on the master" + " node %s" % master) def CheckPrereq(self): """Check prerequisites for this LU. @@ -161,31 +160,72 @@ class NoHooksLU(LogicalUnit): This is a no-op, since we don't run hooks. """ - return + return {}, [], [] + + +def _AddHostToEtcHosts(hostname): + """Wrapper around utils.SetEtcHostsEntry. + + """ + hi = utils.HostInfo(name=hostname) + utils.SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()]) + + +def _RemoveHostFromEtcHosts(hostname): + """Wrapper around utils.RemoveEtcHostsEntry. + + """ + hi = utils.HostInfo(name=hostname) + utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name) + utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName()) def _GetWantedNodes(lu, nodes): - """Returns list of checked and expanded nodes. + """Returns list of checked and expanded node names. Args: nodes: List of nodes (strings) or None for all """ - if nodes is not None and not isinstance(nodes, list): - raise errors.OpPrereqError, "Invalid argument type 'nodes'" + if not isinstance(nodes, list): + raise errors.OpPrereqError("Invalid argument type 'nodes'") if nodes: - wanted_nodes = [] + wanted = [] for name in nodes: - node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name)) + node = lu.cfg.ExpandNodeName(name) if node is None: - raise errors.OpPrereqError, ("No such node name '%s'" % name) - wanted_nodes.append(node) + raise errors.OpPrereqError("No such node name '%s'" % name) + wanted.append(node) + + else: + wanted = lu.cfg.GetNodeList() + return utils.NiceSort(wanted) + + +def _GetWantedInstances(lu, instances): + """Returns list of checked and expanded instance names. 
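# [Editor's sketch, not part of the patch: the two _GetWanted* helpers
# introduced here share one pattern -- type-check the argument, expand
# each short name through the config, reject unknowns, and return a
# sorted list.  expand_fn and list_all_fn below are hypothetical
# stand-ins for cfg.ExpandNodeName/cfg.GetNodeList and friends.]
def expand_wanted(names, expand_fn, list_all_fn):
  if not isinstance(names, list):
    raise ValueError("Invalid argument type 'names'")
  if names:
    wanted = []
    for name in names:
      full_name = expand_fn(name)
      if full_name is None:
        raise ValueError("No such name '%s'" % name)
      wanted.append(full_name)
  else:
    wanted = list_all_fn()
  return sorted(wanted)  # plain sorted() standing in for utils.NiceSort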
+ + Args: + instances: List of instances (strings) or None for all + + """ + if not isinstance(instances, list): + raise errors.OpPrereqError("Invalid argument type 'instances'") + + if instances: + wanted = [] + + for name in instances: + instance = lu.cfg.ExpandInstanceName(name) + if instance is None: + raise errors.OpPrereqError("No such instance name '%s'" % name) + wanted.append(instance) - return wanted_nodes else: - return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()] + wanted = lu.cfg.GetInstanceList() + return utils.NiceSort(wanted) def _CheckOutputFields(static, dynamic, selected): @@ -202,89 +242,65 @@ def _CheckOutputFields(static, dynamic, selected): all_fields = static_fields | dynamic_fields if not all_fields.issuperset(selected): - raise errors.OpPrereqError, ("Unknown output fields selected: %s" - % ",".join(frozenset(selected). - difference(all_fields))) + raise errors.OpPrereqError("Unknown output fields selected: %s" + % ",".join(frozenset(selected). + difference(all_fields))) -def _UpdateEtcHosts(fullnode, ip): - """Ensure a node has a correct entry in /etc/hosts. +def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, + memory, vcpus, nics): + """Builds instance related env variables for hooks from single variables. Args: - fullnode - Fully qualified domain name of host. (str) - ip - IPv4 address of host (str) - + secondary_nodes: List of secondary nodes as strings """ - node = fullnode.split(".", 1)[0] - - f = open('/etc/hosts', 'r+') - - inthere = False - - save_lines = [] - add_lines = [] - removed = False - - while True: - rawline = f.readline() - - if not rawline: - # End of file - break - - line = rawline.split('\n')[0] - - # Strip off comments - line = line.split('#')[0] - - if not line: - # Entire line was comment, skip - save_lines.append(rawline) - continue - - fields = line.split() - - haveall = True - havesome = False - for spec in [ ip, fullnode, node ]: - if spec not in fields: - haveall = False - if spec in fields: - havesome = True - - if haveall: - inthere = True - save_lines.append(rawline) - continue - - if havesome and not haveall: - # Line (old, or manual?) which is missing some. Remove. - removed = True - continue - - save_lines.append(rawline) + env = { + "OP_TARGET": name, + "INSTANCE_NAME": name, + "INSTANCE_PRIMARY": primary_node, + "INSTANCE_SECONDARIES": " ".join(secondary_nodes), + "INSTANCE_OS_TYPE": os_type, + "INSTANCE_STATUS": status, + "INSTANCE_MEMORY": memory, + "INSTANCE_VCPUS": vcpus, + } + + if nics: + nic_count = len(nics) + for idx, (ip, bridge, mac) in enumerate(nics): + if ip is None: + ip = "" + env["INSTANCE_NIC%d_IP" % idx] = ip + env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge + env["INSTANCE_NIC%d_HWADDR" % idx] = mac + else: + nic_count = 0 - if not inthere: - add_lines.append('%s\t%s %s\n' % (ip, fullnode, node)) + env["INSTANCE_NIC_COUNT"] = nic_count - if removed: - if add_lines: - save_lines = save_lines + add_lines + return env - # We removed a line, write a new file and replace old. - fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc') - newfile = os.fdopen(fd, 'w') - newfile.write(''.join(save_lines)) - newfile.close() - os.rename(tmpname, '/etc/hosts') - elif add_lines: - # Simply appending a new line will do the trick. - f.seek(0, 2) - for add in add_lines: - f.write(add) +def _BuildInstanceHookEnvByObject(instance, override=None): + """Builds instance related env variables for hooks from an object. 
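# [Editor's sketch, not part of the patch: the flattening performed by
# _BuildInstanceHookEnv above, reduced to the NIC handling.  The helper
# name and argument are illustrative only; nics is a list of
# (ip, bridge, mac) tuples as in the hunk.]
def nic_env(nics):
  env = {"INSTANCE_NIC_COUNT": len(nics)}
  for idx, (ip, bridge, mac) in enumerate(nics):
    if ip is None:
      ip = ""
    env["INSTANCE_NIC%d_IP" % idx] = ip
    env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
    env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  return env

# e.g. nic_env([("192.0.2.10", "xen-br0", "aa:00:00:11:22:33")])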
- f.close() + Args: + instance: objects.Instance object of instance + override: dict of values to override + """ + args = { + 'name': instance.name, + 'primary_node': instance.primary_node, + 'secondary_nodes': instance.secondary_nodes, + 'os_type': instance.os, + 'status': instance.os, + 'memory': instance.memory, + 'vcpus': instance.vcpus, + 'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics], + } + if override: + args.update(override) + return _BuildInstanceHookEnv(**args) def _UpdateKnownHosts(fullnode, ip, pubkey): @@ -296,10 +312,10 @@ def _UpdateKnownHosts(fullnode, ip, pubkey): pubkey - the public key of the cluster """ - if os.path.exists('/etc/ssh/ssh_known_hosts'): - f = open('/etc/ssh/ssh_known_hosts', 'r+') + if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE): + f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+') else: - f = open('/etc/ssh/ssh_known_hosts', 'w+') + f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+') inthere = False @@ -307,39 +323,35 @@ def _UpdateKnownHosts(fullnode, ip, pubkey): add_lines = [] removed = False - while True: - rawline = f.readline() + for rawline in f: logger.Debug('read %s' % (repr(rawline),)) - if not rawline: - # End of file - break - - line = rawline.split('\n')[0] - - parts = line.split(' ') - fields = parts[0].split(',') - key = parts[2] - - haveall = True - havesome = False - for spec in [ ip, fullnode ]: - if spec not in fields: - haveall = False - if spec in fields: - havesome = True - - logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),)) - if haveall and key == pubkey: - inthere = True - save_lines.append(rawline) - logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),)) - continue + parts = rawline.rstrip('\r\n').split() + + # Ignore unwanted lines + if len(parts) >= 3 and not rawline.lstrip()[0] == '#': + fields = parts[0].split(',') + key = parts[2] + + haveall = True + havesome = False + for spec in [ ip, fullnode ]: + if spec not in fields: + haveall = False + if spec in fields: + havesome = True + + logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),)) + if haveall and key == pubkey: + inthere = True + save_lines.append(rawline) + logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),)) + continue - if havesome and (not haveall or key != pubkey): - removed = True - logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),)) - continue + if havesome and (not haveall or key != pubkey): + removed = True + logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),)) + continue save_lines.append(rawline) @@ -351,12 +363,15 @@ def _UpdateKnownHosts(fullnode, ip, pubkey): save_lines = save_lines + add_lines # Write a new file and replace old. - fd, tmpname = tempfile.mkstemp('tmp', 'ssh_known_hosts_', '/etc/ssh') + fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.', + constants.DATA_DIR) newfile = os.fdopen(fd, 'w') - newfile.write(''.join(save_lines)) - newfile.close() + try: + newfile.write(''.join(save_lines)) + finally: + newfile.close() logger.Debug("Wrote new known_hosts.") - os.rename(tmpname, '/etc/ssh/ssh_known_hosts') + os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE) elif add_lines: # Simply appending a new line will do the trick. 
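# [Editor's sketch, not part of the patch: the write-then-rename idiom
# the hunk above switches to.  Writing the new known_hosts into a
# temporary file on the same filesystem and os.rename()-ing it over the
# old one makes the replacement atomic on POSIX: readers always see
# either the complete old file or the complete new one, never a mix.]
import os
import tempfile

def replace_file_atomically(path, data):
  fd, tmpname = tempfile.mkstemp(".tmp", os.path.basename(path) + ".",
                                 os.path.dirname(path) or ".")
  try:
    newfile = os.fdopen(fd, "w")
    try:
      newfile.write(data)
    finally:
      newfile.close()
    os.rename(tmpname, path)
  except Exception:
    if os.path.exists(tmpname):
      os.unlink(tmpname)
    raise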
@@ -394,26 +409,23 @@ def _InitSSHSetup(node): node: the name of this host as a fqdn """ - utils.RemoveFile('/root/.ssh/known_hosts') - - if os.path.exists('/root/.ssh/id_dsa'): - utils.CreateBackup('/root/.ssh/id_dsa') - if os.path.exists('/root/.ssh/id_dsa.pub'): - utils.CreateBackup('/root/.ssh/id_dsa.pub') + priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS) - utils.RemoveFile('/root/.ssh/id_dsa') - utils.RemoveFile('/root/.ssh/id_dsa.pub') + for name in priv_key, pub_key: + if os.path.exists(name): + utils.CreateBackup(name) + utils.RemoveFile(name) result = utils.RunCmd(["ssh-keygen", "-t", "dsa", - "-f", "/root/.ssh/id_dsa", + "-f", priv_key, "-q", "-N", ""]) if result.failed: - raise errors.OpExecError, ("could not generate ssh keypair, error %s" % - result.output) + raise errors.OpExecError("Could not generate ssh keypair, error %s" % + result.output) - f = open('/root/.ssh/id_dsa.pub', 'r') + f = open(pub_key, 'r') try: - utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192)) + utils.AddAuthorizedKey(auth_keys, f.read(8192)) finally: f.close() @@ -435,18 +447,30 @@ def _InitGanetiServerSetup(ss): "-keyout", constants.SSL_CERT_FILE, "-out", constants.SSL_CERT_FILE, "-batch"]) if result.failed: - raise errors.OpExecError, ("could not generate server ssl cert, command" - " %s had exitcode %s and error message %s" % - (result.cmd, result.exit_code, result.output)) + raise errors.OpExecError("could not generate server ssl cert, command" + " %s had exitcode %s and error message %s" % + (result.cmd, result.exit_code, result.output)) os.chmod(constants.SSL_CERT_FILE, 0400) result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"]) if result.failed: - raise errors.OpExecError, ("could not start the node daemon, command %s" - " had exitcode %s and error %s" % - (result.cmd, result.exit_code, result.output)) + raise errors.OpExecError("Could not start the node daemon, command %s" + " had exitcode %s and error %s" % + (result.cmd, result.exit_code, result.output)) + + +def _CheckInstanceBridgesExist(instance): + """Check that the brigdes needed by an instance exist. + + """ + # check bridges existance + brlist = [nic.bridge for nic in instance.nics] + if not rpc.call_bridges_exist(instance.primary_node, brlist): + raise errors.OpPrereqError("one or more target bridges %s does not" + " exist on destination node '%s'" % + (brlist, instance.primary_node)) class LUInitCluster(LogicalUnit): @@ -466,66 +490,75 @@ class LUInitCluster(LogicalUnit): ourselves in the post-run node list. """ - - env = {"CLUSTER": self.op.cluster_name, - "MASTER": self.hostname['hostname_full']} - return env, [], [self.hostname['hostname_full']] + env = {"OP_TARGET": self.op.cluster_name} + return env, [], [self.hostname.name] def CheckPrereq(self): """Verify that the passed name is a valid one. 
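# [Editor's sketch, not part of the patch: the key (re)generation step
# that the new _InitSSHSetup performs, shown stand-alone.  The helper
# name is hypothetical; only the ssh-keygen invocation mirrors the hunk,
# and the rename is a crude stand-in for utils.CreateBackup.]
import os
import subprocess

def generate_dsa_keypair(priv_key):
  for name in (priv_key, priv_key + ".pub"):
    if os.path.exists(name):
      os.rename(name, name + ".backup")  # stand-in for CreateBackup
  p = subprocess.Popen(["ssh-keygen", "-t", "dsa", "-f", priv_key,
                        "-q", "-N", ""],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  dummy, err = p.communicate()
  if p.returncode != 0:
    raise RuntimeError("could not generate ssh keypair, error %r" % err)
  pub = open(priv_key + ".pub")
  try:
    return pub.read(8192)
  finally:
    pub.close()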
""" if config.ConfigWriter.IsCluster(): - raise errors.OpPrereqError, ("Cluster is already initialised") + raise errors.OpPrereqError("Cluster is already initialised") - hostname_local = socket.gethostname() - self.hostname = hostname = utils.LookupHostname(hostname_local) - if not hostname: - raise errors.OpPrereqError, ("Cannot resolve my own hostname ('%s')" % - hostname_local) + if self.op.hypervisor_type == constants.HT_XEN_HVM31: + if not os.path.exists(constants.VNC_PASSWORD_FILE): + raise errors.OpPrereqError("Please prepare the cluster VNC" + "password file %s" % + constants.VNC_PASSWORD_FILE) - self.clustername = clustername = utils.LookupHostname(self.op.cluster_name) - if not clustername: - raise errors.OpPrereqError, ("Cannot resolve given cluster name ('%s')" - % self.op.cluster_name) + self.hostname = hostname = utils.HostInfo() - result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']]) - if result.failed: - raise errors.OpPrereqError, ("Inconsistency: this host's name resolves" - " to %s,\nbut this ip address does not" - " belong to this host." - " Aborting." % hostname['ip']) + if hostname.ip.startswith("127."): + raise errors.OpPrereqError("This host's IP resolves to the private" + " range (%s). Please fix DNS or %s." % + (hostname.ip, constants.ETC_HOSTS)) + + self.clustername = clustername = utils.HostInfo(self.op.cluster_name) + + if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT, + source=constants.LOCALHOST_IP_ADDRESS): + raise errors.OpPrereqError("Inconsistency: this host's name resolves" + " to %s,\nbut this ip address does not" + " belong to this host." + " Aborting." % hostname.ip) secondary_ip = getattr(self.op, "secondary_ip", None) if secondary_ip and not utils.IsValidIP(secondary_ip): - raise errors.OpPrereqError, ("Invalid secondary ip given") - if secondary_ip and secondary_ip != hostname['ip']: - result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip]) - if result.failed: - raise errors.OpPrereqError, ("You gave %s as secondary IP,\n" - "but it does not belong to this host." % - secondary_ip) + raise errors.OpPrereqError("Invalid secondary ip given") + if (secondary_ip and + secondary_ip != hostname.ip and + (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT, + source=constants.LOCALHOST_IP_ADDRESS))): + raise errors.OpPrereqError("You gave %s as secondary IP," + " but it does not belong to this host." 
% + secondary_ip) self.secondary_ip = secondary_ip # checks presence of the volume group given vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name) if vgstatus: - raise errors.OpPrereqError, ("Error: %s" % vgstatus) + raise errors.OpPrereqError("Error: %s" % vgstatus) if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", self.op.mac_prefix): - raise errors.OpPrereqError, ("Invalid mac prefix given '%s'" % - self.op.mac_prefix) + raise errors.OpPrereqError("Invalid mac prefix given '%s'" % + self.op.mac_prefix) - if self.op.hypervisor_type not in hypervisor.VALID_HTYPES: - raise errors.OpPrereqError, ("Invalid hypervisor type given '%s'" % - self.op.hypervisor_type) + if self.op.hypervisor_type not in constants.HYPER_TYPES: + raise errors.OpPrereqError("Invalid hypervisor type given '%s'" % + self.op.hypervisor_type) result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev]) if result.failed: - raise errors.OpPrereqError, ("Invalid master netdev given (%s): '%s'" % - (self.op.master_netdev, result.output)) + raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" % + (self.op.master_netdev, + result.output.strip())) + + if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and + os.access(constants.NODE_INITD_SCRIPT, os.X_OK)): + raise errors.OpPrereqError("Init.d script '%s' missing or not" + " executable." % constants.NODE_INITD_SCRIPT) def Exec(self, feedback_fn): """Initialize the cluster. @@ -535,41 +568,37 @@ class LUInitCluster(LogicalUnit): hostname = self.hostname # set up the simple store - ss = ssconf.SimpleStore() + self.sstore = ss = ssconf.SimpleStore() ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type) - ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full']) - ss.SetKey(ss.SS_MASTER_IP, clustername['ip']) + ss.SetKey(ss.SS_MASTER_NODE, hostname.name) + ss.SetKey(ss.SS_MASTER_IP, clustername.ip) ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev) + ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name) # set up the inter-node password and certificate _InitGanetiServerSetup(ss) # start the master ip - rpc.call_node_start_master(hostname['hostname_full']) + rpc.call_node_start_master(hostname.name) # set up ssh config and /etc/hosts - f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r') + f = open(constants.SSH_HOST_RSA_PUB, 'r') try: sshline = f.read() finally: f.close() sshkey = sshline.split(" ")[1] - _UpdateEtcHosts(hostname['hostname_full'], - hostname['ip'], - ) + _AddHostToEtcHosts(hostname.name) - _UpdateKnownHosts(hostname['hostname_full'], - hostname['ip'], - sshkey, - ) + _UpdateKnownHosts(hostname.name, hostname.ip, sshkey) - _InitSSHSetup(hostname['hostname']) + _InitSSHSetup(hostname.name) # init of cluster config file - cfgw = config.ConfigWriter() - cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip, - clustername['hostname'], sshkey, self.op.mac_prefix, + self.cfg = cfgw = config.ConfigWriter() + cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip, + sshkey, self.op.mac_prefix, self.op.vg_name, self.op.def_bridge) @@ -590,17 +619,25 @@ class LUDestroyCluster(NoHooksLU): master = self.sstore.GetMasterNode() nodelist = self.cfg.GetNodeList() - if len(nodelist) > 0 and nodelist != [master]: - raise errors.OpPrereqError, ("There are still %d node(s) in " - "this cluster." % (len(nodelist) - 1)) + if len(nodelist) != 1 or nodelist[0] != master: + raise errors.OpPrereqError("There are still %d node(s) in" + " this cluster." 
% (len(nodelist) - 1)) + instancelist = self.cfg.GetInstanceList() + if instancelist: + raise errors.OpPrereqError("There are still %d instance(s) in" + " this cluster." % len(instancelist)) def Exec(self, feedback_fn): """Destroys the cluster. """ - utils.CreateBackup('/root/.ssh/id_dsa') - utils.CreateBackup('/root/.ssh/id_dsa.pub') - rpc.call_node_leave_cluster(self.sstore.GetMasterNode()) + master = self.sstore.GetMasterNode() + if not rpc.call_node_stop_master(master): + raise errors.OpExecError("Could not disable the master role") + priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) + utils.CreateBackup(priv_key) + utils.CreateBackup(pub_key) + rpc.call_node_leave_cluster(master) class LUVerifyCluster(NoHooksLU): @@ -623,6 +660,7 @@ class LUVerifyCluster(NoHooksLU): node: name of the node to check file_list: required list of files local_cksum: dictionary of local files and their checksums + """ # compares ganeti version local_version = constants.PROTOCOL_VERSION @@ -719,7 +757,7 @@ class LUVerifyCluster(NoHooksLU): (instance, node)) bad = True - return not bad + return bad def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn): """Verify if there are any unknown volumes in the cluster. @@ -767,9 +805,9 @@ class LUVerifyCluster(NoHooksLU): """ bad = False feedback_fn("* Verifying global settings") - self.cfg.VerifyConfig() + for msg in self.cfg.VerifyConfig(): + feedback_fn(" - ERROR: %s" % msg) - master = self.sstore.GetMasterNode() vg_name = self.cfg.GetVGName() nodelist = utils.NiceSort(self.cfg.GetNodeList()) instancelist = utils.NiceSort(self.cfg.GetInstanceList()) @@ -805,12 +843,17 @@ class LUVerifyCluster(NoHooksLU): # node_volume volumeinfo = all_volumeinfo[node] - if type(volumeinfo) != dict: + if isinstance(volumeinfo, basestring): + feedback_fn(" - ERROR: LVM problem on node %s: %s" % + (node, volumeinfo[-400:].encode('string_escape'))) + bad = True + node_volume[node] = {} + elif not isinstance(volumeinfo, dict): feedback_fn(" - ERROR: connection to %s failed" % (node,)) bad = True continue - - node_volume[node] = volumeinfo + else: + node_volume[node] = volumeinfo # node_instance nodeinstance = all_instanceinfo[node] @@ -846,7 +889,158 @@ class LUVerifyCluster(NoHooksLU): return int(bad) -def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): +class LUVerifyDisks(NoHooksLU): + """Verifies the cluster disks status. + + """ + _OP_REQP = [] + + def CheckPrereq(self): + """Check prerequisites. + + This has no prerequisites. + + """ + pass + + def Exec(self, feedback_fn): + """Verify integrity of cluster disks. 
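# [Editor's sketch, not part of the patch: the core bookkeeping of the
# Exec method that follows.  Every (node, lv) pair derived from the
# instances' disks is expected to show up in the per-node LV listings;
# the leftovers are reported per instance.  Names are illustrative.]
def find_missing_lvs(expected, node_lvs):
  # expected: {(node, lv_name): instance_name}
  # node_lvs: {node: {lv_name: status, ...}}
  missing = {}
  for (node, lv_name), inst_name in expected.items():
    if lv_name not in node_lvs.get(node, {}):
      missing.setdefault(inst_name, []).append((node, lv_name))
  return missing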
+ + """ + result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {} + + vg_name = self.cfg.GetVGName() + nodes = utils.NiceSort(self.cfg.GetNodeList()) + instances = [self.cfg.GetInstanceInfo(name) + for name in self.cfg.GetInstanceList()] + + nv_dict = {} + for inst in instances: + inst_lvs = {} + if (inst.status != "up" or + inst.disk_template not in constants.DTS_NET_MIRROR): + continue + inst.MapLVsByNode(inst_lvs) + # transform { iname: {node: [vol,],},} to {(node, vol): iname} + for node, vol_list in inst_lvs.iteritems(): + for vol in vol_list: + nv_dict[(node, vol)] = inst + + if not nv_dict: + return result + + node_lvs = rpc.call_volume_list(nodes, vg_name) + + to_act = set() + for node in nodes: + # node_volume + lvs = node_lvs[node] + + if isinstance(lvs, basestring): + logger.Info("error enumerating LVs on node %s: %s" % (node, lvs)) + res_nlvm[node] = lvs + elif not isinstance(lvs, dict): + logger.Info("connection to node %s failed or invalid data returned" % + (node,)) + res_nodes.append(node) + continue + + for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems(): + inst = nv_dict.pop((node, lv_name), None) + if (not lv_online and inst is not None + and inst.name not in res_instances): + res_instances.append(inst.name) + + # any leftover items in nv_dict are missing LVs, let's arrange the + # data better + for key, inst in nv_dict.iteritems(): + if inst.name not in res_missing: + res_missing[inst.name] = [] + res_missing[inst.name].append(key) + + return result + + +class LURenameCluster(LogicalUnit): + """Rename the cluster. + + """ + HPATH = "cluster-rename" + HTYPE = constants.HTYPE_CLUSTER + _OP_REQP = ["name"] + + def BuildHooksEnv(self): + """Build hooks env. + + """ + env = { + "OP_TARGET": self.sstore.GetClusterName(), + "NEW_NAME": self.op.name, + } + mn = self.sstore.GetMasterNode() + return env, [mn], [mn] + + def CheckPrereq(self): + """Verify that the passed name is a valid one. + + """ + hostname = utils.HostInfo(self.op.name) + + new_name = hostname.name + self.ip = new_ip = hostname.ip + old_name = self.sstore.GetClusterName() + old_ip = self.sstore.GetMasterIP() + if new_name == old_name and new_ip == old_ip: + raise errors.OpPrereqError("Neither the name nor the IP address of the" + " cluster has changed") + if new_ip != old_ip: + result = utils.RunCmd(["fping", "-q", new_ip]) + if not result.failed: + raise errors.OpPrereqError("The given cluster IP address (%s) is" + " reachable on the network. Aborting." % + new_ip) + + self.op.name = new_name + + def Exec(self, feedback_fn): + """Rename the cluster. 
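# [Editor's sketch, not part of the patch: the shape of the Exec method
# that follows.  The master IP is taken down first, the ssconf keys are
# rewritten and redistributed, and the finally clause ensures the master
# role is re-enabled even if distribution fails.  The callables stand in
# for the rpc calls used in the hunk.]
def update_cluster_config(stop_master, apply_updates, start_master,
                          log_error):
  if not stop_master():
    raise RuntimeError("Could not disable the master role")
  try:
    apply_updates()
  finally:
    if not start_master():
      log_error("Could not re-enable the master role on the master,"
                " please restart manually.")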
+ + """ + clustername = self.op.name + ip = self.ip + ss = self.sstore + + # shutdown the master IP + master = ss.GetMasterNode() + if not rpc.call_node_stop_master(master): + raise errors.OpExecError("Could not disable the master role") + + try: + # modify the sstore + ss.SetKey(ss.SS_MASTER_IP, ip) + ss.SetKey(ss.SS_CLUSTER_NAME, clustername) + + # Distribute updated ss config to all nodes + myself = self.cfg.GetNodeInfo(master) + dist_nodes = self.cfg.GetNodeList() + if myself.name in dist_nodes: + dist_nodes.remove(myself.name) + + logger.Debug("Copying updated ssconf data to all nodes") + for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]: + fname = ss.KeyToFilename(keyname) + result = rpc.call_upload_file(dist_nodes, fname) + for to_node in dist_nodes: + if not result[to_node]: + logger.Error("copy of file %s to node %s failed" % + (fname, to_node)) + finally: + if not rpc.call_node_start_master(master): + logger.Error("Could not re-enable the master role on the master," + " please restart manually.") + + +def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False): """Sleep and poll for an instance's disk to sync. """ @@ -854,7 +1048,7 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): return True if not oneshot: - logger.ToStdout("Waiting for instance %s to sync disks." % instance.name) + proc.LogInfo("Waiting for instance %s to sync disks." % instance.name) node = instance.primary_node @@ -868,21 +1062,22 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): cumul_degraded = False rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks) if not rstats: - logger.ToStderr("Can't get any data from node %s" % node) + proc.LogWarning("Can't get any data from node %s" % node) retries += 1 if retries >= 10: - raise errors.RemoteError, ("Can't contact node %s for mirror data," - " aborting." % node) + raise errors.RemoteError("Can't contact node %s for mirror data," + " aborting." % node) time.sleep(6) continue retries = 0 for i in range(len(rstats)): mstat = rstats[i] if mstat is None: - logger.ToStderr("Can't compute data for node %s/%s" % + proc.LogWarning("Can't compute data for node %s/%s" % (node, instance.disks[i].iv_name)) continue - perc_done, est_time, is_degraded = mstat + # we ignore the ldisk parameter + perc_done, est_time, is_degraded, _ = mstat cumul_degraded = cumul_degraded or (is_degraded and perc_done is None) if perc_done is not None: done = False @@ -891,8 +1086,8 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): max_time = est_time else: rem_time = "no time estimate" - logger.ToStdout("- device %s: %5.2f%% done, %s" % - (instance.disks[i].iv_name, perc_done, rem_time)) + proc.LogInfo("- device %s: %5.2f%% done, %s" % + (instance.disks[i].iv_name, perc_done, rem_time)) if done or oneshot: break @@ -905,16 +1100,23 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): utils.Lock('cmd') if done: - logger.ToStdout("Instance %s's disks are in sync." % instance.name) + proc.LogInfo("Instance %s's disks are in sync." % instance.name) return not cumul_degraded -def _CheckDiskConsistency(cfgw, dev, node, on_primary): +def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False): """Check that mirrors are not degraded. - """ + The ldisk parameter, if True, will change the test from the + is_degraded attribute (which represents overall non-ok status for + the device(s)) to the ldisk (representing the local storage status). 
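# [Editor's sketch, not part of the patch: the recursive walk performed
# below, with get_status standing in for rpc.call_blockdev_find.  The
# status tuple layout is taken from the hunk: index 5 is the overall
# is_degraded flag, index 6 the local-disk (ldisk) flag.]
def disk_is_consistent(dev, get_status, ldisk=False):
  if ldisk:
    idx = 6
  else:
    idx = 5
  rstats = get_status(dev)
  ok = bool(rstats) and not rstats[idx]
  for child in getattr(dev, "children", None) or []:
    ok = ok and disk_is_consistent(child, get_status, ldisk)
  return ok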
+ """ cfgw.SetDiskID(dev, node) + if ldisk: + idx = 6 + else: + idx = 5 result = True if on_primary or dev.AssembleOnSecondary(): @@ -923,7 +1125,7 @@ def _CheckDiskConsistency(cfgw, dev, node, on_primary): logger.ToStderr("Can't get any data from node %s" % node) result = False else: - result = result and (not rstats[5]) + result = result and (not rstats[idx]) if dev.children: for child in dev.children: result = result and _CheckDiskConsistency(cfgw, child, node, on_primary) @@ -952,7 +1154,7 @@ class LUDiagnoseOS(NoHooksLU): node_list = self.cfg.GetNodeList() node_data = rpc.call_os_diagnose(node_list) if node_data == False: - raise errors.OpExecError, "Can't gather the list of OSes" + raise errors.OpExecError("Can't gather the list of OSes") return node_data @@ -971,9 +1173,13 @@ class LURemoveNode(LogicalUnit): node would not allows itself to run. """ + env = { + "OP_TARGET": self.op.node_name, + "NODE_NAME": self.op.node_name, + } all_nodes = self.cfg.GetNodeList() all_nodes.remove(self.op.node_name) - return {"NODE_NAME": self.op.node_name}, all_nodes, all_nodes + return env, all_nodes, all_nodes def CheckPrereq(self): """Check prerequisites. @@ -986,27 +1192,25 @@ class LURemoveNode(LogicalUnit): Any errors are signalled by raising errors.OpPrereqError. """ - node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name)) if node is None: - logger.Error("Error: Node '%s' is unknown." % self.op.node_name) - return 1 + raise errors.OpPrereqError, ("Node '%s' is unknown." % self.op.node_name) instance_list = self.cfg.GetInstanceList() masternode = self.sstore.GetMasterNode() if node.name == masternode: - raise errors.OpPrereqError, ("Node is the master node," - " you need to failover first.") + raise errors.OpPrereqError("Node is the master node," + " you need to failover first.") for instance_name in instance_list: instance = self.cfg.GetInstanceInfo(instance_name) if node.name == instance.primary_node: - raise errors.OpPrereqError, ("Instance %s still running on the node," - " please remove first." % instance_name) + raise errors.OpPrereqError("Instance %s still running on the node," + " please remove first." % instance_name) if node.name in instance.secondary_nodes: - raise errors.OpPrereqError, ("Instance %s has node as a secondary," - " please remove first." % instance_name) + raise errors.OpPrereqError("Instance %s has node as a secondary," + " please remove first." % instance_name) self.op.node_name = node.name self.node = node @@ -1026,12 +1230,14 @@ class LURemoveNode(LogicalUnit): self.cfg.RemoveNode(node.name) + _RemoveHostFromEtcHosts(node.name) + class LUQueryNodes(NoHooksLU): """Logical unit for querying nodes. """ - _OP_REQP = ["output_fields"] + _OP_REQP = ["output_fields", "names"] def CheckPrereq(self): """Check prerequisites. @@ -1040,21 +1246,24 @@ class LUQueryNodes(NoHooksLU): """ self.dynamic_fields = frozenset(["dtotal", "dfree", - "mtotal", "mnode", "mfree"]) + "mtotal", "mnode", "mfree", + "bootid"]) - _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"], + _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt", + "pinst_list", "sinst_list", + "pip", "sip"], dynamic=self.dynamic_fields, selected=self.op.output_fields) + self.wanted = _GetWantedNodes(self, self.op.names) def Exec(self, feedback_fn): """Computes the list of nodes and their attributes. 
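# [Editor's sketch, not part of the patch: the mapping built below,
# which replaces the old per-node counters with sets of instance names
# so both pinst_cnt/sinst_cnt and pinst_list/sinst_list can be served
# from the same data.  Instances are (name, pnode, snodes) tuples here.]
def map_instances_to_nodes(nodenames, instances):
  node_to_primary = dict([(name, set()) for name in nodenames])
  node_to_secondary = dict([(name, set()) for name in nodenames])
  for iname, pnode, snodes in instances:
    if pnode in node_to_primary:
      node_to_primary[pnode].add(iname)
    for snode in snodes:
      if snode in node_to_secondary:
        node_to_secondary[snode].add(iname)
  return node_to_primary, node_to_secondary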
""" - nodenames = utils.NiceSort(self.cfg.GetNodeList()) + nodenames = self.wanted nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames] - # begin data gathering if self.dynamic_fields.intersection(self.op.output_fields): @@ -1069,23 +1278,28 @@ class LUQueryNodes(NoHooksLU): "mfree": utils.TryConvert(int, nodeinfo['memory_free']), "dtotal": utils.TryConvert(int, nodeinfo['vg_size']), "dfree": utils.TryConvert(int, nodeinfo['vg_free']), + "bootid": nodeinfo['bootid'], } else: live_data[name] = {} else: live_data = dict.fromkeys(nodenames, {}) - node_to_primary = dict.fromkeys(nodenames, 0) - node_to_secondary = dict.fromkeys(nodenames, 0) + node_to_primary = dict([(name, set()) for name in nodenames]) + node_to_secondary = dict([(name, set()) for name in nodenames]) - if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields: + inst_fields = frozenset(("pinst_cnt", "pinst_list", + "sinst_cnt", "sinst_list")) + if inst_fields & frozenset(self.op.output_fields): instancelist = self.cfg.GetInstanceList() - for instance in instancelist: - instanceinfo = self.cfg.GetInstanceInfo(instance) - node_to_primary[instanceinfo.primary_node] += 1 - for secnode in instanceinfo.secondary_nodes: - node_to_secondary[secnode] += 1 + for instance_name in instancelist: + inst = self.cfg.GetInstanceInfo(instance_name) + if inst.primary_node in node_to_primary: + node_to_primary[inst.primary_node].add(inst.name) + for secnode in inst.secondary_nodes: + if secnode in node_to_secondary: + node_to_secondary[secnode].add(inst.name) # end data gathering @@ -1095,19 +1309,22 @@ class LUQueryNodes(NoHooksLU): for field in self.op.output_fields: if field == "name": val = node.name - elif field == "pinst": - val = node_to_primary[node.name] - elif field == "sinst": - val = node_to_secondary[node.name] + elif field == "pinst_list": + val = list(node_to_primary[node.name]) + elif field == "sinst_list": + val = list(node_to_secondary[node.name]) + elif field == "pinst_cnt": + val = len(node_to_primary[node.name]) + elif field == "sinst_cnt": + val = len(node_to_secondary[node.name]) elif field == "pip": val = node.primary_ip elif field == "sip": val = node.secondary_ip elif field in self.dynamic_fields: - val = live_data[node.name].get(field, "?") + val = live_data[node.name].get(field, None) else: - raise errors.ParameterError, field - val = str(val) + raise errors.ParameterError(field) node_output.append(val) output.append(node_output) @@ -1137,7 +1354,7 @@ class LUQueryNodeVolumes(NoHooksLU): """Computes the list of nodes and their attributes. """ - nodenames = utils.NiceSort([node.name for node in self.nodes]) + nodenames = self.nodes volumes = rpc.call_node_volumes(nodenames) ilist = [self.cfg.GetInstanceInfo(iname) for iname @@ -1147,6 +1364,9 @@ class LUQueryNodeVolumes(NoHooksLU): output = [] for node in nodenames: + if node not in volumes or not volumes[node]: + continue + node_vols = volumes[node][:] node_vols.sort(key=lambda vol: vol['dev']) @@ -1173,7 +1393,7 @@ class LUQueryNodeVolumes(NoHooksLU): else: val = '-' else: - raise errors.ParameterError, field + raise errors.ParameterError(field) node_output.append(str(val)) output.append(node_output) @@ -1181,42 +1401,6 @@ class LUQueryNodeVolumes(NoHooksLU): return output -def _CheckNodesDirs(node_list, paths): - """Verify if the given nodes have the same files. 
- - Args: - node_list: the list of node names to check - paths: the list of directories to checksum and compare - - Returns: - list of (node, different_file, message); if empty, the files are in sync - - """ - file_names = [] - for dir_name in paths: - flist = [os.path.join(dir_name, name) for name in os.listdir(dir_name)] - flist = [name for name in flist if os.path.isfile(name)] - file_names.extend(flist) - - local_checksums = utils.FingerprintFiles(file_names) - - results = [] - verify_params = {'filelist': file_names} - all_node_results = rpc.call_node_verify(node_list, verify_params) - for node_name in node_list: - node_result = all_node_results.get(node_name, False) - if not node_result or 'filelist' not in node_result: - results.append((node_name, "'all files'", "node communication error")) - continue - remote_checksums = node_result['filelist'] - for fname in local_checksums: - if fname not in remote_checksums: - results.append((node_name, fname, "missing file")) - elif remote_checksums[fname] != local_checksums[fname]: - results.append((node_name, fname, "wrong checksum")) - return results - - class LUAddNode(LogicalUnit): """Logical unit for adding node to the cluster. @@ -1232,6 +1416,7 @@ class LUAddNode(LogicalUnit): """ env = { + "OP_TARGET": self.op.node_name, "NODE_NAME": self.op.node_name, "NODE_PIP": self.op.primary_ip, "NODE_SIP": self.op.secondary_ip, @@ -1254,22 +1439,20 @@ class LUAddNode(LogicalUnit): node_name = self.op.node_name cfg = self.cfg - dns_data = utils.LookupHostname(node_name) - if not dns_data: - raise errors.OpPrereqError, ("Node %s is not resolvable" % node_name) + dns_data = utils.HostInfo(node_name) - node = dns_data['hostname'] - primary_ip = self.op.primary_ip = dns_data['ip'] + node = dns_data.name + primary_ip = self.op.primary_ip = dns_data.ip secondary_ip = getattr(self.op, "secondary_ip", None) if secondary_ip is None: secondary_ip = primary_ip if not utils.IsValidIP(secondary_ip): - raise errors.OpPrereqError, ("Invalid secondary IP given") + raise errors.OpPrereqError("Invalid secondary IP given") self.op.secondary_ip = secondary_ip node_list = cfg.GetNodeList() if node in node_list: - raise errors.OpPrereqError, ("Node %s is already in the configuration" - % node) + raise errors.OpPrereqError("Node %s is already in the configuration" + % node) for existing_node_name in node_list: existing_node = cfg.GetNodeInfo(existing_node_name) @@ -1277,8 +1460,8 @@ class LUAddNode(LogicalUnit): existing_node.secondary_ip == primary_ip or existing_node.primary_ip == secondary_ip or existing_node.secondary_ip == secondary_ip): - raise errors.OpPrereqError, ("New node ip address(es) conflict with" - " existing node %s" % existing_node.name) + raise errors.OpPrereqError("New node ip address(es) conflict with" + " existing node %s" % existing_node.name) # check that the type of the node (single versus dual homed) is the # same as for the master @@ -1287,29 +1470,32 @@ class LUAddNode(LogicalUnit): newbie_singlehomed = secondary_ip == primary_ip if master_singlehomed != newbie_singlehomed: if master_singlehomed: - raise errors.OpPrereqError, ("The master has no private ip but the" - " new node has one") + raise errors.OpPrereqError("The master has no private ip but the" + " new node has one") else: - raise errors.OpPrereqError ("The master has a private ip but the" - " new node doesn't have one") + raise errors.OpPrereqError("The master has a private ip but the" + " new node doesn't have one") # checks reachablity - command = ["fping", "-q", primary_ip] - 
result = utils.RunCmd(command) - if result.failed: - raise errors.OpPrereqError, ("Node not reachable by ping") + if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT): + raise errors.OpPrereqError("Node not reachable by ping") if not newbie_singlehomed: # check reachability from my secondary ip to newbie's secondary ip - command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip] - result = utils.RunCmd(command) - if result.failed: - raise errors.OpPrereqError, ("Node secondary ip not reachable by ping") + if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT, + source=myself.secondary_ip): + raise errors.OpPrereqError("Node secondary ip not reachable by TCP" + " based ping to noded port") self.new_node = objects.Node(name=node, primary_ip=primary_ip, secondary_ip=secondary_ip) + if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31: + if not os.path.exists(constants.VNC_PASSWORD_FILE): + raise errors.OpPrereqError("Cluster VNC password file %s missing" % + constants.VNC_PASSWORD_FILE) + def Exec(self, feedback_fn): """Adds the new node to the cluster. @@ -1320,7 +1506,7 @@ class LUAddNode(LogicalUnit): # set up inter-node password and certificate and restarts the node daemon gntpass = self.sstore.GetNodeDaemonPassword() if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass): - raise errors.OpExecError, ("ganeti password corruption detected") + raise errors.OpExecError("ganeti password corruption detected") f = open(constants.SSL_CERT_FILE) try: gntpem = f.read(8192) @@ -1331,13 +1517,11 @@ class LUAddNode(LogicalUnit): # cert doesn't contain this, the here-document will be correctly # parsed by the shell sequence below if re.search('^!EOF\.', gntpem, re.MULTILINE): - raise errors.OpExecError, ("invalid PEM encoding in the SSL certificate") + raise errors.OpExecError("invalid PEM encoding in the SSL certificate") if not gntpem.endswith("\n"): - raise errors.OpExecError, ("PEM must end with newline") + raise errors.OpExecError("PEM must end with newline") logger.Info("copy cluster pass to %s and starting the node daemon" % node) - # remove first the root's known_hosts file - utils.RemoveFile("/root/.ssh/known_hosts") # and then connect with ssh to set password and start ganeti-noded # note that all the below variables are sanitized at this point, # either by being constants or by the checks above @@ -1352,9 +1536,9 @@ class LUAddNode(LogicalUnit): result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True) if result.failed: - raise errors.OpExecError, ("Remote command on node %s, error: %s," - " output: %s" % - (node, result.fail_reason, result.output)) + raise errors.OpExecError("Remote command on node %s, error: %s," + " output: %s" % + (node, result.fail_reason, result.output)) # check connectivity time.sleep(4) @@ -1365,18 +1549,19 @@ class LUAddNode(LogicalUnit): logger.Info("communication to node %s fine, sw version %s match" % (node, result)) else: - raise errors.OpExecError, ("Version mismatch master version %s," - " node version %s" % - (constants.PROTOCOL_VERSION, result)) + raise errors.OpExecError("Version mismatch master version %s," + " node version %s" % + (constants.PROTOCOL_VERSION, result)) else: - raise errors.OpExecError, ("Cannot get version from the new node") + raise errors.OpExecError("Cannot get version from the new node") # setup ssh on node logger.Info("copy ssh key to node %s" % node) + priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) keyarray = [] - keyfiles = ["/etc/ssh/ssh_host_dsa_key", 
"/etc/ssh/ssh_host_dsa_key.pub", - "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub", - "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"] + keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB, + constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB, + priv_key, pub_key] for i in keyfiles: f = open(i, 'r') @@ -1389,21 +1574,30 @@ class LUAddNode(LogicalUnit): keyarray[3], keyarray[4], keyarray[5]) if not result: - raise errors.OpExecError, ("Cannot transfer ssh keys to the new node") + raise errors.OpExecError("Cannot transfer ssh keys to the new node") # Add node to our /etc/hosts, and add key to known_hosts - _UpdateEtcHosts(new_node.name, new_node.primary_ip) + _AddHostToEtcHosts(new_node.name) + _UpdateKnownHosts(new_node.name, new_node.primary_ip, self.cfg.GetHostKey()) if new_node.secondary_ip != new_node.primary_ip: - result = ssh.SSHCall(node, "root", - "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip) - if result.failed: - raise errors.OpExecError, ("Node claims it doesn't have the" - " secondary ip you gave (%s).\n" - "Please fix and re-run this command." % - new_node.secondary_ip) + if not rpc.call_node_tcp_ping(new_node.name, + constants.LOCALHOST_IP_ADDRESS, + new_node.secondary_ip, + constants.DEFAULT_NODED_PORT, + 10, False): + raise errors.OpExecError("Node claims it doesn't have the secondary ip" + " you gave (%s). Please fix and re-run this" + " command." % new_node.secondary_ip) + + success, msg = ssh.VerifyNodeHostname(node) + if not success: + raise errors.OpExecError("Node '%s' claims it has a different hostname" + " than the one the resolver gives: %s." + " Please fix and re-run this command." % + (node, msg)) # Distribute updated /etc/hosts and known_hosts to all nodes, # including the node just added @@ -1413,7 +1607,7 @@ class LUAddNode(LogicalUnit): dist_nodes.remove(myself.name) logger.Debug("Copying hosts and known_hosts to all nodes") - for fname in ("/etc/hosts", "/etc/ssh/ssh_known_hosts"): + for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE): result = rpc.call_upload_file(dist_nodes, fname) for to_node in dist_nodes: if not result[to_node]: @@ -1421,6 +1615,8 @@ class LUAddNode(LogicalUnit): (fname, to_node)) to_copy = ss.GetFileList() + if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31: + to_copy.append(constants.VNC_PASSWORD_FILE) for fname in to_copy: if not ssh.CopyFileToNode(node, fname): logger.Error("could not copy file %s to node %s" % (fname, node)) @@ -1448,6 +1644,7 @@ class LUMasterFailover(LogicalUnit): """ env = { + "OP_TARGET": self.new_master, "NEW_MASTER": self.new_master, "OLD_MASTER": self.old_master, } @@ -1459,15 +1656,14 @@ class LUMasterFailover(LogicalUnit): This checks that we are not already the master. """ - self.new_master = socket.gethostname() - + self.new_master = utils.HostInfo().name self.old_master = self.sstore.GetMasterNode() if self.old_master == self.new_master: - raise errors.OpPrereqError, ("This commands must be run on the node" - " where you want the new master to be.\n" - "%s is already the master" % - self.old_master) + raise errors.OpPrereqError("This commands must be run on the node" + " where you want the new master to be." + " %s is already the master" % + self.old_master) def Exec(self, feedback_fn): """Failover the master node. @@ -1477,7 +1673,6 @@ class LUMasterFailover(LogicalUnit): master. 
""" - #TODO: do not rely on gethostname returning the FQDN logger.Info("setting master to %s, old master: %s" % (self.new_master, self.old_master)) @@ -1496,8 +1691,8 @@ class LUMasterFailover(LogicalUnit): if not rpc.call_node_start_master(self.new_master): logger.Error("could not start the master role on the new master" " %s, please check" % self.new_master) - feedback_fn("Error in activating the master IP on the new master,\n" - "please fix manually.") + feedback_fn("Error in activating the master IP on the new master," + " please fix manually.") @@ -1506,6 +1701,7 @@ class LUQueryClusterInfo(NoHooksLU): """ _OP_REQP = [] + REQ_MASTER = False def CheckPrereq(self): """No prerequsites needed for this LU. @@ -1517,10 +1713,8 @@ class LUQueryClusterInfo(NoHooksLU): """Return cluster config. """ - instances = [self.cfg.GetInstanceInfo(name) - for name in self.cfg.GetInstanceList()] result = { - "name": self.cfg.GetClusterName(), + "name": self.sstore.GetClusterName(), "software_version": constants.RELEASE_VERSION, "protocol_version": constants.PROTOCOL_VERSION, "config_version": constants.CONFIG_VERSION, @@ -1528,9 +1722,6 @@ class LUQueryClusterInfo(NoHooksLU): "export_version": constants.EXPORT_VERSION, "master": self.sstore.GetMasterNode(), "architecture": (platform.architecture()[0], platform.machine()), - "instances": [(instance.name, instance.primary_node) - for instance in instances], - "nodes": self.cfg.GetNodeList(), } return result @@ -1566,7 +1757,7 @@ class LUClusterCopyFile(NoHooksLU): """ filename = self.op.filename - myname = socket.gethostname() + myname = utils.HostInfo().name for node in self.nodes: if node == myname: @@ -1614,8 +1805,8 @@ class LURunClusterCommand(NoHooksLU): """ data = [] for node in self.nodes: - result = utils.RunCmd(["ssh", node.name, self.op.command]) - data.append((node.name, result.cmd, result.output, result.exit_code)) + result = ssh.SSHCall(node, "root", self.op.command) + data.append((node, result.output, result.exit_code)) return data @@ -1635,8 +1826,8 @@ class LUActivateInstanceDisks(NoHooksLU): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance @@ -1646,7 +1837,7 @@ class LUActivateInstanceDisks(NoHooksLU): """ disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg) if not disks_ok: - raise errors.OpExecError, ("Cannot activate block devices") + raise errors.OpExecError("Cannot activate block devices") return disks_info @@ -1668,25 +1859,65 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False): """ device_info = [] disks_ok = True + iname = instance.name + # With the two passes mechanism we try to reduce the window of + # opportunity for the race condition of switching DRBD to primary + # before handshaking occured, but we do not eliminate it + + # The proper fix would be to wait (with some limits) until the + # connection has been made and drbd transitions from WFConnection + # into any other network-connected state (Connected, SyncTarget, + # SyncSource, etc.) 
+ + # 1st pass, assemble on all nodes in secondary mode for inst_disk in instance.disks: - master_result = None for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): cfg.SetDiskID(node_disk, node) - is_primary = node == instance.primary_node - result = rpc.call_blockdev_assemble(node, node_disk, is_primary) + result = rpc.call_blockdev_assemble(node, node_disk, iname, False) if not result: - logger.Error("could not prepare block device %s on node %s (is_pri" - "mary=%s)" % (inst_disk.iv_name, node, is_primary)) - if is_primary or not ignore_secondaries: + logger.Error("could not prepare block device %s on node %s" + " (is_primary=False, pass=1)" % (inst_disk.iv_name, node)) + if not ignore_secondaries: disks_ok = False - if is_primary: - master_result = result - device_info.append((instance.primary_node, inst_disk.iv_name, - master_result)) + + # FIXME: race condition on drbd migration to primary + + # 2nd pass, do only the primary node + for inst_disk in instance.disks: + for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): + if node != instance.primary_node: + continue + cfg.SetDiskID(node_disk, node) + result = rpc.call_blockdev_assemble(node, node_disk, iname, True) + if not result: + logger.Error("could not prepare block device %s on node %s" + " (is_primary=True, pass=2)" % (inst_disk.iv_name, node)) + disks_ok = False + device_info.append((instance.primary_node, inst_disk.iv_name, result)) + + # leave the disks configured for the primary node + # this is a workaround that would be fixed better by + # improving the logical/physical id handling + for disk in instance.disks: + cfg.SetDiskID(disk, instance.primary_node) return disks_ok, device_info +def _StartInstanceDisks(cfg, instance, force): + """Start the disks of an instance. + + """ + disks_ok, dummy = _AssembleInstanceDisks(instance, cfg, + ignore_secondaries=force) + if not disks_ok: + _ShutdownInstanceDisks(instance, cfg) + if force is not None and not force: + logger.Error("If the message above refers to a secondary node," + " you can retry the operation using '--force'.") + raise errors.OpExecError("Disk consistency error") + + class LUDeactivateInstanceDisks(NoHooksLU): """Shutdown an instance's disks. @@ -1702,8 +1933,8 @@ class LUDeactivateInstanceDisks(NoHooksLU): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance def Exec(self, feedback_fn): @@ -1714,12 +1945,12 @@ class LUDeactivateInstanceDisks(NoHooksLU): ins_l = rpc.call_instance_list([instance.primary_node]) ins_l = ins_l[instance.primary_node] if not type(ins_l) is list: - raise errors.OpExecError, ("Can't contact node '%s'" % - instance.primary_node) + raise errors.OpExecError("Can't contact node '%s'" % + instance.primary_node) if self.instance.name in ins_l: - raise errors.OpExecError, ("Instance is running, can't shutdown" - " block devices.") + raise errors.OpExecError("Instance is running, can't shutdown" + " block devices.") _ShutdownInstanceDisks(instance, self.cfg) @@ -1745,6 +1976,36 @@ def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False): return result +def _CheckNodeFreeMemory(cfg, node, reason, requested): + """Checks if a node has enough free memory. + + This function check if a given node has the needed amount of free + memory. 
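# [Editor's sketch, not part of the patch: the check performed by
# _CheckNodeFreeMemory below, with node_info standing in for the result
# of rpc.call_node_info.]
def check_free_memory(node_info, node, reason, requested):
  free_mem = node_info.get(node, {}).get("memory_free")
  if not isinstance(free_mem, int):
    raise ValueError("Can't compute free memory on node %s, result"
                     " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise ValueError("Not enough memory on node %s for %s:"
                     " needed %s MiB, available %s MiB" %
                     (node, reason, requested, free_mem))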
In case the node has less memory or we cannot get the + information from the node, this function raise an OpPrereqError + exception. + + Args: + - cfg: a ConfigWriter instance + - node: the node name + - reason: string to use in the error message + - requested: the amount of memory in MiB + + """ + nodeinfo = rpc.call_node_info([node], cfg.GetVGName()) + if not nodeinfo or not isinstance(nodeinfo, dict): + raise errors.OpPrereqError("Could not contact node %s for resource" + " information" % (node,)) + + free_mem = nodeinfo[node].get('memory_free') + if not isinstance(free_mem, int): + raise errors.OpPrereqError("Can't compute free memory on node %s, result" + " was '%s'" % (node, free_mem)) + if requested > free_mem: + raise errors.OpPrereqError("Not enough memory on node %s for %s:" + " needed %s MiB, available %s MiB" % + (node, reason, requested, free_mem)) + + class LUStartupInstance(LogicalUnit): """Starts an instance. @@ -1760,11 +2021,9 @@ class LUStartupInstance(LogicalUnit): """ env = { - "INSTANCE_NAME": self.op.instance_name, - "INSTANCE_PRIMARY": self.instance.primary_node, - "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes), "FORCE": self.op.force, } + env.update(_BuildInstanceHookEnvByObject(self.instance)) nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + list(self.instance.secondary_nodes)) return env, nl, nl @@ -1778,15 +2037,15 @@ class LUStartupInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) # check bridges existance - brlist = [nic.bridge for nic in instance.nics] - if not rpc.call_bridges_exist(instance.primary_node, brlist): - raise errors.OpPrereqError, ("one or more target bridges %s does not" - " exist on destination node '%s'" % - (brlist, instance.primary_node)) + _CheckInstanceBridgesExist(instance) + + _CheckNodeFreeMemory(self.cfg, instance.primary_node, + "starting instance %s" % instance.name, + instance.memory) self.instance = instance self.op.instance_name = instance.name @@ -1801,43 +2060,22 @@ class LUStartupInstance(LogicalUnit): node_current = instance.primary_node - nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName()) - if not nodeinfo: - raise errors.OpExecError, ("Could not contact node %s for infos" % - (node_current)) - - freememory = nodeinfo[node_current]['memory_free'] - memory = instance.memory - if memory > freememory: - raise errors.OpExecError, ("Not enough memory to start instance" - " %s on node %s" - " needed %s MiB, available %s MiB" % - (instance.name, node_current, memory, - freememory)) - - disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg, - ignore_secondaries=force) - if not disks_ok: - _ShutdownInstanceDisks(instance, self.cfg) - if not force: - logger.Error("If the message above refers to a secondary node," - " you can retry the operation using '--force'.") - raise errors.OpExecError, ("Disk consistency error") + _StartInstanceDisks(self.cfg, instance, force) if not rpc.call_instance_start(node_current, instance, extra_args): _ShutdownInstanceDisks(instance, self.cfg) - raise errors.OpExecError, ("Could not start instance") + raise errors.OpExecError("Could not start instance") self.cfg.MarkInstanceUp(instance.name) -class LUShutdownInstance(LogicalUnit): - """Shutdown an instance. 
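# [Editor's sketch, not part of the patch: the dispatch implemented by
# the new LURebootInstance below.  Soft and hard reboots are delegated
# to the hypervisor; a full reboot is shutdown + disk re-assembly +
# start.  The constant values here are illustrative stand-ins for the
# constants.INSTANCE_REBOOT_* values.]
REBOOT_SOFT, REBOOT_HARD, REBOOT_FULL = "soft", "hard", "full"

def reboot(rtype, hv_reboot, shutdown, restart_disks, start):
  if rtype not in (REBOOT_SOFT, REBOOT_HARD, REBOOT_FULL):
    raise ValueError("reboot type not in [%s, %s, %s]" %
                     (REBOOT_SOFT, REBOOT_HARD, REBOOT_FULL))
  if rtype in (REBOOT_SOFT, REBOOT_HARD):
    if not hv_reboot(rtype):
      raise RuntimeError("Could not reboot instance")
  else:
    if not shutdown():
      raise RuntimeError("could not shutdown instance for full reboot")
    restart_disks()
    if not start():
      raise RuntimeError("Could not start instance for full reboot")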
+class LURebootInstance(LogicalUnit): + """Reboot an instance. """ - HPATH = "instance-stop" + HPATH = "instance-reboot" HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name"] + _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"] def BuildHooksEnv(self): """Build hooks env. @@ -1846,10 +2084,9 @@ class LUShutdownInstance(LogicalUnit): """ env = { - "INSTANCE_NAME": self.op.instance_name, - "INSTANCE_PRIMARY": self.instance.primary_node, - "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes), + "IGNORE_SECONDARIES": self.op.ignore_secondaries, } + env.update(_BuildInstanceHookEnvByObject(self.instance)) nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + list(self.instance.secondary_nodes)) return env, nl, nl @@ -1863,28 +2100,56 @@ class LUShutdownInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + + # check bridges existance + _CheckInstanceBridgesExist(instance) + self.instance = instance + self.op.instance_name = instance.name def Exec(self, feedback_fn): - """Shutdown the instance. + """Reboot the instance. """ instance = self.instance + ignore_secondaries = self.op.ignore_secondaries + reboot_type = self.op.reboot_type + extra_args = getattr(self.op, "extra_args", "") + node_current = instance.primary_node - if not rpc.call_instance_shutdown(node_current, instance): - logger.Error("could not shutdown instance") - self.cfg.MarkInstanceDown(instance.name) - _ShutdownInstanceDisks(instance, self.cfg) + if reboot_type not in [constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD, + constants.INSTANCE_REBOOT_FULL]: + raise errors.ParameterError("reboot type not in [%s, %s, %s]" % + (constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD, + constants.INSTANCE_REBOOT_FULL)) + + if reboot_type in [constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD]: + if not rpc.call_instance_reboot(node_current, instance, + reboot_type, extra_args): + raise errors.OpExecError("Could not reboot instance") + else: + if not rpc.call_instance_shutdown(node_current, instance): + raise errors.OpExecError("could not shutdown instance for full reboot") + _ShutdownInstanceDisks(instance, self.cfg) + _StartInstanceDisks(self.cfg, instance, ignore_secondaries) + if not rpc.call_instance_start(node_current, instance, extra_args): + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Could not start instance for full reboot") + + self.cfg.MarkInstanceUp(instance.name) -class LURemoveInstance(LogicalUnit): - """Remove an instance. +class LUShutdownInstance(LogicalUnit): + """Shutdown an instance. """ - HPATH = "instance-remove" + HPATH = "instance-stop" HTYPE = constants.HTYPE_INSTANCE _OP_REQP = ["instance_name"] @@ -1894,11 +2159,7 @@ class LURemoveInstance(LogicalUnit): This runs on master, primary and secondary nodes of the instance. 
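# [Editor's sketch, not part of the patch: the per-LU hook wiring once
# _BuildInstanceHookEnvByObject exists -- a shared env builder plus a
# node list that decides where the hooks run (only the master node for
# instance removal, master plus instance nodes elsewhere).  Names are
# illustrative.]
def build_hooks(env_builder, instance, master, on_instance_nodes):
  env = env_builder(instance)
  if on_instance_nodes:
    nl = [master, instance.primary_node] + list(instance.secondary_nodes)
  else:
    nl = [master]
  return env, nl, nl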
""" - env = { - "INSTANCE_NAME": self.op.instance_name, - "INSTANCE_PRIMARY": self.instance.primary_node, - "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes), - } + env = _BuildInstanceHookEnvByObject(self.instance) nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + list(self.instance.secondary_nodes)) return env, nl, nl @@ -1912,36 +2173,248 @@ class LURemoveInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance def Exec(self, feedback_fn): - """Remove the instance. + """Shutdown the instance. """ instance = self.instance - logger.Info("shutting down instance %s on node %s" % - (instance.name, instance.primary_node)) - - if not rpc.call_instance_shutdown(instance.primary_node, instance): - raise errors.OpExecError, ("Could not shutdown instance %s on node %s" % - (instance.name, instance.primary_node)) + node_current = instance.primary_node + if not rpc.call_instance_shutdown(node_current, instance): + logger.Error("could not shutdown instance") - logger.Info("removing block devices for instance %s" % instance.name) + self.cfg.MarkInstanceDown(instance.name) + _ShutdownInstanceDisks(instance, self.cfg) - _RemoveDisks(instance, self.cfg) - logger.Info("removing instance %s out of cluster config" % instance.name) +class LUReinstallInstance(LogicalUnit): + """Reinstall an instance. - self.cfg.RemoveInstance(instance.name) + """ + HPATH = "instance-reinstall" + HTYPE = constants.HTYPE_INSTANCE + _OP_REQP = ["instance_name"] + def BuildHooksEnv(self): + """Build hooks env. -class LUQueryInstances(NoHooksLU): - """Logical unit for querying instances. + This runs on master, primary and secondary nodes of the instance. + + """ + env = _BuildInstanceHookEnvByObject(self.instance) + nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + + list(self.instance.secondary_nodes)) + return env, nl, nl + + def CheckPrereq(self): + """Check prerequisites. + + This checks that the instance is in the cluster and is not running. + + """ + instance = self.cfg.GetInstanceInfo( + self.cfg.ExpandInstanceName(self.op.instance_name)) + if instance is None: + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + if instance.disk_template == constants.DT_DISKLESS: + raise errors.OpPrereqError("Instance '%s' has no disks" % + self.op.instance_name) + if instance.status != "down": + raise errors.OpPrereqError("Instance '%s' is marked to be up" % + self.op.instance_name) + remote_info = rpc.call_instance_info(instance.primary_node, instance.name) + if remote_info: + raise errors.OpPrereqError("Instance '%s' is running on the node %s" % + (self.op.instance_name, + instance.primary_node)) + + self.op.os_type = getattr(self.op, "os_type", None) + if self.op.os_type is not None: + # OS verification + pnode = self.cfg.GetNodeInfo( + self.cfg.ExpandNodeName(instance.primary_node)) + if pnode is None: + raise errors.OpPrereqError("Primary node '%s' is unknown" % + self.op.pnode) + os_obj = rpc.call_os_get(pnode.name, self.op.os_type) + if not os_obj: + raise errors.OpPrereqError("OS '%s' not in supported OS list for" + " primary node" % self.op.os_type) + + self.instance = instance + + def Exec(self, feedback_fn): + """Reinstall the instance. 
+
+    """
+    inst = self.instance
+
+    if self.op.os_type is not None:
+      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
+      inst.os = self.op.os_type
+      self.cfg.AddInstance(inst)
+
+    _StartInstanceDisks(self.cfg, inst, None)
+    try:
+      feedback_fn("Running the instance OS create scripts...")
+      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
+        raise errors.OpExecError("Could not install OS for instance %s"
+                                 " on node %s" %
+                                 (inst.name, inst.primary_node))
+    finally:
+      _ShutdownInstanceDisks(inst, self.cfg)
+
+
+class LURenameInstance(LogicalUnit):
+  """Rename an instance.
+
+  """
+  HPATH = "instance-rename"
+  HTYPE = constants.HTYPE_INSTANCE
+  _OP_REQP = ["instance_name", "new_name"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = _BuildInstanceHookEnvByObject(self.instance)
+    env["INSTANCE_NEW_NAME"] = self.op.new_name
+    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+          list(self.instance.secondary_nodes))
+    return env, nl, nl
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster and is not running.
+
+    """
+    instance = self.cfg.GetInstanceInfo(
+      self.cfg.ExpandInstanceName(self.op.instance_name))
+    if instance is None:
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
+    if instance.status != "down":
+      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
+                                 self.op.instance_name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    if remote_info:
+      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
+                                 (self.op.instance_name,
+                                  instance.primary_node))
+    self.instance = instance
+
+    # new name verification
+    name_info = utils.HostInfo(self.op.new_name)
+
+    self.op.new_name = new_name = name_info.name
+    instance_list = self.cfg.GetInstanceList()
+    if new_name in instance_list:
+      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
+                                 new_name)
+
+    if not getattr(self.op, "ignore_ip", False):
+      command = ["fping", "-q", name_info.ip]
+      result = utils.RunCmd(command)
+      if not result.failed:
+        raise errors.OpPrereqError("IP %s of instance %s already in use" %
+                                   (name_info.ip, new_name))
+
+
+  def Exec(self, feedback_fn):
+    """Rename the instance.
+
+    """
+    inst = self.instance
+    old_name = inst.name
+
+    self.cfg.RenameInstance(inst.name, self.op.new_name)
+
+    # re-read the instance from the configuration after rename
+    inst = self.cfg.GetInstanceInfo(self.op.new_name)
+
+    _StartInstanceDisks(self.cfg, inst, None)
+    try:
+      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
+                                          "sda", "sdb"):
+        msg = ("Could not run OS rename script for instance %s on node %s"
+               " (but the instance has been renamed in Ganeti)" %
+               (inst.name, inst.primary_node))
+        logger.Error(msg)
+    finally:
+      _ShutdownInstanceDisks(inst, self.cfg)
+
+
+class LURemoveInstance(LogicalUnit):
+  """Remove an instance.
+
+  """
+  HPATH = "instance-remove"
+  HTYPE = constants.HTYPE_INSTANCE
+  _OP_REQP = ["instance_name"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = _BuildInstanceHookEnvByObject(self.instance)
+    nl = [self.sstore.GetMasterNode()]
+    return env, nl, nl
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster.
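# LURenameInstance.Exec above is deliberately roll-forward: the cluster
# config is renamed first, the instance is re-read under its new name, and
# a failing OS-level rename script is only logged, because Ganeti's own
# state has already moved on. A condensed sketch of that ordering; the
# three callables stand in for cfg.RenameInstance, cfg.GetInstanceInfo and
# rpc.call_instance_run_rename.

import logging

def rename_instance(cfg_rename, cfg_get, run_rename_script, name, new_name):
  cfg_rename(name, new_name)
  inst = cfg_get(new_name)  # re-read the instance after the rename
  if not run_rename_script(inst, old_name=name):
    # not fatal: the cluster already knows the instance by its new name
    logging.error("Could not run OS rename script for %s (but the instance"
                  " has been renamed in Ganeti)", new_name)
  return inst

rename_instance(lambda old, new: None, lambda n: {"name": n},
                lambda inst, old_name: True, "web1", "web2")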
+ + """ + instance = self.cfg.GetInstanceInfo( + self.cfg.ExpandInstanceName(self.op.instance_name)) + if instance is None: + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + self.instance = instance + + def Exec(self, feedback_fn): + """Remove the instance. + + """ + instance = self.instance + logger.Info("shutting down instance %s on node %s" % + (instance.name, instance.primary_node)) + + if not rpc.call_instance_shutdown(instance.primary_node, instance): + if self.op.ignore_failures: + feedback_fn("Warning: can't shutdown instance") + else: + raise errors.OpExecError("Could not shutdown instance %s on node %s" % + (instance.name, instance.primary_node)) + + logger.Info("removing block devices for instance %s" % instance.name) + + if not _RemoveDisks(instance, self.cfg): + if self.op.ignore_failures: + feedback_fn("Warning: can't remove instance's disks") + else: + raise errors.OpExecError("Can't remove instance's disks") + + logger.Info("removing instance %s out of cluster config" % instance.name) + + self.cfg.RemoveInstance(instance.name) + + +class LUQueryInstances(NoHooksLU): + """Logical unit for querying instances. """ - _OP_REQP = ["output_fields"] + _OP_REQP = ["output_fields", "names"] def CheckPrereq(self): """Check prerequisites. @@ -1949,19 +2422,21 @@ class LUQueryInstances(NoHooksLU): This checks that the fields required are valid output fields. """ - self.dynamic_fields = frozenset(["oper_state", "oper_ram"]) + self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"]) _CheckOutputFields(static=["name", "os", "pnode", "snodes", "admin_state", "admin_ram", - "disk_template", "ip", "mac", "bridge"], + "disk_template", "ip", "mac", "bridge", + "sda_size", "sdb_size", "vcpus"], dynamic=self.dynamic_fields, selected=self.op.output_fields) + self.wanted = _GetWantedInstances(self, self.op.names) + def Exec(self, feedback_fn): """Computes the list of nodes and their attributes. 
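# LURemoveInstance.Exec above shows a best-effort teardown: with
# ignore_failures set, each failing step degrades to a warning so the
# configuration entry can still be removed at the end. The control flow in
# isolation (the step callables are hypothetical):

class ExecError(Exception):
  """Stand-in for errors.OpExecError."""

def teardown(steps, ignore_failures, feedback_fn):
  """`steps`: list of (description, thunk returning success) pairs."""
  for what, attempt in steps:
    if not attempt():
      if ignore_failures:
        feedback_fn("Warning: can't %s" % what)
      else:
        raise ExecError("Can't %s" % what)

teardown([("shutdown instance", lambda: False),
          ("remove instance's disks", lambda: True)],
         ignore_failures=True, feedback_fn=lambda msg: None)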
""" - - instance_names = utils.NiceSort(self.cfg.GetInstanceList()) + instance_names = self.wanted instance_list = [self.cfg.GetInstanceInfo(iname) for iname in instance_names] @@ -1996,25 +2471,34 @@ class LUQueryInstances(NoHooksLU): elif field == "pnode": val = instance.primary_node elif field == "snodes": - val = ",".join(instance.secondary_nodes) or "-" + val = list(instance.secondary_nodes) elif field == "admin_state": - if instance.status == "down": - val = "no" - else: - val = "yes" + val = (instance.status != "down") elif field == "oper_state": if instance.primary_node in bad_nodes: - val = "(node down)" + val = None + else: + val = bool(live_data.get(instance.name)) + elif field == "status": + if instance.primary_node in bad_nodes: + val = "ERROR_nodedown" else: - if live_data.get(instance.name): - val = "running" + running = bool(live_data.get(instance.name)) + if running: + if instance.status != "down": + val = "running" + else: + val = "ERROR_up" else: - val = "stopped" + if instance.status != "down": + val = "ERROR_down" + else: + val = "ADMIN_down" elif field == "admin_ram": val = instance.memory elif field == "oper_ram": if instance.primary_node in bad_nodes: - val = "(node down)" + val = None elif instance.name in live_data: val = live_data[instance.name].get("memory", "?") else: @@ -2027,9 +2511,16 @@ class LUQueryInstances(NoHooksLU): val = instance.nics[0].bridge elif field == "mac": val = instance.nics[0].mac + elif field == "sda_size" or field == "sdb_size": + disk = instance.FindDisk(field[:3]) + if disk is None: + val = None + else: + val = disk.size + elif field == "vcpus": + val = instance.vcpus else: - raise errors.ParameterError, field - val = str(val) + raise errors.ParameterError(field) iout.append(val) output.append(iout) @@ -2051,11 +2542,9 @@ class LUFailoverInstance(LogicalUnit): """ env = { - "INSTANCE_NAME": self.op.instance_name, - "INSTANCE_PRIMARY": self.instance.primary_node, - "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes), "IGNORE_CONSISTENCY": self.op.ignore_consistency, } + env.update(_BuildInstanceHookEnvByObject(self.instance)) nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes) return env, nl, nl @@ -2068,28 +2557,29 @@ class LUFailoverInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + + if instance.disk_template not in constants.DTS_NET_MIRROR: + raise errors.OpPrereqError("Instance's disk layout is not" + " network mirrored, cannot failover.") + + secondary_nodes = instance.secondary_nodes + if not secondary_nodes: + raise errors.ProgrammerError("no secondary node but using " + "DT_REMOTE_RAID1 template") + target_node = secondary_nodes[0] # check memory requirements on the secondary node - target_node = instance.secondary_nodes[0] - nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName()) - info = nodeinfo.get(target_node, None) - if not info: - raise errors.OpPrereqError, ("Cannot get current information" - " from node '%s'" % nodeinfo) - if instance.memory > info['memory_free']: - raise errors.OpPrereqError, ("Not enough memory on target node %s." 
- " %d MB available, %d MB required" % - (target_node, info['memory_free'], - instance.memory)) + _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" % + instance.name, instance.memory) # check bridge existance brlist = [nic.bridge for nic in instance.nics] - if not rpc.call_bridges_exist(instance.primary_node, brlist): - raise errors.OpPrereqError, ("one or more target bridges %s does not" - " exist on destination node '%s'" % - (brlist, instance.primary_node)) + if not rpc.call_bridges_exist(target_node, brlist): + raise errors.OpPrereqError("One or more target bridges %s does not" + " exist on destination node '%s'" % + (brlist, target_node)) self.instance = instance @@ -2110,36 +2600,25 @@ class LUFailoverInstance(LogicalUnit): # for remote_raid1, these are md over drbd if not _CheckDiskConsistency(self.cfg, dev, target_node, False): if not self.op.ignore_consistency: - raise errors.OpExecError, ("Disk %s is degraded on target node," - " aborting failover." % dev.iv_name) - - feedback_fn("* checking target node resource availability") - nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName()) - - if not nodeinfo: - raise errors.OpExecError, ("Could not contact target node %s." % - target_node) - - free_memory = int(nodeinfo[target_node]['memory_free']) - memory = instance.memory - if memory > free_memory: - raise errors.OpExecError, ("Not enough memory to create instance %s on" - " node %s. needed %s MiB, available %s MiB" % - (instance.name, target_node, memory, - free_memory)) + raise errors.OpExecError("Disk %s is degraded on target node," + " aborting failover." % dev.iv_name) feedback_fn("* shutting down instance on source node") logger.Info("Shutting down instance %s on node %s" % (instance.name, source_node)) if not rpc.call_instance_shutdown(source_node, instance): - logger.Error("Could not shutdown instance %s on node %s. Proceeding" - " anyway. Please make sure node %s is down" % - (instance.name, source_node, source_node)) + if self.op.ignore_consistency: + logger.Error("Could not shutdown instance %s on node %s. Proceeding" + " anyway. Please make sure node %s is down" % + (instance.name, source_node, source_node)) + else: + raise errors.OpExecError("Could not shutdown instance %s on node %s" % + (instance.name, source_node)) feedback_fn("* deactivating the instance's disks on source node") if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True): - raise errors.OpExecError, ("Can't shut down the instance's disks.") + raise errors.OpExecError("Can't shut down the instance's disks.") instance.primary_node = target_node # distribute new instance config to the other nodes @@ -2153,7 +2632,7 @@ class LUFailoverInstance(LogicalUnit): ignore_secondaries=True) if not disks_ok: _ShutdownInstanceDisks(instance, self.cfg) - raise errors.OpExecError, ("Can't activate the instance's disks") + raise errors.OpExecError("Can't activate the instance's disks") feedback_fn("* starting the instance on the target node") if not rpc.call_instance_start(target_node, instance, None): @@ -2162,20 +2641,20 @@ class LUFailoverInstance(LogicalUnit): (instance.name, target_node)) -def _CreateBlockDevOnPrimary(cfg, node, device): +def _CreateBlockDevOnPrimary(cfg, node, instance, device, info): """Create a tree of block devices on the primary node. This always creates all devices. 
""" - if device.children: for child in device.children: - if not _CreateBlockDevOnPrimary(cfg, node, child): + if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info): return False cfg.SetDiskID(device, node) - new_id = rpc.call_blockdev_create(node, device, device.size, True) + new_id = rpc.call_blockdev_create(node, device, device.size, + instance.name, True, info) if not new_id: return False if device.physical_id is None: @@ -2183,7 +2662,7 @@ def _CreateBlockDevOnPrimary(cfg, node, device): return True -def _CreateBlockDevOnSecondary(cfg, node, device, force): +def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info): """Create a tree of block devices on a secondary node. If this device type has to be created on secondaries, create it and @@ -2196,13 +2675,15 @@ def _CreateBlockDevOnSecondary(cfg, node, device, force): force = True if device.children: for child in device.children: - if not _CreateBlockDevOnSecondary(cfg, node, child, force): + if not _CreateBlockDevOnSecondary(cfg, node, instance, + child, force, info): return False if not force: return True cfg.SetDiskID(device, node) - new_id = rpc.call_blockdev_create(node, device, device.size, False) + new_id = rpc.call_blockdev_create(node, device, device.size, + instance.name, False, info) if not new_id: return False if device.physical_id is None: @@ -2210,23 +2691,52 @@ def _CreateBlockDevOnSecondary(cfg, node, device, force): return True -def _GenerateMDDRBDBranch(cfg, vgname, primary, secondary, size, base): +def _GenerateUniqueNames(cfg, exts): + """Generate a suitable LV name. + + This will generate a logical volume name for the given instance. + + """ + results = [] + for val in exts: + new_id = cfg.GenerateUniqueID() + results.append("%s%s" % (new_id, val)) + return results + + +def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names): """Generate a drbd device complete with its children. """ port = cfg.AllocatePort() - base = "%s_%s" % (base, port) - dev_data = objects.Disk(dev_type="lvm", size=size, - logical_id=(vgname, "%s.data" % base)) - dev_meta = objects.Disk(dev_type="lvm", size=128, - logical_id=(vgname, "%s.meta" % base)) - drbd_dev = objects.Disk(dev_type="drbd", size=size, + vgname = cfg.GetVGName() + dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, + logical_id=(vgname, names[0])) + dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128, + logical_id=(vgname, names[1])) + drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size, logical_id = (primary, secondary, port), children = [dev_data, dev_meta]) return drbd_dev -def _GenerateDiskTemplate(cfg, vgname, template_name, +def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name): + """Generate a drbd8 device complete with its children. + + """ + port = cfg.AllocatePort() + vgname = cfg.GetVGName() + dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, + logical_id=(vgname, names[0])) + dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128, + logical_id=(vgname, names[1])) + drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size, + logical_id = (primary, secondary, port), + children = [dev_data, dev_meta], + iv_name=iv_name) + return drbd_dev + +def _GenerateDiskTemplate(cfg, template_name, instance_name, primary_node, secondary_nodes, disk_sz, swap_sz): """Generate the entire disk layout for a given template type. 
@@ -2234,58 +2744,81 @@ def _GenerateDiskTemplate(cfg, vgname, template_name, """ #TODO: compute space requirements - if template_name == "diskless": + vgname = cfg.GetVGName() + if template_name == constants.DT_DISKLESS: disks = [] - elif template_name == "plain": + elif template_name == constants.DT_PLAIN: if len(secondary_nodes) != 0: raise errors.ProgrammerError("Wrong template configuration") - sda_dev = objects.Disk(dev_type="lvm", size=disk_sz, - logical_id=(vgname, "%s.os" % instance_name), + + names = _GenerateUniqueNames(cfg, [".sda", ".sdb"]) + sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz, + logical_id=(vgname, names[0]), iv_name = "sda") - sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz, - logical_id=(vgname, "%s.swap" % instance_name), + sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz, + logical_id=(vgname, names[1]), iv_name = "sdb") disks = [sda_dev, sdb_dev] - elif template_name == "local_raid1": + elif template_name == constants.DT_LOCAL_RAID1: if len(secondary_nodes) != 0: raise errors.ProgrammerError("Wrong template configuration") - sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz, - logical_id=(vgname, "%s.os_m1" % instance_name)) - sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz, - logical_id=(vgname, "%s.os_m2" % instance_name)) - md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda", + + + names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2", + ".sdb_m1", ".sdb_m2"]) + sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz, + logical_id=(vgname, names[0])) + sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz, + logical_id=(vgname, names[1])) + md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda", size=disk_sz, children = [sda_dev_m1, sda_dev_m2]) - sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz, - logical_id=(vgname, "%s.swap_m1" % - instance_name)) - sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz, - logical_id=(vgname, "%s.swap_m2" % - instance_name)) - md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb", + sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz, + logical_id=(vgname, names[2])) + sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz, + logical_id=(vgname, names[3])) + md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb", size=swap_sz, children = [sdb_dev_m1, sdb_dev_m2]) disks = [md_sda_dev, md_sdb_dev] - elif template_name == "remote_raid1": + elif template_name == constants.DT_REMOTE_RAID1: if len(secondary_nodes) != 1: raise errors.ProgrammerError("Wrong template configuration") remote_node = secondary_nodes[0] - drbd_sda_dev = _GenerateMDDRBDBranch(cfg, vgname, - primary_node, remote_node, disk_sz, - "%s-sda" % instance_name) - md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda", + names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta", + ".sdb_data", ".sdb_meta"]) + drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node, + disk_sz, names[0:2]) + md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda", children = [drbd_sda_dev], size=disk_sz) - drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, vgname, - primary_node, remote_node, swap_sz, - "%s-sdb" % instance_name) - md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb", + drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node, + swap_sz, names[2:4]) + md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb", children = [drbd_sdb_dev], size=swap_sz) disks = 
[md_sda_dev, md_sdb_dev] + elif template_name == constants.DT_DRBD8: + if len(secondary_nodes) != 1: + raise errors.ProgrammerError("Wrong template configuration") + remote_node = secondary_nodes[0] + names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta", + ".sdb_data", ".sdb_meta"]) + drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node, + disk_sz, names[0:2], "sda") + drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node, + swap_sz, names[2:4], "sdb") + disks = [drbd_sda_dev, drbd_sdb_dev] else: raise errors.ProgrammerError("Invalid disk template '%s'" % template_name) return disks +def _GetInstanceInfoText(instance): + """Compute that text that should be added to the disk's metadata. + + """ + return "originstname+%s" % instance.name + + def _CreateDisks(cfg, instance): """Create all disks for an instance. @@ -2298,17 +2831,21 @@ def _CreateDisks(cfg, instance): True or False showing the success of the creation process """ + info = _GetInstanceInfoText(instance) + for device in instance.disks: logger.Info("creating volume %s for instance %s" % (device.iv_name, instance.name)) #HARDCODE for secondary_node in instance.secondary_nodes: - if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False): + if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance, + device, False, info): logger.Error("failed to create volume %s (%s) on secondary node %s!" % (device.iv_name, device, secondary_node)) return False #HARDCODE - if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device): + if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, + instance, device, info): logger.Error("failed to create volume %s on primary!" % device.iv_name) return False @@ -2320,7 +2857,7 @@ def _RemoveDisks(instance, cfg): This abstracts away some work from `AddInstance()` and `RemoveInstance()`. Note that in case some of the devices couldn't - be remove, the removal will continue with the other ones (compare + be removed, the removal will continue with the other ones (compare with `_CreateDisks()`). Args: @@ -2352,7 +2889,7 @@ class LUCreateInstance(LogicalUnit): HTYPE = constants.HTYPE_INSTANCE _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode", "disk_template", "swap_size", "mode", "start", "vcpus", - "wait_for_sync"] + "wait_for_sync", "ip_check", "mac"] def BuildHooksEnv(self): """Build hooks env. 
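# _CreateDisks above creates every volume on all secondary nodes before the
# primary and bails out (returning False) on the first failure, so the
# caller can _RemoveDisks the partially created set. The same shape in
# miniature; `create_dev` stands in for the per-node creation helpers:

def create_disks(create_dev, instance):
  for device in instance["disks"]:
    for sec_node in instance["secondaries"]:
      if not create_dev(sec_node, device, force=False):
        return False  # caller rolls back with a remove-disks pass
    if not create_dev(instance["primary"], device, force=True):
      return False
  return True

inst = {"disks": [{"iv_name": "sda"}], "primary": "n1", "secondaries": ["n2"]}
assert create_disks(lambda node, dev, force: True, inst)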
@@ -2361,23 +2898,25 @@ class LUCreateInstance(LogicalUnit): """ env = { - "INSTANCE_NAME": self.op.instance_name, - "INSTANCE_PRIMARY": self.op.pnode, - "INSTANCE_SECONDARIES": " ".join(self.secondaries), - "DISK_TEMPLATE": self.op.disk_template, - "MEM_SIZE": self.op.mem_size, - "DISK_SIZE": self.op.disk_size, - "SWAP_SIZE": self.op.swap_size, - "VCPUS": self.op.vcpus, - "BRIDGE": self.op.bridge, + "INSTANCE_DISK_TEMPLATE": self.op.disk_template, + "INSTANCE_DISK_SIZE": self.op.disk_size, + "INSTANCE_SWAP_SIZE": self.op.swap_size, "INSTANCE_ADD_MODE": self.op.mode, } if self.op.mode == constants.INSTANCE_IMPORT: - env["SRC_NODE"] = self.op.src_node - env["SRC_PATH"] = self.op.src_path - env["SRC_IMAGE"] = self.src_image - if self.inst_ip: - env["INSTANCE_IP"] = self.inst_ip + env["INSTANCE_SRC_NODE"] = self.op.src_node + env["INSTANCE_SRC_PATH"] = self.op.src_path + env["INSTANCE_SRC_IMAGE"] = self.src_image + + env.update(_BuildInstanceHookEnv(name=self.op.instance_name, + primary_node=self.op.pnode, + secondary_nodes=self.secondaries, + status=self.instance_status, + os_type=self.op.os_type, + memory=self.op.mem_size, + vcpus=self.op.vcpus, + nics=[(self.inst_ip, self.op.bridge, self.op.mac)], + )) nl = ([self.sstore.GetMasterNode(), self.op.pnode] + self.secondaries) @@ -2388,41 +2927,45 @@ class LUCreateInstance(LogicalUnit): """Check prerequisites. """ + for attr in ["kernel_path", "initrd_path", "hvm_boot_order"]: + if not hasattr(self.op, attr): + setattr(self.op, attr, None) + if self.op.mode not in (constants.INSTANCE_CREATE, constants.INSTANCE_IMPORT): - raise errors.OpPrereqError, ("Invalid instance creation mode '%s'" % - self.op.mode) + raise errors.OpPrereqError("Invalid instance creation mode '%s'" % + self.op.mode) if self.op.mode == constants.INSTANCE_IMPORT: src_node = getattr(self.op, "src_node", None) src_path = getattr(self.op, "src_path", None) if src_node is None or src_path is None: - raise errors.OpPrereqError, ("Importing an instance requires source" - " node and path options") + raise errors.OpPrereqError("Importing an instance requires source" + " node and path options") src_node_full = self.cfg.ExpandNodeName(src_node) if src_node_full is None: - raise errors.OpPrereqError, ("Unknown source node '%s'" % src_node) + raise errors.OpPrereqError("Unknown source node '%s'" % src_node) self.op.src_node = src_node = src_node_full if not os.path.isabs(src_path): - raise errors.OpPrereqError, ("The source path must be absolute") + raise errors.OpPrereqError("The source path must be absolute") export_info = rpc.call_export_info(src_node, src_path) if not export_info: - raise errors.OpPrereqError, ("No export found in dir %s" % src_path) + raise errors.OpPrereqError("No export found in dir %s" % src_path) if not export_info.has_section(constants.INISECT_EXP): - raise errors.ProgrammerError, ("Corrupted export config") + raise errors.ProgrammerError("Corrupted export config") ei_version = export_info.get(constants.INISECT_EXP, 'version') if (int(ei_version) != constants.EXPORT_VERSION): - raise errors.OpPrereqError, ("Wrong export version %s (wanted %d)" % - (ei_version, constants.EXPORT_VERSION)) + raise errors.OpPrereqError("Wrong export version %s (wanted %d)" % + (ei_version, constants.EXPORT_VERSION)) if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1: - raise errors.OpPrereqError, ("Can't import instance with more than" - " one data disk") + raise errors.OpPrereqError("Can't import instance with more than" + " one data disk") # FIXME: are the old 
os-es, disk sizes, etc. useful? self.op.os_type = export_info.get(constants.INISECT_EXP, 'os') @@ -2431,98 +2974,112 @@ class LUCreateInstance(LogicalUnit): self.src_image = diskimage else: # INSTANCE_CREATE if getattr(self.op, "os_type", None) is None: - raise errors.OpPrereqError, ("No guest OS specified") + raise errors.OpPrereqError("No guest OS specified") # check primary node pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode)) if pnode is None: - raise errors.OpPrereqError, ("Primary node '%s' is uknown" % - self.op.pnode) + raise errors.OpPrereqError("Primary node '%s' is unknown" % + self.op.pnode) self.op.pnode = pnode.name self.pnode = pnode self.secondaries = [] # disk template and mirror node verification if self.op.disk_template not in constants.DISK_TEMPLATES: - raise errors.OpPrereqError, ("Invalid disk template name") + raise errors.OpPrereqError("Invalid disk template name") - if self.op.disk_template == constants.DT_REMOTE_RAID1: + if self.op.disk_template in constants.DTS_NET_MIRROR: if getattr(self.op, "snode", None) is None: - raise errors.OpPrereqError, ("The 'remote_raid1' disk template needs" - " a mirror node") + raise errors.OpPrereqError("The networked disk templates need" + " a mirror node") snode_name = self.cfg.ExpandNodeName(self.op.snode) if snode_name is None: - raise errors.OpPrereqError, ("Unknown secondary node '%s'" % - self.op.snode) + raise errors.OpPrereqError("Unknown secondary node '%s'" % + self.op.snode) elif snode_name == pnode.name: - raise errors.OpPrereqError, ("The secondary node cannot be" - " the primary node.") + raise errors.OpPrereqError("The secondary node cannot be" + " the primary node.") self.secondaries.append(snode_name) - # Check lv size requirements - nodenames = [pnode.name] + self.secondaries - nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName()) - # Required free disk space as a function of disk and swap space req_size_dict = { - constants.DT_DISKLESS: 0, + constants.DT_DISKLESS: None, constants.DT_PLAIN: self.op.disk_size + self.op.swap_size, constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2, # 256 MB are added for drbd metadata, 128MB for each drbd device constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256, + constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256, } if self.op.disk_template not in req_size_dict: - raise errors.ProgrammerError, ("Disk template '%s' size requirement" - " is unknown" % self.op.disk_template) + raise errors.ProgrammerError("Disk template '%s' size requirement" + " is unknown" % self.op.disk_template) req_size = req_size_dict[self.op.disk_template] - for node in nodenames: - info = nodeinfo.get(node, None) - if not info: - raise errors.OpPrereqError, ("Cannot get current information" + # Check lv size requirements + if req_size is not None: + nodenames = [pnode.name] + self.secondaries + nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName()) + for node in nodenames: + info = nodeinfo.get(node, None) + if not info: + raise errors.OpPrereqError("Cannot get current information" " from node '%s'" % nodeinfo) - if req_size > info['vg_free']: - raise errors.OpPrereqError, ("Not enough disk space on target node %s." + vg_free = info.get('vg_free', None) + if not isinstance(vg_free, int): + raise errors.OpPrereqError("Can't compute free disk space on" + " node %s" % node) + if req_size > info['vg_free']: + raise errors.OpPrereqError("Not enough disk space on target node %s." 
" %d MB available, %d MB required" % (node, info['vg_free'], req_size)) # os verification - os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name] - if not isinstance(os_obj, objects.OS): - raise errors.OpPrereqError, ("OS '%s' not in supported os list for" - " primary node" % self.op.os_type) + os_obj = rpc.call_os_get(pnode.name, self.op.os_type) + if not os_obj: + raise errors.OpPrereqError("OS '%s' not in supported os list for" + " primary node" % self.op.os_type) + + if self.op.kernel_path == constants.VALUE_NONE: + raise errors.OpPrereqError("Can't set instance kernel to none") # instance verification - hostname1 = utils.LookupHostname(self.op.instance_name) - if not hostname1: - raise errors.OpPrereqError, ("Instance name '%s' not found in dns" % - self.op.instance_name) + hostname1 = utils.HostInfo(self.op.instance_name) - self.op.instance_name = instance_name = hostname1['hostname'] + self.op.instance_name = instance_name = hostname1.name instance_list = self.cfg.GetInstanceList() if instance_name in instance_list: - raise errors.OpPrereqError, ("Instance '%s' is already in the cluster" % - instance_name) + raise errors.OpPrereqError("Instance '%s' is already in the cluster" % + instance_name) ip = getattr(self.op, "ip", None) if ip is None or ip.lower() == "none": inst_ip = None elif ip.lower() == "auto": - inst_ip = hostname1['ip'] + inst_ip = hostname1.ip else: if not utils.IsValidIP(ip): - raise errors.OpPrereqError, ("given IP address '%s' doesn't look" - " like a valid IP" % ip) + raise errors.OpPrereqError("given IP address '%s' doesn't look" + " like a valid IP" % ip) inst_ip = ip self.inst_ip = inst_ip - command = ["fping", "-q", hostname1['ip']] - result = utils.RunCmd(command) - if not result.failed: - raise errors.OpPrereqError, ("IP %s of instance %s already in use" % - (hostname1['ip'], instance_name)) + if self.op.start and not self.op.ip_check: + raise errors.OpPrereqError("Cannot ignore IP address conflicts when" + " adding an instance in start mode") + + if self.op.ip_check: + if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT): + raise errors.OpPrereqError("IP %s of instance %s already in use" % + (hostname1.ip, instance_name)) + + # MAC address verification + if self.op.mac != "auto": + if not utils.IsValidMac(self.op.mac.lower()): + raise errors.OpPrereqError("invalid MAC address specified: %s" % + self.op.mac) # bridge verification bridge = getattr(self.op, "bridge", None) @@ -2532,9 +3089,15 @@ class LUCreateInstance(LogicalUnit): self.op.bridge = bridge if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]): - raise errors.OpPrereqError, ("target bridge '%s' does not exist on" - " destination node '%s'" % - (self.op.bridge, pnode.name)) + raise errors.OpPrereqError("target bridge '%s' does not exist on" + " destination node '%s'" % + (self.op.bridge, pnode.name)) + + # boot order verification + if self.op.hvm_boot_order is not None: + if len(self.op.hvm_boot_order.strip("acdn")) != 0: + raise errors.OpPrereqError("invalid boot order specified," + " must be one or more of [acdn]") if self.op.start: self.instance_status = 'up' @@ -2548,11 +3111,22 @@ class LUCreateInstance(LogicalUnit): instance = self.op.instance_name pnode_name = self.pnode.name - nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC()) + if self.op.mac == "auto": + mac_address = self.cfg.GenerateMAC() + else: + mac_address = self.op.mac + + nic = objects.NIC(bridge=self.op.bridge, mac=mac_address) if self.inst_ip is not None: nic.ip = 
self.inst_ip - disks = _GenerateDiskTemplate(self.cfg, self.cfg.GetVGName(), + ht_kind = self.sstore.GetHypervisorType() + if ht_kind in constants.HTS_REQ_PORT: + network_port = self.cfg.AllocatePort() + else: + network_port = None + + disks = _GenerateDiskTemplate(self.cfg, self.op.disk_template, instance, pnode_name, self.secondaries, self.op.disk_size, @@ -2565,32 +3139,36 @@ class LUCreateInstance(LogicalUnit): nics=[nic], disks=disks, disk_template=self.op.disk_template, status=self.instance_status, + network_port=network_port, + kernel_path=self.op.kernel_path, + initrd_path=self.op.initrd_path, + hvm_boot_order=self.op.hvm_boot_order, ) feedback_fn("* creating instance disks...") if not _CreateDisks(self.cfg, iobj): _RemoveDisks(iobj, self.cfg) - raise errors.OpExecError, ("Device creation failed, reverting...") + raise errors.OpExecError("Device creation failed, reverting...") feedback_fn("adding instance %s to cluster config" % instance) self.cfg.AddInstance(iobj) if self.op.wait_for_sync: - disk_abort = not _WaitForSync(self.cfg, iobj) - elif iobj.disk_template == "remote_raid1": + disk_abort = not _WaitForSync(self.cfg, iobj, self.proc) + elif iobj.disk_template in constants.DTS_NET_MIRROR: # make sure the disks are not degraded (still sync-ing is ok) time.sleep(15) feedback_fn("* checking mirrors status") - disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True) + disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True) else: disk_abort = False if disk_abort: _RemoveDisks(iobj, self.cfg) self.cfg.RemoveInstance(iobj.name) - raise errors.OpExecError, ("There are some degraded disks for" - " this instance") + raise errors.OpExecError("There are some degraded disks for" + " this instance") feedback_fn("creating os for instance %s on node %s" % (instance, pnode_name)) @@ -2599,9 +3177,9 @@ class LUCreateInstance(LogicalUnit): if self.op.mode == constants.INSTANCE_CREATE: feedback_fn("* running the instance OS create scripts...") if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"): - raise errors.OpExecError, ("could not add os for instance %s" - " on node %s" % - (instance, pnode_name)) + raise errors.OpExecError("could not add os for instance %s" + " on node %s" % + (instance, pnode_name)) elif self.op.mode == constants.INSTANCE_IMPORT: feedback_fn("* running the instance OS import scripts...") @@ -2609,19 +3187,19 @@ class LUCreateInstance(LogicalUnit): src_image = self.src_image if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb", src_node, src_image): - raise errors.OpExecError, ("Could not import os for instance" - " %s on node %s" % - (instance, pnode_name)) + raise errors.OpExecError("Could not import os for instance" + " %s on node %s" % + (instance, pnode_name)) else: # also checked in the prereq part - raise errors.ProgrammerError, ("Unknown OS initialization mode '%s'" - % self.op.mode) + raise errors.ProgrammerError("Unknown OS initialization mode '%s'" + % self.op.mode) if self.op.start: logger.Info("starting instance %s on node %s" % (instance, pnode_name)) feedback_fn("* starting instance...") if not rpc.call_instance_start(pnode_name, iobj, None): - raise errors.OpExecError, ("Could not start instance") + raise errors.OpExecError("Could not start instance") class LUConnectConsole(NoHooksLU): @@ -2643,8 +3221,8 @@ class LUConnectConsole(NoHooksLU): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - 
self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance def Exec(self, feedback_fn): @@ -2656,16 +3234,22 @@ class LUConnectConsole(NoHooksLU): node_insts = rpc.call_instance_list([node])[node] if node_insts is False: - raise errors.OpExecError, ("Can't connect to node %s." % node) + raise errors.OpExecError("Can't connect to node %s." % node) if instance.name not in node_insts: - raise errors.OpExecError, ("Instance %s is not running." % instance.name) + raise errors.OpExecError("Instance %s is not running." % instance.name) logger.Debug("connecting to console of %s on %s" % (instance.name, node)) hyper = hypervisor.GetHypervisor() - console_cmd = hyper.GetShellCommandForConsole(instance.name) - return node, console_cmd + console_cmd = hyper.GetShellCommandForConsole(instance) + # build ssh cmdline + argv = ["ssh", "-q", "-t"] + argv.extend(ssh.KNOWN_HOSTS_OPTS) + argv.extend(ssh.BATCH_MODE_OPTS) + argv.append(node) + argv.append(console_cmd) + return "ssh", argv class LUAddMDDRBDComponent(LogicalUnit): @@ -2683,10 +3267,10 @@ class LUAddMDDRBDComponent(LogicalUnit): """ env = { - "INSTANCE_NAME": self.op.instance_name, "NEW_SECONDARY": self.op.remote_node, "DISK_NAME": self.op.disk_name, } + env.update(_BuildInstanceHookEnvByObject(self.instance)) nl = [self.sstore.GetMasterNode(), self.instance.primary_node, self.op.remote_node,] + list(self.instance.secondary_nodes) return env, nl, nl @@ -2700,33 +3284,32 @@ class LUAddMDDRBDComponent(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance remote_node = self.cfg.ExpandNodeName(self.op.remote_node) if remote_node is None: - raise errors.OpPrereqError, ("Node '%s' not known" % self.op.remote_node) + raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node) self.remote_node = remote_node if remote_node == instance.primary_node: - raise errors.OpPrereqError, ("The specified node is the primary node of" - " the instance.") + raise errors.OpPrereqError("The specified node is the primary node of" + " the instance.") if instance.disk_template != constants.DT_REMOTE_RAID1: - raise errors.OpPrereqError, ("Instance's disk layout is not" - " remote_raid1.") + raise errors.OpPrereqError("Instance's disk layout is not" + " remote_raid1.") for disk in instance.disks: if disk.iv_name == self.op.disk_name: break else: - raise errors.OpPrereqError, ("Can't find this device ('%s') in the" - " instance." % self.op.disk_name) + raise errors.OpPrereqError("Can't find this device ('%s') in the" + " instance." % self.op.disk_name) if len(disk.children) > 1: - raise errors.OpPrereqError, ("The device already has two slave" - " devices.\n" - "This would create a 3-disk raid1" - " which we don't allow.") + raise errors.OpPrereqError("The device already has two slave devices." 
+ " This would create a 3-disk raid1 which we" + " don't allow.") self.disk = disk def Exec(self, feedback_fn): @@ -2737,30 +3320,34 @@ class LUAddMDDRBDComponent(LogicalUnit): instance = self.instance remote_node = self.remote_node - new_drbd = _GenerateMDDRBDBranch(self.cfg, self.cfg.GetVGName(), - instance.primary_node, remote_node, - disk.size, "%s-%s" % - (instance.name, self.op.disk_name)) + lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]] + names = _GenerateUniqueNames(self.cfg, lv_names) + new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node, + remote_node, disk.size, names) logger.Info("adding new mirror component on secondary") #HARDCODE - if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False): - raise errors.OpExecError, ("Failed to create new component on secondary" - " node %s" % remote_node) + if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance, + new_drbd, False, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new component on secondary" + " node %s" % remote_node) logger.Info("adding new mirror component on primary") #HARDCODE - if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd): + if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, + instance, new_drbd, + _GetInstanceInfoText(instance)): # remove secondary dev self.cfg.SetDiskID(new_drbd, remote_node) rpc.call_blockdev_remove(remote_node, new_drbd) - raise errors.OpExecError, ("Failed to create volume on primary") + raise errors.OpExecError("Failed to create volume on primary") # the device exists now # call the primary node to add the mirror to md logger.Info("adding new mirror component to md") - if not rpc.call_blockdev_addchild(instance.primary_node, - disk, new_drbd): + if not rpc.call_blockdev_addchildren(instance.primary_node, + disk, [new_drbd]): logger.Error("Can't add mirror compoment to md!") self.cfg.SetDiskID(new_drbd, remote_node) if not rpc.call_blockdev_remove(remote_node, new_drbd): @@ -2768,13 +3355,13 @@ class LUAddMDDRBDComponent(LogicalUnit): self.cfg.SetDiskID(new_drbd, instance.primary_node) if not rpc.call_blockdev_remove(instance.primary_node, new_drbd): logger.Error("Can't rollback on primary") - raise errors.OpExecError, "Can't add mirror component to md array" + raise errors.OpExecError("Can't add mirror component to md array") disk.children.append(new_drbd) self.cfg.AddInstance(instance) - _WaitForSync(self.cfg, instance) + _WaitForSync(self.cfg, instance, self.proc) return 0 @@ -2794,11 +3381,11 @@ class LURemoveMDDRBDComponent(LogicalUnit): """ env = { - "INSTANCE_NAME": self.op.instance_name, "DISK_NAME": self.op.disk_name, "DISK_ID": self.op.disk_id, "OLD_SECONDARY": self.old_secondary, } + env.update(_BuildInstanceHookEnvByObject(self.instance)) nl = [self.sstore.GetMasterNode(), self.instance.primary_node] + list(self.instance.secondary_nodes) return env, nl, nl @@ -2812,28 +3399,29 @@ class LURemoveMDDRBDComponent(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance if instance.disk_template != constants.DT_REMOTE_RAID1: - raise errors.OpPrereqError, ("Instance's disk layout is not" - " remote_raid1.") + raise errors.OpPrereqError("Instance's disk layout is not" + " remote_raid1.") for disk in 
instance.disks: if disk.iv_name == self.op.disk_name: break else: - raise errors.OpPrereqError, ("Can't find this device ('%s') in the" - " instance." % self.op.disk_name) + raise errors.OpPrereqError("Can't find this device ('%s') in the" + " instance." % self.op.disk_name) for child in disk.children: - if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id: + if (child.dev_type == constants.LD_DRBD7 and + child.logical_id[2] == self.op.disk_id): break else: - raise errors.OpPrereqError, ("Can't find the device with this port.") + raise errors.OpPrereqError("Can't find the device with this port.") if len(disk.children) < 2: - raise errors.OpPrereqError, ("Cannot remove the last component from" - " a mirror.") + raise errors.OpPrereqError("Cannot remove the last component from" + " a mirror.") self.disk = disk self.child = child if self.child.logical_id[0] == instance.primary_node: @@ -2851,9 +3439,9 @@ class LURemoveMDDRBDComponent(LogicalUnit): child = self.child logger.Info("remove mirror component") self.cfg.SetDiskID(disk, instance.primary_node) - if not rpc.call_blockdev_removechild(instance.primary_node, - disk, child): - raise errors.OpExecError, ("Can't remove child from mirror.") + if not rpc.call_blockdev_removechildren(instance.primary_node, + disk, [child]): + raise errors.OpExecError("Can't remove child from mirror.") for node in child.logical_id[:2]: self.cfg.SetDiskID(child, node) @@ -2871,7 +3459,7 @@ class LUReplaceDisks(LogicalUnit): """ HPATH = "mirrors-replace" HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name"] + _OP_REQP = ["instance_name", "mode", "disks"] def BuildHooksEnv(self): """Build hooks env. @@ -2880,12 +3468,17 @@ class LUReplaceDisks(LogicalUnit): """ env = { - "INSTANCE_NAME": self.op.instance_name, + "MODE": self.op.mode, "NEW_SECONDARY": self.op.remote_node, "OLD_SECONDARY": self.instance.secondary_nodes[0], } - nl = [self.sstore.GetMasterNode(), - self.instance.primary_node] + list(self.instance.secondary_nodes) + env.update(_BuildInstanceHookEnvByObject(self.instance)) + nl = [ + self.sstore.GetMasterNode(), + self.instance.primary_node, + ] + if self.op.remote_node is not None: + nl.append(self.op.remote_node) return env, nl, nl def CheckPrereq(self): @@ -2897,71 +3490,123 @@ class LUReplaceDisks(LogicalUnit): instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) self.instance = instance + self.op.instance_name = instance.name - if instance.disk_template != constants.DT_REMOTE_RAID1: - raise errors.OpPrereqError, ("Instance's disk layout is not" - " remote_raid1.") + if instance.disk_template not in constants.DTS_NET_MIRROR: + raise errors.OpPrereqError("Instance's disk layout is not" + " network mirrored.") if len(instance.secondary_nodes) != 1: - raise errors.OpPrereqError, ("The instance has a strange layout," - " expected one secondary but found %d" % - len(instance.secondary_nodes)) + raise errors.OpPrereqError("The instance has a strange layout," + " expected one secondary but found %d" % + len(instance.secondary_nodes)) + + self.sec_node = instance.secondary_nodes[0] remote_node = getattr(self.op, "remote_node", None) - if remote_node is None: - remote_node = instance.secondary_nodes[0] - else: + if remote_node is not None: remote_node = self.cfg.ExpandNodeName(remote_node) if remote_node is None: 
- raise errors.OpPrereqError, ("Node '%s' not known" % - self.op.remote_node) + raise errors.OpPrereqError("Node '%s' not known" % + self.op.remote_node) + self.remote_node_info = self.cfg.GetNodeInfo(remote_node) + else: + self.remote_node_info = None if remote_node == instance.primary_node: - raise errors.OpPrereqError, ("The specified node is the primary node of" - " the instance.") + raise errors.OpPrereqError("The specified node is the primary node of" + " the instance.") + elif remote_node == self.sec_node: + if self.op.mode == constants.REPLACE_DISK_SEC: + # this is for DRBD8, where we can't execute the same mode of + # replacement as for drbd7 (no different port allocated) + raise errors.OpPrereqError("Same secondary given, cannot execute" + " replacement") + # the user gave the current secondary, switch to + # 'no-replace-secondary' mode for drbd7 + remote_node = None + if (instance.disk_template == constants.DT_REMOTE_RAID1 and + self.op.mode != constants.REPLACE_DISK_ALL): + raise errors.OpPrereqError("Template 'remote_raid1' only allows all" + " disks replacement, not individual ones") + if instance.disk_template == constants.DT_DRBD8: + if (self.op.mode == constants.REPLACE_DISK_ALL and + remote_node is not None): + # switch to replace secondary mode + self.op.mode = constants.REPLACE_DISK_SEC + + if self.op.mode == constants.REPLACE_DISK_ALL: + raise errors.OpPrereqError("Template 'drbd' only allows primary or" + " secondary disk replacement, not" + " both at once") + elif self.op.mode == constants.REPLACE_DISK_PRI: + if remote_node is not None: + raise errors.OpPrereqError("Template 'drbd' does not allow changing" + " the secondary while doing a primary" + " node disk replacement") + self.tgt_node = instance.primary_node + self.oth_node = instance.secondary_nodes[0] + elif self.op.mode == constants.REPLACE_DISK_SEC: + self.new_node = remote_node # this can be None, in which case + # we don't change the secondary + self.tgt_node = instance.secondary_nodes[0] + self.oth_node = instance.primary_node + else: + raise errors.ProgrammerError("Unhandled disk replace mode") + + for name in self.op.disks: + if instance.FindDisk(name) is None: + raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" % + (name, instance.name)) self.op.remote_node = remote_node - def Exec(self, feedback_fn): + def _ExecRR1(self, feedback_fn): """Replace the disks of an instance. """ instance = self.instance iv_names = {} # start of work - remote_node = self.op.remote_node + if self.op.remote_node is None: + remote_node = self.sec_node + else: + remote_node = self.op.remote_node cfg = self.cfg - vgname = cfg.GetVGName() for dev in instance.disks: size = dev.size - new_drbd = _GenerateMDDRBDBranch(cfg, vgname, instance.primary_node, - remote_node, size, - "%s-%s" % (instance.name, dev.iv_name)) + lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]] + names = _GenerateUniqueNames(cfg, lv_names) + new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node, + remote_node, size, names) iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd) logger.Info("adding new mirror component on secondary for %s" % dev.iv_name) #HARDCODE - if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False): - raise errors.OpExecError, ("Failed to create new component on" - " secondary node %s\n" - "Full abort, cleanup manually!" 
% - remote_node) + if not _CreateBlockDevOnSecondary(cfg, remote_node, instance, + new_drbd, False, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new component on secondary" + " node %s. Full abort, cleanup manually!" % + remote_node) logger.Info("adding new mirror component on primary") #HARDCODE - if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd): + if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, + instance, new_drbd, + _GetInstanceInfoText(instance)): # remove secondary dev cfg.SetDiskID(new_drbd, remote_node) rpc.call_blockdev_remove(remote_node, new_drbd) - raise errors.OpExecError("Failed to create volume on primary!\n" - "Full abort, cleanup manually!!") + raise errors.OpExecError("Failed to create volume on primary!" + " Full abort, cleanup manually!!") # the device exists now # call the primary node to add the mirror to md logger.Info("adding new mirror component to md") - if not rpc.call_blockdev_addchild(instance.primary_node, dev, - new_drbd): + if not rpc.call_blockdev_addchildren(instance.primary_node, dev, + [new_drbd]): logger.Error("Can't add mirror compoment to md!") cfg.SetDiskID(new_drbd, remote_node) if not rpc.call_blockdev_remove(remote_node, new_drbd): @@ -2969,7 +3614,7 @@ class LUReplaceDisks(LogicalUnit): cfg.SetDiskID(new_drbd, instance.primary_node) if not rpc.call_blockdev_remove(instance.primary_node, new_drbd): logger.Error("Can't rollback on primary") - raise errors.OpExecError, ("Full abort, cleanup manually!!") + raise errors.OpExecError("Full abort, cleanup manually!!") dev.children.append(new_drbd) cfg.AddInstance(instance) @@ -2977,7 +3622,7 @@ class LUReplaceDisks(LogicalUnit): # this can fail as the old devices are degraded and _WaitForSync # does a combined result over all disks, so we don't check its # return value - _WaitForSync(cfg, instance, unlock=True) + _WaitForSync(cfg, instance, self.proc, unlock=True) # so check manually all the devices for name in iv_names: @@ -2985,18 +3630,18 @@ class LUReplaceDisks(LogicalUnit): cfg.SetDiskID(dev, instance.primary_node) is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5] if is_degr: - raise errors.OpExecError, ("MD device %s is degraded!" % name) + raise errors.OpExecError("MD device %s is degraded!" % name) cfg.SetDiskID(new_drbd, instance.primary_node) is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5] if is_degr: - raise errors.OpExecError, ("New drbd device %s is degraded!" % name) + raise errors.OpExecError("New drbd device %s is degraded!" % name) for name in iv_names: dev, child, new_drbd = iv_names[name] logger.Info("remove mirror %s component" % name) cfg.SetDiskID(dev, instance.primary_node) - if not rpc.call_blockdev_removechild(instance.primary_node, - dev, child): + if not rpc.call_blockdev_removechildren(instance.primary_node, + dev, [child]): logger.Error("Can't remove child from mirror, aborting" " *this device cleanup*.\nYou need to cleanup manually!!") continue @@ -3012,6 +3657,358 @@ class LUReplaceDisks(LogicalUnit): cfg.AddInstance(instance) + def _ExecD8DiskOnly(self, feedback_fn): + """Replace a disk on the primary or secondary for dbrd8. + + The algorithm for replace is quite complicated: + - for each disk to be replaced: + - create new LVs on the target node with unique names + - detach old LVs from the drbd device + - rename old LVs to name_replaced. 
+ - rename new LVs to old LVs + - attach the new LVs (with the old names now) to the drbd device + - wait for sync across all devices + - for each modified disk: + - remove old LVs (which have the name name_replaces.) + + Failures are not very well handled. + + """ + steps_total = 6 + warning, info = (self.proc.LogWarning, self.proc.LogInfo) + instance = self.instance + iv_names = {} + vgname = self.cfg.GetVGName() + # start of work + cfg = self.cfg + tgt_node = self.tgt_node + oth_node = self.oth_node + + # Step: check device activation + self.proc.LogStep(1, steps_total, "check device existence") + info("checking volume groups") + my_vg = cfg.GetVGName() + results = rpc.call_vg_list([oth_node, tgt_node]) + if not results: + raise errors.OpExecError("Can't list volume groups on the nodes") + for node in oth_node, tgt_node: + res = results.get(node, False) + if not res or my_vg not in res: + raise errors.OpExecError("Volume group '%s' not found on %s" % + (my_vg, node)) + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + for node in tgt_node, oth_node: + info("checking %s on %s" % (dev.iv_name, node)) + cfg.SetDiskID(dev, node) + if not rpc.call_blockdev_find(node, dev): + raise errors.OpExecError("Can't find device %s on node %s" % + (dev.iv_name, node)) + + # Step: check other node consistency + self.proc.LogStep(2, steps_total, "check peer consistency") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s consistency on %s" % (dev.iv_name, oth_node)) + if not _CheckDiskConsistency(self.cfg, dev, oth_node, + oth_node==instance.primary_node): + raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe" + " to replace disks on this node (%s)" % + (oth_node, tgt_node)) + + # Step: create new storage + self.proc.LogStep(3, steps_total, "allocate new storage") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + size = dev.size + cfg.SetDiskID(dev, tgt_node) + lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]] + names = _GenerateUniqueNames(cfg, lv_names) + lv_data = objects.Disk(dev_type=constants.LD_LV, size=size, + logical_id=(vgname, names[0])) + lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128, + logical_id=(vgname, names[1])) + new_lvs = [lv_data, lv_meta] + old_lvs = dev.children + iv_names[dev.iv_name] = (dev, old_lvs, new_lvs) + info("creating new local storage on %s for %s" % + (tgt_node, dev.iv_name)) + # since we *always* want to create this LV, we use the + # _Create...OnPrimary (which forces the creation), even if we + # are talking about the secondary node + for new_lv in new_lvs: + if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new LV named '%s' on" + " node '%s'" % + (new_lv.logical_id[1], tgt_node)) + + # Step: for each lv, detach+rename*2+attach + self.proc.LogStep(4, steps_total, "change drbd configuration") + for dev, old_lvs, new_lvs in iv_names.itervalues(): + info("detaching %s drbd from local storage" % dev.iv_name) + if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs): + raise errors.OpExecError("Can't detach drbd from local storage on node" + " %s for device %s" % (tgt_node, dev.iv_name)) + #dev.children = [] + #cfg.Update(instance) + + # ok, we created the new LVs, so now we know we have the needed + # storage; as such, we proceed on the target node to rename + # old_lv to _old, and new_lv to old_lv; note that we 
+      # ok, we created the new LVs, so now we know we have the needed
+      # storage; as such, we proceed on the target node to rename
+      # old_lv to <name>_replaced-<suffix>, and new_lv to old_lv; note that we
+      # rename LVs
+      # using the assumption that logical_id == physical_id (which in
+      # turn is the unique_id on that node)
+
+      # FIXME(iustin): use a better name for the replaced LVs
+      temp_suffix = int(time.time())
+      ren_fn = lambda d, suff: (d.physical_id[0],
+                                d.physical_id[1] + "_replaced-%s" % suff)
+      # build the rename list based on what LVs exist on the node
+      rlist = []
+      for to_ren in old_lvs:
+        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
+        if find_res is not None: # device exists
+          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
+
+      info("renaming the old LVs on the target node")
+      if not rpc.call_blockdev_rename(tgt_node, rlist):
+        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
+      # now we rename the new LVs to the old LVs
+      info("renaming the new LVs on the target node")
+      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
+      if not rpc.call_blockdev_rename(tgt_node, rlist):
+        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
+
+      for old, new in zip(old_lvs, new_lvs):
+        new.logical_id = old.logical_id
+        cfg.SetDiskID(new, tgt_node)
+
+      for disk in old_lvs:
+        disk.logical_id = ren_fn(disk, temp_suffix)
+        cfg.SetDiskID(disk, tgt_node)
+
+      # now that the new lvs have the old name, we can add them to the device
+      info("adding new mirror component on %s" % tgt_node)
+      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
+        for new_lv in new_lvs:
+          if not rpc.call_blockdev_remove(tgt_node, new_lv):
+            warning("Can't rollback device %s" % new_lv.logical_id[1],
+                    hint="manually cleanup unused logical volumes")
+        raise errors.OpExecError("Can't add local storage to drbd")
+
+      dev.children = new_lvs
+      cfg.Update(instance)
+
+    # Step: wait for sync
+
+    # this can fail as the old devices are degraded and _WaitForSync
+    # does a combined result over all disks, so we don't check its
+    # return value
+    self.proc.LogStep(5, steps_total, "sync devices")
+    _WaitForSync(cfg, instance, self.proc, unlock=True)
+
+    # so check manually all the devices
+    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+      cfg.SetDiskID(dev, instance.primary_node)
+      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
+      if is_degr:
+        raise errors.OpExecError("DRBD device %s is degraded!" % name)
+
+    # Step: remove old storage
+    self.proc.LogStep(6, steps_total, "removing old storage")
+    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+      info("remove logical volumes for %s" % name)
+      for lv in old_lvs:
+        cfg.SetDiskID(lv, tgt_node)
+        if not rpc.call_blockdev_remove(tgt_node, lv):
+          warning("Can't remove old LV", hint="manually remove unused LVs")
+          continue
+
+  def _ExecD8Secondary(self, feedback_fn):
+    """Replace the secondary node for drbd8.
+
+    The algorithm for replace is quite complicated:
+      - for all disks of the instance:
+        - create new LVs on the new node with the same names
+        - shutdown the drbd device on the old secondary
+        - disconnect the drbd network on the primary
+        - create the drbd device on the new secondary
+        - network attach the drbd on the primary, using an artifice:
+          the drbd code for Attach() will connect to the network if it
+          finds a device which is connected to the good local disks but
+          not network enabled
+      - wait for sync across all devices
+      - remove all disks from the old secondary
+
+    Failures are not very well handled.
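+
+    A hypothetical walk-through (names purely illustrative): moving
+    instance0's secondary from node2 to node3 creates the backing LVs
+    on node3, shuts down the drbd devices on node2, re-points each
+    disk's logical_id at (node1, node3) and re-finds the devices so
+    that the primary reconnects to the new peer before the resync.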
+ + """ + steps_total = 6 + warning, info = (self.proc.LogWarning, self.proc.LogInfo) + instance = self.instance + iv_names = {} + vgname = self.cfg.GetVGName() + # start of work + cfg = self.cfg + old_node = self.tgt_node + new_node = self.new_node + pri_node = instance.primary_node + + # Step: check device activation + self.proc.LogStep(1, steps_total, "check device existence") + info("checking volume groups") + my_vg = cfg.GetVGName() + results = rpc.call_vg_list([pri_node, new_node]) + if not results: + raise errors.OpExecError("Can't list volume groups on the nodes") + for node in pri_node, new_node: + res = results.get(node, False) + if not res or my_vg not in res: + raise errors.OpExecError("Volume group '%s' not found on %s" % + (my_vg, node)) + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s on %s" % (dev.iv_name, pri_node)) + cfg.SetDiskID(dev, pri_node) + if not rpc.call_blockdev_find(pri_node, dev): + raise errors.OpExecError("Can't find device %s on node %s" % + (dev.iv_name, pri_node)) + + # Step: check other node consistency + self.proc.LogStep(2, steps_total, "check peer consistency") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s consistency on %s" % (dev.iv_name, pri_node)) + if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True): + raise errors.OpExecError("Primary node (%s) has degraded storage," + " unsafe to replace the secondary" % + pri_node) + + # Step: create new storage + self.proc.LogStep(3, steps_total, "allocate new storage") + for dev in instance.disks: + size = dev.size + info("adding new local storage on %s for %s" % (new_node, dev.iv_name)) + # since we *always* want to create this LV, we use the + # _Create...OnPrimary (which forces the creation), even if we + # are talking about the secondary node + for new_lv in dev.children: + if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new LV named '%s' on" + " node '%s'" % + (new_lv.logical_id[1], new_node)) + + iv_names[dev.iv_name] = (dev, dev.children) + + self.proc.LogStep(4, steps_total, "changing drbd configuration") + for dev in instance.disks: + size = dev.size + info("activating a new drbd on %s for %s" % (new_node, dev.iv_name)) + # create new devices on new_node + new_drbd = objects.Disk(dev_type=constants.LD_DRBD8, + logical_id=(pri_node, new_node, + dev.logical_id[2]), + children=dev.children) + if not _CreateBlockDevOnSecondary(cfg, new_node, instance, + new_drbd, False, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new DRBD on" + " node '%s'" % new_node) + + for dev in instance.disks: + # we have new devices, shutdown the drbd on the old secondary + info("shutting down drbd for %s on old node" % dev.iv_name) + cfg.SetDiskID(dev, old_node) + if not rpc.call_blockdev_shutdown(old_node, dev): + warning("Failed to shutdown drbd for %s on old node" % dev.iv_name, + hint="Please cleanup this device manually as soon as possible") + + info("detaching primary drbds from the network (=> standalone)") + done = 0 + for dev in instance.disks: + cfg.SetDiskID(dev, pri_node) + # set the physical (unique in bdev terms) id to None, meaning + # detach from network + dev.physical_id = (None,) * len(dev.physical_id) + # and 'find' the device, which will 'fix' it to match the + # standalone state + if rpc.call_blockdev_find(pri_node, dev): + done += 1 + else: + 
warning("Failed to detach drbd %s from network, unusual case" % + dev.iv_name) + + if not done: + # no detaches succeeded (very unlikely) + raise errors.OpExecError("Can't detach at least one DRBD from old node") + + # if we managed to detach at least one, we update all the disks of + # the instance to point to the new secondary + info("updating instance configuration") + for dev in instance.disks: + dev.logical_id = (pri_node, new_node) + dev.logical_id[2:] + cfg.SetDiskID(dev, pri_node) + cfg.Update(instance) + + # and now perform the drbd attach + info("attaching primary drbds to new secondary (standalone => connected)") + failures = [] + for dev in instance.disks: + info("attaching primary drbd for %s to new secondary node" % dev.iv_name) + # since the attach is smart, it's enough to 'find' the device, + # it will automatically activate the network, if the physical_id + # is correct + cfg.SetDiskID(dev, pri_node) + if not rpc.call_blockdev_find(pri_node, dev): + warning("can't attach drbd %s to new secondary!" % dev.iv_name, + "please do a gnt-instance info to see the status of disks") + + # this can fail as the old devices are degraded and _WaitForSync + # does a combined result over all disks, so we don't check its + # return value + self.proc.LogStep(5, steps_total, "sync devices") + _WaitForSync(cfg, instance, self.proc, unlock=True) + + # so check manually all the devices + for name, (dev, old_lvs) in iv_names.iteritems(): + cfg.SetDiskID(dev, pri_node) + is_degr = rpc.call_blockdev_find(pri_node, dev)[5] + if is_degr: + raise errors.OpExecError("DRBD device %s is degraded!" % name) + + self.proc.LogStep(6, steps_total, "removing old storage") + for name, (dev, old_lvs) in iv_names.iteritems(): + info("remove logical volumes for %s" % name) + for lv in old_lvs: + cfg.SetDiskID(lv, old_node) + if not rpc.call_blockdev_remove(old_node, lv): + warning("Can't remove LV on old secondary", + hint="Cleanup stale volumes by hand") + + def Exec(self, feedback_fn): + """Execute disk replacement. + + This dispatches the disk replacement to the appropriate handler. + + """ + instance = self.instance + if instance.disk_template == constants.DT_REMOTE_RAID1: + fn = self._ExecRR1 + elif instance.disk_template == constants.DT_DRBD8: + if self.op.remote_node is None: + fn = self._ExecD8DiskOnly + else: + fn = self._ExecD8Secondary + else: + raise errors.ProgrammerError("Unhandled disk replacement case") + return fn(feedback_fn) + class LUQueryInstanceData(NoHooksLU): """Query runtime instance data. 
@@ -3026,15 +4023,15 @@ class LUQueryInstanceData(NoHooksLU):

    """
    if not isinstance(self.op.instances, list):
-      raise errors.OpPrereqError, "Invalid argument type 'instances'"
+      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
-          raise errors.OpPrereqError, ("No such instance name '%s'" % name)
-        self.wanted_instances.append(instance)
+          raise errors.OpPrereqError("No such instance name '%s'" % name)
+        self.wanted_instances.append(instance)
    else:
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
@@ -3047,7 +4044,7 @@ class LUQueryInstanceData(NoHooksLU):

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
-    if dev.dev_type == "drbd":
+    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
@@ -3080,7 +4077,6 @@ class LUQueryInstanceData(NoHooksLU):
  def Exec(self, feedback_fn):
    """Gather and return data"""
-
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
@@ -3107,6 +4103,11 @@ class LUQueryInstanceData(NoHooksLU):
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
+        "network_port": instance.network_port,
+        "vcpus": instance.vcpus,
+        "kernel_path": instance.kernel_path,
+        "initrd_path": instance.initrd_path,
+        "hvm_boot_order": instance.hvm_boot_order,
        }

      result[instance.name] = idict
@@ -3114,38 +4115,6 @@ class LUQueryInstanceData(NoHooksLU):

    return result

-
-class LUQueryNodeData(NoHooksLU):
-  """Logical unit for querying node data.
-
-  """
-  _OP_REQP = ["nodes"]
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only checks the optional node list against the existing names.
-
-    """
-    self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)
-
-  def Exec(self, feedback_fn):
-    """Compute and return the list of nodes.
-
-    """
-
-    ilist = [self.cfg.GetInstanceInfo(iname) for iname
-             in self.cfg.GetInstanceList()]
-    result = []
-    for node in self.wanted_nodes:
-      result.append((node.name, node.primary_ip, node.secondary_ip,
-                     [inst.name for inst in ilist
-                      if inst.primary_node == node.name],
-                     [inst.name for inst in ilist
-                      if node.name in inst.secondary_nodes],
-                     ))
-    return result
-
-
 class LUSetInstanceParms(LogicalUnit):
  """Modifies an instance's parameters.

@@ -3160,21 +4129,28 @@ class LUSetInstanceParms(LogicalUnit):

    This runs on the master, primary and secondaries.
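
+    (The hook environment is assembled by _BuildInstanceHookEnvByObject
+    below: hooks see the instance's usual variables, with any values
+    changed here, such as memory, vcpus and NIC settings, overriding
+    the current ones.)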
""" - env = { - "INSTANCE_NAME": self.op.instance_name, - } + args = dict() if self.mem: - env["MEM_SIZE"] = self.mem + args['memory'] = self.mem if self.vcpus: - env["VCPUS"] = self.vcpus - if self.do_ip: - env["INSTANCE_IP"] = self.ip - if self.bridge: - env["BRIDGE"] = self.bridge - + args['vcpus'] = self.vcpus + if self.do_ip or self.do_bridge or self.mac: + if self.do_ip: + ip = self.ip + else: + ip = self.instance.nics[0].ip + if self.bridge: + bridge = self.bridge + else: + bridge = self.instance.nics[0].bridge + if self.mac: + mac = self.mac + else: + mac = self.instance.nics[0].mac + args['nics'] = [(ip, bridge, mac)] + env = _BuildInstanceHookEnvByObject(self.instance, override=args) nl = [self.sstore.GetMasterNode(), self.instance.primary_node] + list(self.instance.secondary_nodes) - return env, nl, nl def CheckPrereq(self): @@ -3186,34 +4162,77 @@ class LUSetInstanceParms(LogicalUnit): self.mem = getattr(self.op, "mem", None) self.vcpus = getattr(self.op, "vcpus", None) self.ip = getattr(self.op, "ip", None) + self.mac = getattr(self.op, "mac", None) self.bridge = getattr(self.op, "bridge", None) - if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4: - raise errors.OpPrereqError, ("No changes submitted") + self.kernel_path = getattr(self.op, "kernel_path", None) + self.initrd_path = getattr(self.op, "initrd_path", None) + self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None) + all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac, + self.kernel_path, self.initrd_path, self.hvm_boot_order] + if all_parms.count(None) == len(all_parms): + raise errors.OpPrereqError("No changes submitted") if self.mem is not None: try: self.mem = int(self.mem) except ValueError, err: - raise errors.OpPrereqError, ("Invalid memory size: %s" % str(err)) + raise errors.OpPrereqError("Invalid memory size: %s" % str(err)) if self.vcpus is not None: try: self.vcpus = int(self.vcpus) except ValueError, err: - raise errors.OpPrereqError, ("Invalid vcpus number: %s" % str(err)) + raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err)) if self.ip is not None: self.do_ip = True if self.ip.lower() == "none": self.ip = None else: if not utils.IsValidIP(self.ip): - raise errors.OpPrereqError, ("Invalid IP address '%s'." % self.ip) + raise errors.OpPrereqError("Invalid IP address '%s'." 
% self.ip) else: self.do_ip = False + self.do_bridge = (self.bridge is not None) + if self.mac is not None: + if self.cfg.IsMacInUse(self.mac): + raise errors.OpPrereqError('MAC address %s already in use in cluster' % + self.mac) + if not utils.IsValidMac(self.mac): + raise errors.OpPrereqError('Invalid MAC address %s' % self.mac) + + if self.kernel_path is not None: + self.do_kernel_path = True + if self.kernel_path == constants.VALUE_NONE: + raise errors.OpPrereqError("Can't set instance to no kernel") + + if self.kernel_path != constants.VALUE_DEFAULT: + if not os.path.isabs(self.kernel_path): + raise errors.OpPrereqError("The kernel path must be an absolute" + " filename") + else: + self.do_kernel_path = False + + if self.initrd_path is not None: + self.do_initrd_path = True + if self.initrd_path not in (constants.VALUE_NONE, + constants.VALUE_DEFAULT): + if not os.path.isabs(self.initrd_path): + raise errors.OpPrereqError("The initrd path must be an absolute" + " filename") + else: + self.do_initrd_path = False + + # boot order verification + if self.hvm_boot_order is not None: + if self.hvm_boot_order != constants.VALUE_DEFAULT: + if len(self.hvm_boot_order.strip("acdn")) != 0: + raise errors.OpPrereqError("invalid boot order specified," + " must be one or more of [acdn]" + " or 'default'") instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: - raise errors.OpPrereqError, ("No such instance name '%s'" % - self.op.instance_name) + raise errors.OpPrereqError("No such instance name '%s'" % + self.op.instance_name) self.op.instance_name = instance.name self.instance = instance return @@ -3237,6 +4256,21 @@ class LUSetInstanceParms(LogicalUnit): if self.bridge: instance.nics[0].bridge = self.bridge result.append(("bridge", self.bridge)) + if self.mac: + instance.nics[0].mac = self.mac + result.append(("mac", self.mac)) + if self.do_kernel_path: + instance.kernel_path = self.kernel_path + result.append(("kernel_path", self.kernel_path)) + if self.do_initrd_path: + instance.initrd_path = self.initrd_path + result.append(("initrd_path", self.initrd_path)) + if self.hvm_boot_order: + if self.hvm_boot_order == constants.VALUE_DEFAULT: + instance.hvm_boot_order = None + else: + instance.hvm_boot_order = self.hvm_boot_order + result.append(("hvm_boot_order", self.hvm_boot_order)) self.cfg.AddInstance(instance) @@ -3264,7 +4298,7 @@ class LUQueryExports(NoHooksLU): that node. 
""" - return rpc.call_export_list([node.name for node in self.nodes]) + return rpc.call_export_list(self.nodes) class LUExportInstance(LogicalUnit): @@ -3282,10 +4316,10 @@ class LUExportInstance(LogicalUnit): """ env = { - "INSTANCE_NAME": self.op.instance_name, "EXPORT_NODE": self.op.target_node, "EXPORT_DO_SHUTDOWN": self.op.shutdown, } + env.update(_BuildInstanceHookEnvByObject(self.instance)) nl = [self.sstore.GetMasterNode(), self.instance.primary_node, self.op.target_node] return env, nl, nl @@ -3299,16 +4333,16 @@ class LUExportInstance(LogicalUnit): instance_name = self.cfg.ExpandInstanceName(self.op.instance_name) self.instance = self.cfg.GetInstanceInfo(instance_name) if self.instance is None: - raise errors.OpPrereqError, ("Instance '%s' not found" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not found" % + self.op.instance_name) # node verification dst_node_short = self.cfg.ExpandNodeName(self.op.target_node) self.dst_node = self.cfg.GetNodeInfo(dst_node_short) if self.dst_node is None: - raise errors.OpPrereqError, ("Destination node '%s' is uknown." % - self.op.target_node) + raise errors.OpPrereqError("Destination node '%s' is unknown." % + self.op.target_node) self.op.target_node = self.dst_node.name def Exec(self, feedback_fn): @@ -3321,7 +4355,7 @@ class LUExportInstance(LogicalUnit): # shutdown the instance, unless requested not to do so if self.op.shutdown: op = opcodes.OpShutdownInstance(instance_name=instance.name) - self.processor.ChainOpCode(op, feedback_fn) + self.proc.ChainOpCode(op) vgname = self.cfg.GetVGName() @@ -3337,7 +4371,7 @@ class LUExportInstance(LogicalUnit): logger.Error("could not snapshot block device %s on node %s" % (disk.logical_id[1], src_node)) else: - new_dev = objects.Disk(dev_type="lvm", size=disk.size, + new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size, logical_id=(vgname, new_dev_name), physical_id=(vgname, new_dev_name), iv_name=disk.iv_name) @@ -3347,7 +4381,7 @@ class LUExportInstance(LogicalUnit): if self.op.shutdown: op = opcodes.OpStartupInstance(instance_name=instance.name, force=False) - self.processor.ChainOpCode(op, feedback_fn) + self.proc.ChainOpCode(op) # TODO: check for size @@ -3373,9 +4407,196 @@ class LUExportInstance(LogicalUnit): # substitutes an empty list with the full cluster node list. if nodelist: op = opcodes.OpQueryExports(nodes=nodelist) - exportlist = self.processor.ChainOpCode(op, feedback_fn) + exportlist = self.proc.ChainOpCode(op) for node in exportlist: if instance.name in exportlist[node]: if not rpc.call_export_remove(node, instance.name): logger.Error("could not remove older export for instance %s" " on node %s" % (instance.name, node)) + + +class TagsLU(NoHooksLU): + """Generic tags LU. + + This is an abstract class which is the parent of all the other tags LUs. + + """ + def CheckPrereq(self): + """Check prerequisites. 
+ + """ + if self.op.kind == constants.TAG_CLUSTER: + self.target = self.cfg.GetClusterInfo() + elif self.op.kind == constants.TAG_NODE: + name = self.cfg.ExpandNodeName(self.op.name) + if name is None: + raise errors.OpPrereqError("Invalid node name (%s)" % + (self.op.name,)) + self.op.name = name + self.target = self.cfg.GetNodeInfo(name) + elif self.op.kind == constants.TAG_INSTANCE: + name = self.cfg.ExpandInstanceName(self.op.name) + if name is None: + raise errors.OpPrereqError("Invalid instance name (%s)" % + (self.op.name,)) + self.op.name = name + self.target = self.cfg.GetInstanceInfo(name) + else: + raise errors.OpPrereqError("Wrong tag type requested (%s)" % + str(self.op.kind)) + + +class LUGetTags(TagsLU): + """Returns the tags of a given object. + + """ + _OP_REQP = ["kind", "name"] + + def Exec(self, feedback_fn): + """Returns the tag list. + + """ + return self.target.GetTags() + + +class LUSearchTags(NoHooksLU): + """Searches the tags for a given pattern. + + """ + _OP_REQP = ["pattern"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks the pattern passed for validity by compiling it. + + """ + try: + self.re = re.compile(self.op.pattern) + except re.error, err: + raise errors.OpPrereqError("Invalid search pattern '%s': %s" % + (self.op.pattern, err)) + + def Exec(self, feedback_fn): + """Returns the tag list. + + """ + cfg = self.cfg + tgts = [("/cluster", cfg.GetClusterInfo())] + ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()] + tgts.extend([("/instances/%s" % i.name, i) for i in ilist]) + nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()] + tgts.extend([("/nodes/%s" % n.name, n) for n in nlist]) + results = [] + for path, target in tgts: + for tag in target.GetTags(): + if self.re.search(tag): + results.append((path, tag)) + return results + + +class LUAddTags(TagsLU): + """Sets a tag on a given object. + + """ + _OP_REQP = ["kind", "name", "tags"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks the type and length of the tag name and value. + + """ + TagsLU.CheckPrereq(self) + for tag in self.op.tags: + objects.TaggableObject.ValidateTag(tag) + + def Exec(self, feedback_fn): + """Sets the tag. + + """ + try: + for tag in self.op.tags: + self.target.AddTag(tag) + except errors.TagError, err: + raise errors.OpExecError("Error while setting tag: %s" % str(err)) + try: + self.cfg.Update(self.target) + except errors.ConfigurationError: + raise errors.OpRetryError("There has been a modification to the" + " config file and the operation has been" + " aborted. Please retry.") + + +class LUDelTags(TagsLU): + """Delete a list of tags from a given object. + + """ + _OP_REQP = ["kind", "name", "tags"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks that we have the given tag. + + """ + TagsLU.CheckPrereq(self) + for tag in self.op.tags: + objects.TaggableObject.ValidateTag(tag) + del_tags = frozenset(self.op.tags) + cur_tags = self.target.GetTags() + if not del_tags <= cur_tags: + diff_tags = del_tags - cur_tags + diff_names = ["'%s'" % tag for tag in diff_tags] + diff_names.sort() + raise errors.OpPrereqError("Tag(s) %s not found" % + (",".join(diff_names))) + + def Exec(self, feedback_fn): + """Remove the tag from the object. + + """ + for tag in self.op.tags: + self.target.RemoveTag(tag) + try: + self.cfg.Update(self.target) + except errors.ConfigurationError: + raise errors.OpRetryError("There has been a modification to the" + " config file and the operation has been" + " aborted. 
Please retry.") + +class LUTestDelay(NoHooksLU): + """Sleep for a specified amount of time. + + This LU sleeps on the master and/or nodes for a specified amoutn of + time. + + """ + _OP_REQP = ["duration", "on_master", "on_nodes"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks that we have a good list of nodes and/or the duration + is valid. + + """ + + if self.op.on_nodes: + self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes) + + def Exec(self, feedback_fn): + """Do the actual sleep. + + """ + if self.op.on_master: + if not utils.TestDelay(self.op.duration): + raise errors.OpExecError("Error during master delay test") + if self.op.on_nodes: + result = rpc.call_test_delay(self.op.on_nodes, self.op.duration) + if not result: + raise errors.OpExecError("Complete failure from rpc call") + for node, node_result in result.items(): + if not node_result: + raise errors.OpExecError("Failure during rpc call to node %s," + " result: %s" % (node, node_result))