X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/bdd55f717237ce80b54deb09aad39a143d1aaa41..0f1a06e3e8e61259841848a7186f9c22ade6135d:/lib/cmdlib.py diff --git a/lib/cmdlib.py b/lib/cmdlib.py index d3f583c..4cfb38f 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +# # # Copyright (C) 2006, 2007 Google Inc. @@ -26,7 +26,6 @@ import os import os.path import sha -import socket import time import tempfile import re @@ -44,6 +43,7 @@ from ganeti import objects from ganeti import opcodes from ganeti import ssconf + class LogicalUnit(object): """Logical Unit base class. @@ -70,10 +70,12 @@ class LogicalUnit(object): validity. """ - self.processor = processor + self.proc = processor self.op = op self.cfg = cfg self.sstore = sstore + self.__ssh = None + for attr_name in self._OP_REQP: attr_val = getattr(op, attr_name, None) if attr_val is None: @@ -85,10 +87,20 @@ class LogicalUnit(object): " use 'gnt-cluster init' first.") if self.REQ_MASTER: master = sstore.GetMasterNode() - if master != socket.gethostname(): + if master != utils.HostInfo().name: raise errors.OpPrereqError("Commands must be run on the master" " node %s" % master) + def __GetSSH(self): + """Returns the SshRunner object + + """ + if not self.__ssh: + self.__ssh = ssh.SshRunner(self.sstore) + return self.__ssh + + ssh = property(fget=__GetSSH) + def CheckPrereq(self): """Check prerequisites for this LU. @@ -161,7 +173,24 @@ class NoHooksLU(LogicalUnit): This is a no-op, since we don't run hooks. """ - return + return {}, [], [] + + +def _AddHostToEtcHosts(hostname): + """Wrapper around utils.SetEtcHostsEntry. + + """ + hi = utils.HostInfo(name=hostname) + utils.SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()]) + + +def _RemoveHostFromEtcHosts(hostname): + """Wrapper around utils.RemoveEtcHostsEntry. + + """ + hi = utils.HostInfo(name=hostname) + utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name) + utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName()) def _GetWantedNodes(lu, nodes): @@ -239,6 +268,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, secondary_nodes: List of secondary nodes as strings """ env = { + "OP_TARGET": name, "INSTANCE_NAME": name, "INSTANCE_PRIMARY": primary_node, "INSTANCE_SECONDARIES": " ".join(secondary_nodes), @@ -250,11 +280,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, if nics: nic_count = len(nics) - for idx, (ip, bridge) in enumerate(nics): + for idx, (ip, bridge, mac) in enumerate(nics): if ip is None: ip = "" env["INSTANCE_NIC%d_IP" % idx] = ip env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge + env["INSTANCE_NIC%d_HWADDR" % idx] = mac else: nic_count = 0 @@ -278,176 +309,13 @@ def _BuildInstanceHookEnvByObject(instance, override=None): 'status': instance.os, 'memory': instance.memory, 'vcpus': instance.vcpus, - 'nics': [(nic.ip, nic.bridge) for nic in instance.nics], + 'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics], } if override: args.update(override) return _BuildInstanceHookEnv(**args) -def _UpdateEtcHosts(fullnode, ip): - """Ensure a node has a correct entry in /etc/hosts. - - Args: - fullnode - Fully qualified domain name of host. 
(str) - ip - IPv4 address of host (str) - - """ - node = fullnode.split(".", 1)[0] - - f = open('/etc/hosts', 'r+') - - inthere = False - - save_lines = [] - add_lines = [] - removed = False - - while True: - rawline = f.readline() - - if not rawline: - # End of file - break - - line = rawline.split('\n')[0] - - # Strip off comments - line = line.split('#')[0] - - if not line: - # Entire line was comment, skip - save_lines.append(rawline) - continue - - fields = line.split() - - haveall = True - havesome = False - for spec in [ ip, fullnode, node ]: - if spec not in fields: - haveall = False - if spec in fields: - havesome = True - - if haveall: - inthere = True - save_lines.append(rawline) - continue - - if havesome and not haveall: - # Line (old, or manual?) which is missing some. Remove. - removed = True - continue - - save_lines.append(rawline) - - if not inthere: - add_lines.append('%s\t%s %s\n' % (ip, fullnode, node)) - - if removed: - if add_lines: - save_lines = save_lines + add_lines - - # We removed a line, write a new file and replace old. - fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc') - newfile = os.fdopen(fd, 'w') - newfile.write(''.join(save_lines)) - newfile.close() - os.rename(tmpname, '/etc/hosts') - - elif add_lines: - # Simply appending a new line will do the trick. - f.seek(0, 2) - for add in add_lines: - f.write(add) - - f.close() - - -def _UpdateKnownHosts(fullnode, ip, pubkey): - """Ensure a node has a correct known_hosts entry. - - Args: - fullnode - Fully qualified domain name of host. (str) - ip - IPv4 address of host (str) - pubkey - the public key of the cluster - - """ - if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE): - f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+') - else: - f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+') - - inthere = False - - save_lines = [] - add_lines = [] - removed = False - - while True: - rawline = f.readline() - logger.Debug('read %s' % (repr(rawline),)) - - if not rawline: - # End of file - break - - line = rawline.split('\n')[0] - - parts = line.split(' ') - fields = parts[0].split(',') - key = parts[2] - - haveall = True - havesome = False - for spec in [ ip, fullnode ]: - if spec not in fields: - haveall = False - if spec in fields: - havesome = True - - logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),)) - if haveall and key == pubkey: - inthere = True - save_lines.append(rawline) - logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),)) - continue - - if havesome and (not haveall or key != pubkey): - removed = True - logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),)) - continue - - save_lines.append(rawline) - - if not inthere: - add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey)) - logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),)) - - if removed: - save_lines = save_lines + add_lines - - # Write a new file and replace old. - fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.', - constants.DATA_DIR) - newfile = os.fdopen(fd, 'w') - try: - newfile.write(''.join(save_lines)) - finally: - newfile.close() - logger.Debug("Wrote new known_hosts.") - os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE) - - elif add_lines: - # Simply appending a new line will do the trick. - f.seek(0, 2) - for add in add_lines: - f.write(add) - - f.close() - - def _HasValidVG(vglist, vgname): """Checks if the volume group list is valid. 
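
The hunks above drop the hand-rolled /etc/hosts editing (_UpdateEtcHosts / _UpdateKnownHosts) in favour of thin wrappers around utils.SetEtcHostsEntry and utils.RemoveEtcHostsEntry. A minimal sketch of what such a helper is expected to do — the function name and details below are illustrative assumptions, not the real utils implementation: keep unrelated lines, drop stale lines that mention the host, append one canonical entry, and replace the file atomically.

import os
import tempfile

def set_etc_hosts_entry(file_name, ip, hostname, aliases):
  # Illustrative helper, not the real utils.SetEtcHostsEntry
  names = set([hostname] + aliases)
  kept = []
  if os.path.exists(file_name):
    for line in open(file_name):
      fields = line.split("#")[0].split()
      if fields and (fields[0] == ip or names.intersection(fields[1:])):
        continue                      # stale entry for this host, drop it
      kept.append(line)
  kept.append("%s\t%s %s\n" % (ip, hostname, " ".join(aliases)))
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    os.write(fd, "".join(kept))
  finally:
    os.close(fd)
  os.rename(tmpname, file_name)        # atomic replacement of the old file
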
@@ -475,24 +343,23 @@ def _InitSSHSetup(node): node: the name of this host as a fqdn """ - if os.path.exists('/root/.ssh/id_dsa'): - utils.CreateBackup('/root/.ssh/id_dsa') - if os.path.exists('/root/.ssh/id_dsa.pub'): - utils.CreateBackup('/root/.ssh/id_dsa.pub') + priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS) - utils.RemoveFile('/root/.ssh/id_dsa') - utils.RemoveFile('/root/.ssh/id_dsa.pub') + for name in priv_key, pub_key: + if os.path.exists(name): + utils.CreateBackup(name) + utils.RemoveFile(name) result = utils.RunCmd(["ssh-keygen", "-t", "dsa", - "-f", "/root/.ssh/id_dsa", + "-f", priv_key, "-q", "-N", ""]) if result.failed: raise errors.OpExecError("Could not generate ssh keypair, error %s" % result.output) - f = open('/root/.ssh/id_dsa.pub', 'r') + f = open(pub_key, 'r') try: - utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192)) + utils.AddAuthorizedKey(auth_keys, f.read(8192)) finally: f.close() @@ -528,14 +395,26 @@ def _InitGanetiServerSetup(ss): (result.cmd, result.exit_code, result.output)) +def _CheckInstanceBridgesExist(instance): + """Check that the brigdes needed by an instance exist. + + """ + # check bridges existance + brlist = [nic.bridge for nic in instance.nics] + if not rpc.call_bridges_exist(instance.primary_node, brlist): + raise errors.OpPrereqError("one or more target bridges %s does not" + " exist on destination node '%s'" % + (brlist, instance.primary_node)) + + class LUInitCluster(LogicalUnit): """Initialise the cluster. """ HPATH = "cluster-init" HTYPE = constants.HTYPE_CLUSTER - _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix", - "def_bridge", "master_netdev"] + _OP_REQP = ["cluster_name", "hypervisor_type", "mac_prefix", + "def_bridge", "master_netdev", "file_storage_dir"] REQ_CLUSTER = False def BuildHooksEnv(self): @@ -545,10 +424,7 @@ class LUInitCluster(LogicalUnit): ourselves in the post-run node list. """ - env = { - "CLUSTER": self.op.cluster_name, - "MASTER": self.hostname.name, - } + env = {"OP_TARGET": self.op.cluster_name} return env, [], [self.hostname.name] def CheckPrereq(self): @@ -558,58 +434,77 @@ class LUInitCluster(LogicalUnit): if config.ConfigWriter.IsCluster(): raise errors.OpPrereqError("Cluster is already initialised") - hostname_local = socket.gethostname() - self.hostname = hostname = utils.LookupHostname(hostname_local) - if not hostname: - raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" % - hostname_local) + if self.op.hypervisor_type == constants.HT_XEN_HVM31: + if not os.path.exists(constants.VNC_PASSWORD_FILE): + raise errors.OpPrereqError("Please prepare the cluster VNC" + "password file %s" % + constants.VNC_PASSWORD_FILE) - if hostname.name != hostname_local: - raise errors.OpPrereqError("My own hostname (%s) does not match the" - " resolver (%s): probably not using FQDN" - " for hostname." % - (hostname_local, hostname.name)) + self.hostname = hostname = utils.HostInfo() if hostname.ip.startswith("127."): raise errors.OpPrereqError("This host's IP resolves to the private" - " range (%s). Please fix DNS or /etc/hosts." % - (hostname.ip,)) + " range (%s). Please fix DNS or %s." 
% + (hostname.ip, constants.ETC_HOSTS)) - self.clustername = clustername = utils.LookupHostname(self.op.cluster_name) - if not clustername: - raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')" - % self.op.cluster_name) - - result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname.ip]) - if result.failed: + if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT, + source=constants.LOCALHOST_IP_ADDRESS): raise errors.OpPrereqError("Inconsistency: this host's name resolves" " to %s,\nbut this ip address does not" " belong to this host." " Aborting." % hostname.ip) + self.clustername = clustername = utils.HostInfo(self.op.cluster_name) + + if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, + timeout=5): + raise errors.OpPrereqError("Cluster IP already active. Aborting.") + secondary_ip = getattr(self.op, "secondary_ip", None) if secondary_ip and not utils.IsValidIP(secondary_ip): raise errors.OpPrereqError("Invalid secondary ip given") - if secondary_ip and secondary_ip != hostname.ip: - result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip]) - if result.failed: - raise errors.OpPrereqError("You gave %s as secondary IP,\n" - "but it does not belong to this host." % - secondary_ip) + if (secondary_ip and + secondary_ip != hostname.ip and + (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT, + source=constants.LOCALHOST_IP_ADDRESS))): + raise errors.OpPrereqError("You gave %s as secondary IP," + " but it does not belong to this host." % + secondary_ip) self.secondary_ip = secondary_ip - # checks presence of the volume group given - vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name) + if not hasattr(self.op, "vg_name"): + self.op.vg_name = None + # if vg_name not None, checks if volume group is valid + if self.op.vg_name: + vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name) + if vgstatus: + raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if" + " you are not using lvm" % vgstatus) + + self.op.file_storage_dir = os.path.normpath(self.op.file_storage_dir) + + if not os.path.isabs(self.op.file_storage_dir): + raise errors.OpPrereqError("The file storage directory you have is" + " not an absolute path.") + + if not os.path.exists(self.op.file_storage_dir): + try: + os.makedirs(self.op.file_storage_dir, 0750) + except OSError, err: + raise errors.OpPrereqError("Cannot create file storage directory" + " '%s': %s" % + (self.op.file_storage_dir, err)) - if vgstatus: - raise errors.OpPrereqError("Error: %s" % vgstatus) + if not os.path.isdir(self.op.file_storage_dir): + raise errors.OpPrereqError("The file storage directory '%s' is not" + " a directory." % self.op.file_storage_dir) if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", self.op.mac_prefix): raise errors.OpPrereqError("Invalid mac prefix given '%s'" % self.op.mac_prefix) - if self.op.hypervisor_type not in hypervisor.VALID_HTYPES: + if self.op.hypervisor_type not in constants.HYPER_TYPES: raise errors.OpPrereqError("Invalid hypervisor type given '%s'" % self.op.hypervisor_type) @@ -619,6 +514,11 @@ class LUInitCluster(LogicalUnit): (self.op.master_netdev, result.output.strip())) + if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and + os.access(constants.NODE_INITD_SCRIPT, os.X_OK)): + raise errors.OpPrereqError("Init.d script '%s' missing or not" + " executable." % constants.NODE_INITD_SCRIPT) + def Exec(self, feedback_fn): """Initialize the cluster. 
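
The cluster-init prerequisite checks above replace the external fping calls with utils.TcpPing(target, port, timeout=..., source=...). A rough sketch of such a probe, assuming it is a plain TCP connect test (tcp_ping below is an illustrative name, not the real utils code); connecting with source 127.0.0.1 can only succeed when the target address is configured locally, which is how the "does this IP belong to this host" check works.

import socket

def tcp_ping(target, port, timeout=10, source=None):
  # Illustrative only; the real utils.TcpPing may differ in details
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  sock.settimeout(timeout)
  try:
    try:
      if source is not None:
        # with source=127.0.0.1 the connect can only succeed if the
        # target address lives on this very host
        sock.bind((source, 0))
      sock.connect((target, port))
      return True
    except socket.error:
      return False
  finally:
    sock.close()
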
@@ -627,12 +527,13 @@ class LUInitCluster(LogicalUnit): hostname = self.hostname # set up the simple store - ss = ssconf.SimpleStore() + self.sstore = ss = ssconf.SimpleStore() ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type) ss.SetKey(ss.SS_MASTER_NODE, hostname.name) ss.SetKey(ss.SS_MASTER_IP, clustername.ip) ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev) ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name) + ss.SetKey(ss.SS_FILE_STORAGE_DIR, self.op.file_storage_dir) # set up the inter-node password and certificate _InitGanetiServerSetup(ss) @@ -641,25 +542,24 @@ class LUInitCluster(LogicalUnit): rpc.call_node_start_master(hostname.name) # set up ssh config and /etc/hosts - f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r') + f = open(constants.SSH_HOST_RSA_PUB, 'r') try: sshline = f.read() finally: f.close() sshkey = sshline.split(" ")[1] - _UpdateEtcHosts(hostname.name, hostname.ip) - - _UpdateKnownHosts(hostname.name, hostname.ip, sshkey) - + _AddHostToEtcHosts(hostname.name) _InitSSHSetup(hostname.name) # init of cluster config file - cfgw = config.ConfigWriter() + self.cfg = cfgw = config.ConfigWriter() cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip, sshkey, self.op.mac_prefix, self.op.vg_name, self.op.def_bridge) + ssh.WriteKnownHostsFile(cfgw, ss, constants.SSH_KNOWN_HOSTS_FILE) + class LUDestroyCluster(NoHooksLU): """Logical unit for destroying the cluster. @@ -690,9 +590,13 @@ class LUDestroyCluster(NoHooksLU): """Destroys the cluster. """ - utils.CreateBackup('/root/.ssh/id_dsa') - utils.CreateBackup('/root/.ssh/id_dsa.pub') - rpc.call_node_leave_cluster(self.sstore.GetMasterNode()) + master = self.sstore.GetMasterNode() + if not rpc.call_node_stop_master(master): + raise errors.OpExecError("Could not disable the master role") + priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) + utils.CreateBackup(priv_key) + utils.CreateBackup(pub_key) + rpc.call_node_leave_cluster(master) class LUVerifyCluster(NoHooksLU): @@ -812,7 +716,7 @@ class LUVerifyCluster(NoHooksLU): (instance, node)) bad = True - return not bad + return bad def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn): """Verify if there are any unknown volumes in the cluster. @@ -860,9 +764,9 @@ class LUVerifyCluster(NoHooksLU): """ bad = False feedback_fn("* Verifying global settings") - self.cfg.VerifyConfig() + for msg in self.cfg.VerifyConfig(): + feedback_fn(" - ERROR: %s" % msg) - master = self.sstore.GetMasterNode() vg_name = self.cfg.GetVGName() nodelist = utils.NiceSort(self.cfg.GetNodeList()) instancelist = utils.NiceSort(self.cfg.GetInstanceList()) @@ -898,12 +802,17 @@ class LUVerifyCluster(NoHooksLU): # node_volume volumeinfo = all_volumeinfo[node] - if type(volumeinfo) != dict: + if isinstance(volumeinfo, basestring): + feedback_fn(" - ERROR: LVM problem on node %s: %s" % + (node, volumeinfo[-400:].encode('string_escape'))) + bad = True + node_volume[node] = {} + elif not isinstance(volumeinfo, dict): feedback_fn(" - ERROR: connection to %s failed" % (node,)) bad = True continue - - node_volume[node] = volumeinfo + else: + node_volume[node] = volumeinfo # node_instance nodeinstance = all_instanceinfo[node] @@ -939,6 +848,78 @@ class LUVerifyCluster(NoHooksLU): return int(bad) +class LUVerifyDisks(NoHooksLU): + """Verifies the cluster disks status. + + """ + _OP_REQP = [] + + def CheckPrereq(self): + """Check prerequisites. + + This has no prerequisites. + + """ + pass + + def Exec(self, feedback_fn): + """Verify integrity of cluster disks. 
+ + """ + result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {} + + vg_name = self.cfg.GetVGName() + nodes = utils.NiceSort(self.cfg.GetNodeList()) + instances = [self.cfg.GetInstanceInfo(name) + for name in self.cfg.GetInstanceList()] + + nv_dict = {} + for inst in instances: + inst_lvs = {} + if (inst.status != "up" or + inst.disk_template not in constants.DTS_NET_MIRROR): + continue + inst.MapLVsByNode(inst_lvs) + # transform { iname: {node: [vol,],},} to {(node, vol): iname} + for node, vol_list in inst_lvs.iteritems(): + for vol in vol_list: + nv_dict[(node, vol)] = inst + + if not nv_dict: + return result + + node_lvs = rpc.call_volume_list(nodes, vg_name) + + to_act = set() + for node in nodes: + # node_volume + lvs = node_lvs[node] + + if isinstance(lvs, basestring): + logger.Info("error enumerating LVs on node %s: %s" % (node, lvs)) + res_nlvm[node] = lvs + elif not isinstance(lvs, dict): + logger.Info("connection to node %s failed or invalid data returned" % + (node,)) + res_nodes.append(node) + continue + + for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems(): + inst = nv_dict.pop((node, lv_name), None) + if (not lv_online and inst is not None + and inst.name not in res_instances): + res_instances.append(inst.name) + + # any leftover items in nv_dict are missing LVs, let's arrange the + # data better + for key, inst in nv_dict.iteritems(): + if inst.name not in res_missing: + res_missing[inst.name] = [] + res_missing[inst.name].append(key) + + return result + + class LURenameCluster(LogicalUnit): """Rename the cluster. @@ -952,6 +933,7 @@ class LURenameCluster(LogicalUnit): """ env = { + "OP_TARGET": self.sstore.GetClusterName(), "NEW_NAME": self.op.name, } mn = self.sstore.GetMasterNode() @@ -961,10 +943,7 @@ class LURenameCluster(LogicalUnit): """Verify that the passed name is a valid one. """ - hostname = utils.LookupHostname(self.op.name) - if not hostname: - raise errors.OpPrereqError("Cannot resolve the new cluster name ('%s')" % - self.op.name) + hostname = utils.HostInfo(self.op.name) new_name = hostname.name self.ip = new_ip = hostname.ip @@ -1016,11 +995,84 @@ class LURenameCluster(LogicalUnit): (fname, to_node)) finally: if not rpc.call_node_start_master(master): - logger.Error("Could not re-enable the master role on the master,\n" - "please restart manually.") + logger.Error("Could not re-enable the master role on the master," + " please restart manually.") + + +def _RecursiveCheckIfLVMBased(disk): + """Check if the given disk or its children are lvm-based. + + Args: + disk: ganeti.objects.Disk object + + Returns: + boolean indicating whether a LD_LV dev_type was found or not + + """ + if disk.children: + for chdisk in disk.children: + if _RecursiveCheckIfLVMBased(chdisk): + return True + return disk.dev_type == constants.LD_LV + + +class LUSetClusterParams(LogicalUnit): + """Change the parameters of the cluster. + + """ + HPATH = "cluster-modify" + HTYPE = constants.HTYPE_CLUSTER + _OP_REQP = [] + + def BuildHooksEnv(self): + """Build hooks env. + + """ + env = { + "OP_TARGET": self.sstore.GetClusterName(), + "NEW_VG_NAME": self.op.vg_name, + } + mn = self.sstore.GetMasterNode() + return env, [mn], [mn] + + def CheckPrereq(self): + """Check prerequisites. + + This checks whether the given params don't conflict and + if the given volume group is valid. 
+ + """ + if not self.op.vg_name: + instances = [self.cfg.GetInstanceInfo(name) + for name in self.cfg.GetInstanceList()] + for inst in instances: + for disk in inst.disks: + if _RecursiveCheckIfLVMBased(disk): + raise errors.OpPrereqError("Cannot disable lvm storage while" + " lvm-based instances exist") + + # if vg_name not None, checks given volume group on all nodes + if self.op.vg_name: + node_list = self.cfg.GetNodeList() + vglist = rpc.call_vg_list(node_list) + for node in node_list: + vgstatus = _HasValidVG(vglist[node], self.op.vg_name) + if vgstatus: + raise errors.OpPrereqError("Error on node '%s': %s" % + (node, vgstatus)) + + def Exec(self, feedback_fn): + """Change the parameters of the cluster. + + """ + if self.op.vg_name != self.cfg.GetVGName(): + self.cfg.SetVGName(self.op.vg_name) + else: + feedback_fn("Cluster LVM configuration already in desired" + " state, not changing") -def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): +def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False): """Sleep and poll for an instance's disk to sync. """ @@ -1028,7 +1080,7 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): return True if not oneshot: - logger.ToStdout("Waiting for instance %s to sync disks." % instance.name) + proc.LogInfo("Waiting for instance %s to sync disks." % instance.name) node = instance.primary_node @@ -1042,7 +1094,7 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): cumul_degraded = False rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks) if not rstats: - logger.ToStderr("Can't get any data from node %s" % node) + proc.LogWarning("Can't get any data from node %s" % node) retries += 1 if retries >= 10: raise errors.RemoteError("Can't contact node %s for mirror data," @@ -1053,10 +1105,11 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): for i in range(len(rstats)): mstat = rstats[i] if mstat is None: - logger.ToStderr("Can't compute data for node %s/%s" % + proc.LogWarning("Can't compute data for node %s/%s" % (node, instance.disks[i].iv_name)) continue - perc_done, est_time, is_degraded = mstat + # we ignore the ldisk parameter + perc_done, est_time, is_degraded, _ = mstat cumul_degraded = cumul_degraded or (is_degraded and perc_done is None) if perc_done is not None: done = False @@ -1065,8 +1118,8 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): max_time = est_time else: rem_time = "no time estimate" - logger.ToStdout("- device %s: %5.2f%% done, %s" % - (instance.disks[i].iv_name, perc_done, rem_time)) + proc.LogInfo("- device %s: %5.2f%% done, %s" % + (instance.disks[i].iv_name, perc_done, rem_time)) if done or oneshot: break @@ -1079,24 +1132,32 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False): utils.Lock('cmd') if done: - logger.ToStdout("Instance %s's disks are in sync." % instance.name) + proc.LogInfo("Instance %s's disks are in sync." % instance.name) return not cumul_degraded -def _CheckDiskConsistency(cfgw, dev, node, on_primary): +def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False): """Check that mirrors are not degraded. + The ldisk parameter, if True, will change the test from the + is_degraded attribute (which represents overall non-ok status for + the device(s)) to the ldisk (representing the local storage status). 
+ """ cfgw.SetDiskID(dev, node) + if ldisk: + idx = 6 + else: + idx = 5 result = True if on_primary or dev.AssembleOnSecondary(): rstats = rpc.call_blockdev_find(node, dev) if not rstats: - logger.ToStderr("Can't get any data from node %s" % node) + logger.ToStderr("Node %s: Disk degraded, not found or node down" % node) result = False else: - result = result and (not rstats[5]) + result = result and (not rstats[idx]) if dev.children: for child in dev.children: result = result and _CheckDiskConsistency(cfgw, child, node, on_primary) @@ -1145,6 +1206,7 @@ class LURemoveNode(LogicalUnit): """ env = { + "OP_TARGET": self.op.node_name, "NODE_NAME": self.op.node_name, } all_nodes = self.cfg.GetNodeList() @@ -1194,12 +1256,14 @@ class LURemoveNode(LogicalUnit): rpc.call_node_leave_cluster(node.name) - ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT) + self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT) logger.Info("Removing node %s from config" % node.name) self.cfg.RemoveNode(node.name) + _RemoveHostFromEtcHosts(node.name) + class LUQueryNodes(NoHooksLU): """Logical unit for querying nodes. @@ -1214,7 +1278,8 @@ class LUQueryNodes(NoHooksLU): """ self.dynamic_fields = frozenset(["dtotal", "dfree", - "mtotal", "mnode", "mfree"]) + "mtotal", "mnode", "mfree", + "bootid"]) _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt", "pinst_list", "sinst_list", @@ -1245,6 +1310,7 @@ class LUQueryNodes(NoHooksLU): "mfree": utils.TryConvert(int, nodeinfo['memory_free']), "dtotal": utils.TryConvert(int, nodeinfo['vg_size']), "dfree": utils.TryConvert(int, nodeinfo['vg_free']), + "bootid": nodeinfo['bootid'], } else: live_data[name] = {} @@ -1382,6 +1448,7 @@ class LUAddNode(LogicalUnit): """ env = { + "OP_TARGET": self.op.node_name, "NODE_NAME": self.op.node_name, "NODE_PIP": self.op.primary_ip, "NODE_SIP": self.op.secondary_ip, @@ -1404,9 +1471,7 @@ class LUAddNode(LogicalUnit): node_name = self.op.node_name cfg = self.cfg - dns_data = utils.LookupHostname(node_name) - if not dns_data: - raise errors.OpPrereqError("Node %s is not resolvable" % node_name) + dns_data = utils.HostInfo(node_name) node = dns_data.name primary_ip = self.op.primary_ip = dns_data.ip @@ -1444,22 +1509,25 @@ class LUAddNode(LogicalUnit): " new node doesn't have one") # checks reachablity - command = ["fping", "-q", primary_ip] - result = utils.RunCmd(command) - if result.failed: + if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT): raise errors.OpPrereqError("Node not reachable by ping") if not newbie_singlehomed: # check reachability from my secondary ip to newbie's secondary ip - command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip] - result = utils.RunCmd(command) - if result.failed: - raise errors.OpPrereqError("Node secondary ip not reachable by ping") + if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT, + source=myself.secondary_ip): + raise errors.OpPrereqError("Node secondary ip not reachable by TCP" + " based ping to noded port") self.new_node = objects.Node(name=node, primary_ip=primary_ip, secondary_ip=secondary_ip) + if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31: + if not os.path.exists(constants.VNC_PASSWORD_FILE): + raise errors.OpPrereqError("Cluster VNC password file %s missing" % + constants.VNC_PASSWORD_FILE) + def Exec(self, feedback_fn): """Adds the new node to the cluster. 
@@ -1498,7 +1566,7 @@ class LUAddNode(LogicalUnit): constants.SSL_CERT_FILE, gntpem, constants.NODE_INITD_SCRIPT)) - result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True) + result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True) if result.failed: raise errors.OpExecError("Remote command on node %s, error: %s," " output: %s" % @@ -1521,10 +1589,11 @@ class LUAddNode(LogicalUnit): # setup ssh on node logger.Info("copy ssh key to node %s" % node) + priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) keyarray = [] - keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub", - "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub", - "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"] + keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB, + constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB, + priv_key, pub_key] for i in keyfiles: f = open(i, 'r') @@ -1540,24 +1609,23 @@ class LUAddNode(LogicalUnit): raise errors.OpExecError("Cannot transfer ssh keys to the new node") # Add node to our /etc/hosts, and add key to known_hosts - _UpdateEtcHosts(new_node.name, new_node.primary_ip) - _UpdateKnownHosts(new_node.name, new_node.primary_ip, - self.cfg.GetHostKey()) + _AddHostToEtcHosts(new_node.name) if new_node.secondary_ip != new_node.primary_ip: - result = ssh.SSHCall(node, "root", - "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip) - if result.failed: - raise errors.OpExecError("Node claims it doesn't have the" - " secondary ip you gave (%s).\n" - "Please fix and re-run this command." % - new_node.secondary_ip) - - success, msg = ssh.VerifyNodeHostname(node) + if not rpc.call_node_tcp_ping(new_node.name, + constants.LOCALHOST_IP_ADDRESS, + new_node.secondary_ip, + constants.DEFAULT_NODED_PORT, + 10, False): + raise errors.OpExecError("Node claims it doesn't have the secondary ip" + " you gave (%s). Please fix and re-run this" + " command." % new_node.secondary_ip) + + success, msg = self.ssh.VerifyNodeHostname(node) if not success: raise errors.OpExecError("Node '%s' claims it has a different hostname" - " than the one the resolver gives: %s.\n" - "Please fix and re-run this command." % + " than the one the resolver gives: %s." + " Please fix and re-run this command." % (node, msg)) # Distribute updated /etc/hosts and known_hosts to all nodes, @@ -1568,7 +1636,7 @@ class LUAddNode(LogicalUnit): dist_nodes.remove(myself.name) logger.Debug("Copying hosts and known_hosts to all nodes") - for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE): + for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE): result = rpc.call_upload_file(dist_nodes, fname) for to_node in dist_nodes: if not result[to_node]: @@ -1576,8 +1644,10 @@ class LUAddNode(LogicalUnit): (fname, to_node)) to_copy = ss.GetFileList() + if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31: + to_copy.append(constants.VNC_PASSWORD_FILE) for fname in to_copy: - if not ssh.CopyFileToNode(node, fname): + if not self.ssh.CopyFileToNode(node, fname): logger.Error("could not copy file %s to node %s" % (fname, node)) logger.Info("adding node %s to cluster.conf" % node) @@ -1603,6 +1673,7 @@ class LUMasterFailover(LogicalUnit): """ env = { + "OP_TARGET": self.new_master, "NEW_MASTER": self.new_master, "OLD_MASTER": self.old_master, } @@ -1614,14 +1685,13 @@ class LUMasterFailover(LogicalUnit): This checks that we are not already the master. 
""" - self.new_master = socket.gethostname() - + self.new_master = utils.HostInfo().name self.old_master = self.sstore.GetMasterNode() if self.old_master == self.new_master: raise errors.OpPrereqError("This commands must be run on the node" - " where you want the new master to be.\n" - "%s is already the master" % + " where you want the new master to be." + " %s is already the master" % self.old_master) def Exec(self, feedback_fn): @@ -1650,8 +1720,8 @@ class LUMasterFailover(LogicalUnit): if not rpc.call_node_start_master(self.new_master): logger.Error("could not start the master role on the new master" " %s, please check" % self.new_master) - feedback_fn("Error in activating the master IP on the new master,\n" - "please fix manually.") + feedback_fn("Error in activating the master IP on the new master," + " please fix manually.") @@ -1716,12 +1786,12 @@ class LUClusterCopyFile(NoHooksLU): """ filename = self.op.filename - myname = socket.gethostname() + myname = utils.HostInfo().name for node in self.nodes: if node == myname: continue - if not ssh.CopyFileToNode(node, filename): + if not self.ssh.CopyFileToNode(node, filename): logger.Error("Copy of file %s to node %s failed" % (filename, node)) @@ -1762,9 +1832,15 @@ class LURunClusterCommand(NoHooksLU): """Run a command on some nodes. """ + # put the master at the end of the nodes list + master_node = self.sstore.GetMasterNode() + if master_node in self.nodes: + self.nodes.remove(master_node) + self.nodes.append(master_node) + data = [] for node in self.nodes: - result = ssh.SSHCall(node, "root", self.op.command) + result = self.ssh.Run(node, "root", self.op.command) data.append((node, result.output, result.exit_code)) return data @@ -1818,21 +1894,47 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False): """ device_info = [] disks_ok = True + iname = instance.name + # With the two passes mechanism we try to reduce the window of + # opportunity for the race condition of switching DRBD to primary + # before handshaking occured, but we do not eliminate it + + # The proper fix would be to wait (with some limits) until the + # connection has been made and drbd transitions from WFConnection + # into any other network-connected state (Connected, SyncTarget, + # SyncSource, etc.) 
+ + # 1st pass, assemble on all nodes in secondary mode for inst_disk in instance.disks: - master_result = None for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): cfg.SetDiskID(node_disk, node) - is_primary = node == instance.primary_node - result = rpc.call_blockdev_assemble(node, node_disk, is_primary) + result = rpc.call_blockdev_assemble(node, node_disk, iname, False) if not result: - logger.Error("could not prepare block device %s on node %s (is_pri" - "mary=%s)" % (inst_disk.iv_name, node, is_primary)) - if is_primary or not ignore_secondaries: + logger.Error("could not prepare block device %s on node %s" + " (is_primary=False, pass=1)" % (inst_disk.iv_name, node)) + if not ignore_secondaries: disks_ok = False - if is_primary: - master_result = result - device_info.append((instance.primary_node, inst_disk.iv_name, - master_result)) + + # FIXME: race condition on drbd migration to primary + + # 2nd pass, do only the primary node + for inst_disk in instance.disks: + for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): + if node != instance.primary_node: + continue + cfg.SetDiskID(node_disk, node) + result = rpc.call_blockdev_assemble(node, node_disk, iname, True) + if not result: + logger.Error("could not prepare block device %s on node %s" + " (is_primary=True, pass=2)" % (inst_disk.iv_name, node)) + disks_ok = False + device_info.append((instance.primary_node, inst_disk.iv_name, result)) + + # leave the disks configured for the primary node + # this is a workaround that would be fixed better by + # improving the logical/physical id handling + for disk in instance.disks: + cfg.SetDiskID(disk, instance.primary_node) return disks_ok, device_info @@ -1909,6 +2011,36 @@ def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False): return result +def _CheckNodeFreeMemory(cfg, node, reason, requested): + """Checks if a node has enough free memory. + + This function check if a given node has the needed amount of free + memory. In case the node has less memory or we cannot get the + information from the node, this function raise an OpPrereqError + exception. + + Args: + - cfg: a ConfigWriter instance + - node: the node name + - reason: string to use in the error message + - requested: the amount of memory in MiB + + """ + nodeinfo = rpc.call_node_info([node], cfg.GetVGName()) + if not nodeinfo or not isinstance(nodeinfo, dict): + raise errors.OpPrereqError("Could not contact node %s for resource" + " information" % (node,)) + + free_mem = nodeinfo[node].get('memory_free') + if not isinstance(free_mem, int): + raise errors.OpPrereqError("Can't compute free memory on node %s, result" + " was '%s'" % (node, free_mem)) + if requested > free_mem: + raise errors.OpPrereqError("Not enough memory on node %s for %s:" + " needed %s MiB, available %s MiB" % + (node, reason, requested, free_mem)) + + class LUStartupInstance(LogicalUnit): """Starts an instance. 
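
The new _CheckNodeFreeMemory helper above centralises the free-memory check that the start and failover code paths previously did inline. A typical call, as used further down in this diff for LUStartupInstance, looks like:

  # inside an LU's CheckPrereq(), before committing to start the instance
  _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                       "starting instance %s" % instance.name,
                       instance.memory)
  # raises errors.OpPrereqError if the node cannot be contacted, reports a
  # non-integer 'memory_free', or has fewer free MiB than requested
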
@@ -1944,11 +2076,11 @@ class LUStartupInstance(LogicalUnit): self.op.instance_name) # check bridges existance - brlist = [nic.bridge for nic in instance.nics] - if not rpc.call_bridges_exist(instance.primary_node, brlist): - raise errors.OpPrereqError("one or more target bridges %s does not" - " exist on destination node '%s'" % - (brlist, instance.primary_node)) + _CheckInstanceBridgesExist(instance) + + _CheckNodeFreeMemory(self.cfg, instance.primary_node, + "starting instance %s" % instance.name, + instance.memory) self.instance = instance self.op.instance_name = instance.name @@ -1961,21 +2093,9 @@ class LUStartupInstance(LogicalUnit): force = self.op.force extra_args = getattr(self.op, "extra_args", "") - node_current = instance.primary_node - - nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName()) - if not nodeinfo: - raise errors.OpExecError("Could not contact node %s for infos" % - (node_current)) + self.cfg.MarkInstanceUp(instance.name) - freememory = nodeinfo[node_current]['memory_free'] - memory = instance.memory - if memory > freememory: - raise errors.OpExecError("Not enough memory to start instance" - " %s on node %s" - " needed %s MiB, available %s MiB" % - (instance.name, node_current, memory, - freememory)) + node_current = instance.primary_node _StartInstanceDisks(self.cfg, instance, force) @@ -1983,6 +2103,80 @@ class LUStartupInstance(LogicalUnit): _ShutdownInstanceDisks(instance, self.cfg) raise errors.OpExecError("Could not start instance") + +class LURebootInstance(LogicalUnit): + """Reboot an instance. + + """ + HPATH = "instance-reboot" + HTYPE = constants.HTYPE_INSTANCE + _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"] + + def BuildHooksEnv(self): + """Build hooks env. + + This runs on master, primary and secondary nodes of the instance. + + """ + env = { + "IGNORE_SECONDARIES": self.op.ignore_secondaries, + } + env.update(_BuildInstanceHookEnvByObject(self.instance)) + nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + + list(self.instance.secondary_nodes)) + return env, nl, nl + + def CheckPrereq(self): + """Check prerequisites. + + This checks that the instance is in the cluster. + + """ + instance = self.cfg.GetInstanceInfo( + self.cfg.ExpandInstanceName(self.op.instance_name)) + if instance is None: + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + + # check bridges existance + _CheckInstanceBridgesExist(instance) + + self.instance = instance + self.op.instance_name = instance.name + + def Exec(self, feedback_fn): + """Reboot the instance. 
+ + """ + instance = self.instance + ignore_secondaries = self.op.ignore_secondaries + reboot_type = self.op.reboot_type + extra_args = getattr(self.op, "extra_args", "") + + node_current = instance.primary_node + + if reboot_type not in [constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD, + constants.INSTANCE_REBOOT_FULL]: + raise errors.ParameterError("reboot type not in [%s, %s, %s]" % + (constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD, + constants.INSTANCE_REBOOT_FULL)) + + if reboot_type in [constants.INSTANCE_REBOOT_SOFT, + constants.INSTANCE_REBOOT_HARD]: + if not rpc.call_instance_reboot(node_current, instance, + reboot_type, extra_args): + raise errors.OpExecError("Could not reboot instance") + else: + if not rpc.call_instance_shutdown(node_current, instance): + raise errors.OpExecError("could not shutdown instance for full reboot") + _ShutdownInstanceDisks(instance, self.cfg) + _StartInstanceDisks(self.cfg, instance, ignore_secondaries) + if not rpc.call_instance_start(node_current, instance, extra_args): + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Could not start instance for full reboot") + self.cfg.MarkInstanceUp(instance.name) @@ -2024,10 +2218,10 @@ class LUShutdownInstance(LogicalUnit): """ instance = self.instance node_current = instance.primary_node + self.cfg.MarkInstanceDown(instance.name) if not rpc.call_instance_shutdown(node_current, instance): logger.Error("could not shutdown instance") - self.cfg.MarkInstanceDown(instance.name) _ShutdownInstanceDisks(instance, self.cfg) @@ -2081,8 +2275,8 @@ class LUReinstallInstance(LogicalUnit): if pnode is None: raise errors.OpPrereqError("Primary node '%s' is unknown" % self.op.pnode) - os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name] - if not isinstance(os_obj, objects.OS): + os_obj = rpc.call_os_get(pnode.name, self.op.os_type) + if not os_obj: raise errors.OpPrereqError("OS '%s' not in supported OS list for" " primary node" % self.op.os_type) @@ -2103,8 +2297,8 @@ class LUReinstallInstance(LogicalUnit): try: feedback_fn("Running the instance OS create scripts...") if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"): - raise errors.OpExecError("Could not install OS for instance %s " - "on node %s" % + raise errors.OpExecError("Could not install OS for instance %s" + " on node %s" % (inst.name, inst.primary_node)) finally: _ShutdownInstanceDisks(inst, self.cfg) @@ -2152,18 +2346,20 @@ class LURenameInstance(LogicalUnit): self.instance = instance # new name verification - hostname1 = utils.LookupHostname(self.op.new_name) - if not hostname1: - raise errors.OpPrereqError("New instance name '%s' not found in dns" % - self.op.new_name) + name_info = utils.HostInfo(self.op.new_name) + + self.op.new_name = new_name = name_info.name + instance_list = self.cfg.GetInstanceList() + if new_name in instance_list: + raise errors.OpPrereqError("Instance '%s' is already in the cluster" % + instance_name) - self.op.new_name = new_name = hostname1.name if not getattr(self.op, "ignore_ip", False): - command = ["fping", "-q", hostname1.ip] + command = ["fping", "-q", name_info.ip] result = utils.RunCmd(command) if not result.failed: raise errors.OpPrereqError("IP %s of instance %s already in use" % - (hostname1.ip, new_name)) + (name_info.ip, new_name)) def Exec(self, feedback_fn): @@ -2182,9 +2378,8 @@ class LURenameInstance(LogicalUnit): try: if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name, "sda", "sdb"): - msg = ("Could run 
OS rename script for instance %s\n" - "on node %s\n" - "(but the instance has been renamed in Ganeti)" % + msg = ("Could run OS rename script for instance %s on node %s (but the" + " instance has been renamed in Ganeti)" % (inst.name, inst.primary_node)) logger.Error(msg) finally: @@ -2206,8 +2401,7 @@ class LURemoveInstance(LogicalUnit): """ env = _BuildInstanceHookEnvByObject(self.instance) - nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] + - list(self.instance.secondary_nodes)) + nl = [self.sstore.GetMasterNode()] return env, nl, nl def CheckPrereq(self): @@ -2232,12 +2426,19 @@ class LURemoveInstance(LogicalUnit): (instance.name, instance.primary_node)) if not rpc.call_instance_shutdown(instance.primary_node, instance): - raise errors.OpExecError("Could not shutdown instance %s on node %s" % - (instance.name, instance.primary_node)) + if self.op.ignore_failures: + feedback_fn("Warning: can't shutdown instance") + else: + raise errors.OpExecError("Could not shutdown instance %s on node %s" % + (instance.name, instance.primary_node)) logger.Info("removing block devices for instance %s" % instance.name) - _RemoveDisks(instance, self.cfg) + if not _RemoveDisks(instance, self.cfg): + if self.op.ignore_failures: + feedback_fn("Warning: can't remove instance's disks") + else: + raise errors.OpExecError("Can't remove instance's disks") logger.Info("removing instance %s out of cluster config" % instance.name) @@ -2256,11 +2457,11 @@ class LUQueryInstances(NoHooksLU): This checks that the fields required are valid output fields. """ - self.dynamic_fields = frozenset(["oper_state", "oper_ram"]) + self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"]) _CheckOutputFields(static=["name", "os", "pnode", "snodes", "admin_state", "admin_ram", "disk_template", "ip", "mac", "bridge", - "sda_size", "sdb_size"], + "sda_size", "sdb_size", "vcpus"], dynamic=self.dynamic_fields, selected=self.op.output_fields) @@ -2313,6 +2514,21 @@ class LUQueryInstances(NoHooksLU): val = None else: val = bool(live_data.get(instance.name)) + elif field == "status": + if instance.primary_node in bad_nodes: + val = "ERROR_nodedown" + else: + running = bool(live_data.get(instance.name)) + if running: + if instance.status != "down": + val = "running" + else: + val = "ERROR_up" + else: + if instance.status != "down": + val = "ERROR_down" + else: + val = "ADMIN_down" elif field == "admin_ram": val = instance.memory elif field == "oper_ram": @@ -2336,6 +2552,8 @@ class LUQueryInstances(NoHooksLU): val = None else: val = disk.size + elif field == "vcpus": + val = instance.vcpus else: raise errors.ParameterError(field) iout.append(val) @@ -2377,34 +2595,26 @@ class LUFailoverInstance(LogicalUnit): raise errors.OpPrereqError("Instance '%s' not known" % self.op.instance_name) - if instance.disk_template != constants.DT_REMOTE_RAID1: + if instance.disk_template not in constants.DTS_NET_MIRROR: raise errors.OpPrereqError("Instance's disk layout is not" - " remote_raid1.") + " network mirrored, cannot failover.") secondary_nodes = instance.secondary_nodes if not secondary_nodes: raise errors.ProgrammerError("no secondary node but using " "DT_REMOTE_RAID1 template") - # check memory requirements on the secondary node target_node = secondary_nodes[0] - nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName()) - info = nodeinfo.get(target_node, None) - if not info: - raise errors.OpPrereqError("Cannot get current information" - " from node '%s'" % nodeinfo) - if instance.memory > info['memory_free']: 
- raise errors.OpPrereqError("Not enough memory on target node %s." - " %d MB available, %d MB required" % - (target_node, info['memory_free'], - instance.memory)) + # check memory requirements on the secondary node + _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" % + instance.name, instance.memory) # check bridge existance brlist = [nic.bridge for nic in instance.nics] - if not rpc.call_bridges_exist(instance.primary_node, brlist): + if not rpc.call_bridges_exist(target_node, brlist): raise errors.OpPrereqError("One or more target bridges %s does not" " exist on destination node '%s'" % - (brlist, instance.primary_node)) + (brlist, target_node)) self.instance = instance @@ -2424,33 +2634,22 @@ class LUFailoverInstance(LogicalUnit): for dev in instance.disks: # for remote_raid1, these are md over drbd if not _CheckDiskConsistency(self.cfg, dev, target_node, False): - if not self.op.ignore_consistency: + if instance.status == "up" and not self.op.ignore_consistency: raise errors.OpExecError("Disk %s is degraded on target node," " aborting failover." % dev.iv_name) - feedback_fn("* checking target node resource availability") - nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName()) - - if not nodeinfo: - raise errors.OpExecError("Could not contact target node %s." % - target_node) - - free_memory = int(nodeinfo[target_node]['memory_free']) - memory = instance.memory - if memory > free_memory: - raise errors.OpExecError("Not enough memory to create instance %s on" - " node %s. needed %s MiB, available %s MiB" % - (instance.name, target_node, memory, - free_memory)) - feedback_fn("* shutting down instance on source node") logger.Info("Shutting down instance %s on node %s" % (instance.name, source_node)) if not rpc.call_instance_shutdown(source_node, instance): - logger.Error("Could not shutdown instance %s on node %s. Proceeding" - " anyway. Please make sure node %s is down" % - (instance.name, source_node, source_node)) + if self.op.ignore_consistency: + logger.Error("Could not shutdown instance %s on node %s. Proceeding" + " anyway. 
Please make sure node %s is down" % + (instance.name, source_node, source_node)) + else: + raise errors.OpExecError("Could not shutdown instance %s on node %s" % + (instance.name, source_node)) feedback_fn("* deactivating the instance's disks on source node") if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True): @@ -2460,24 +2659,26 @@ class LUFailoverInstance(LogicalUnit): # distribute new instance config to the other nodes self.cfg.AddInstance(instance) - feedback_fn("* activating the instance's disks on target node") - logger.Info("Starting instance %s on node %s" % - (instance.name, target_node)) + # Only start the instance if it's marked as up + if instance.status == "up": + feedback_fn("* activating the instance's disks on target node") + logger.Info("Starting instance %s on node %s" % + (instance.name, target_node)) - disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg, - ignore_secondaries=True) - if not disks_ok: - _ShutdownInstanceDisks(instance, self.cfg) - raise errors.OpExecError("Can't activate the instance's disks") + disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg, + ignore_secondaries=True) + if not disks_ok: + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Can't activate the instance's disks") - feedback_fn("* starting the instance on the target node") - if not rpc.call_instance_start(target_node, instance, None): - _ShutdownInstanceDisks(instance, self.cfg) - raise errors.OpExecError("Could not start instance %s on node %s." % - (instance.name, target_node)) + feedback_fn("* starting the instance on the target node") + if not rpc.call_instance_start(target_node, instance, None): + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Could not start instance %s on node %s." % + (instance.name, target_node)) -def _CreateBlockDevOnPrimary(cfg, node, device, info): +def _CreateBlockDevOnPrimary(cfg, node, instance, device, info): """Create a tree of block devices on the primary node. This always creates all devices. @@ -2485,11 +2686,12 @@ def _CreateBlockDevOnPrimary(cfg, node, device, info): """ if device.children: for child in device.children: - if not _CreateBlockDevOnPrimary(cfg, node, child, info): + if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info): return False cfg.SetDiskID(device, node) - new_id = rpc.call_blockdev_create(node, device, device.size, True, info) + new_id = rpc.call_blockdev_create(node, device, device.size, + instance.name, True, info) if not new_id: return False if device.physical_id is None: @@ -2497,7 +2699,7 @@ def _CreateBlockDevOnPrimary(cfg, node, device, info): return True -def _CreateBlockDevOnSecondary(cfg, node, device, force, info): +def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info): """Create a tree of block devices on a secondary node. 
If this device type has to be created on secondaries, create it and @@ -2510,13 +2712,15 @@ def _CreateBlockDevOnSecondary(cfg, node, device, force, info): force = True if device.children: for child in device.children: - if not _CreateBlockDevOnSecondary(cfg, node, child, force, info): + if not _CreateBlockDevOnSecondary(cfg, node, instance, + child, force, info): return False if not force: return True cfg.SetDiskID(device, node) - new_id = rpc.call_blockdev_create(node, device, device.size, False, info) + new_id = rpc.call_blockdev_create(node, device, device.size, + instance.name, False, info) if not new_id: return False if device.physical_id is None: @@ -2543,76 +2747,79 @@ def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names): """ port = cfg.AllocatePort() vgname = cfg.GetVGName() - dev_data = objects.Disk(dev_type="lvm", size=size, + dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, logical_id=(vgname, names[0])) - dev_meta = objects.Disk(dev_type="lvm", size=128, + dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128, logical_id=(vgname, names[1])) - drbd_dev = objects.Disk(dev_type="drbd", size=size, + drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size, logical_id = (primary, secondary, port), children = [dev_data, dev_meta]) return drbd_dev +def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name): + """Generate a drbd8 device complete with its children. + + """ + port = cfg.AllocatePort() + vgname = cfg.GetVGName() + dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, + logical_id=(vgname, names[0])) + dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128, + logical_id=(vgname, names[1])) + drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size, + logical_id = (primary, secondary, port), + children = [dev_data, dev_meta], + iv_name=iv_name) + return drbd_dev + + def _GenerateDiskTemplate(cfg, template_name, instance_name, primary_node, - secondary_nodes, disk_sz, swap_sz): + secondary_nodes, disk_sz, swap_sz, + file_storage_dir, file_driver): """Generate the entire disk layout for a given template type. 
""" #TODO: compute space requirements vgname = cfg.GetVGName() - if template_name == "diskless": + if template_name == constants.DT_DISKLESS: disks = [] - elif template_name == "plain": + elif template_name == constants.DT_PLAIN: if len(secondary_nodes) != 0: raise errors.ProgrammerError("Wrong template configuration") names = _GenerateUniqueNames(cfg, [".sda", ".sdb"]) - sda_dev = objects.Disk(dev_type="lvm", size=disk_sz, + sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz, logical_id=(vgname, names[0]), iv_name = "sda") - sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz, + sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz, logical_id=(vgname, names[1]), iv_name = "sdb") disks = [sda_dev, sdb_dev] - elif template_name == "local_raid1": - if len(secondary_nodes) != 0: - raise errors.ProgrammerError("Wrong template configuration") - - - names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2", - ".sdb_m1", ".sdb_m2"]) - sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz, - logical_id=(vgname, names[0])) - sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz, - logical_id=(vgname, names[1])) - md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda", - size=disk_sz, - children = [sda_dev_m1, sda_dev_m2]) - sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz, - logical_id=(vgname, names[2])) - sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz, - logical_id=(vgname, names[3])) - md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb", - size=swap_sz, - children = [sdb_dev_m1, sdb_dev_m2]) - disks = [md_sda_dev, md_sdb_dev] - elif template_name == constants.DT_REMOTE_RAID1: + elif template_name == constants.DT_DRBD8: if len(secondary_nodes) != 1: raise errors.ProgrammerError("Wrong template configuration") remote_node = secondary_nodes[0] names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta", ".sdb_data", ".sdb_meta"]) - drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node, - disk_sz, names[0:2]) - md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda", - children = [drbd_sda_dev], size=disk_sz) - drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node, - swap_sz, names[2:4]) - md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb", - children = [drbd_sdb_dev], size=swap_sz) - disks = [md_sda_dev, md_sdb_dev] + drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node, + disk_sz, names[0:2], "sda") + drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node, + swap_sz, names[2:4], "sdb") + disks = [drbd_sda_dev, drbd_sdb_dev] + elif template_name == constants.DT_FILE: + if len(secondary_nodes) != 0: + raise errors.ProgrammerError("Wrong template configuration") + + file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz, + iv_name="sda", logical_id=(file_driver, + "%s/sda" % file_storage_dir)) + file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz, + iv_name="sdb", logical_id=(file_driver, + "%s/sdb" % file_storage_dir)) + disks = [file_sda_dev, file_sdb_dev] else: raise errors.ProgrammerError("Invalid disk template '%s'" % template_name) return disks @@ -2639,18 +2846,32 @@ def _CreateDisks(cfg, instance): """ info = _GetInstanceInfoText(instance) + if instance.disk_template == constants.DT_FILE: + file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) + result = rpc.call_file_storage_dir_create(instance.primary_node, + file_storage_dir) + + if not result: + logger.Error("Could not connect to node '%s'" % inst.primary_node) + return 
False + + if not result[0]: + logger.Error("failed to create directory '%s'" % file_storage_dir) + return False + for device in instance.disks: logger.Info("creating volume %s for instance %s" % (device.iv_name, instance.name)) #HARDCODE for secondary_node in instance.secondary_nodes: - if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False, - info): + if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance, + device, False, info): logger.Error("failed to create volume %s (%s) on secondary node %s!" % (device.iv_name, device, secondary_node)) return False #HARDCODE - if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info): + if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, + instance, device, info): logger.Error("failed to create volume %s on primary!" % device.iv_name) return False @@ -2662,7 +2883,7 @@ def _RemoveDisks(instance, cfg): This abstracts away some work from `AddInstance()` and `RemoveInstance()`. Note that in case some of the devices couldn't - be remove, the removal will continue with the other ones (compare + be removed, the removal will continue with the other ones (compare with `_CreateDisks()`). Args: @@ -2683,6 +2904,14 @@ def _RemoveDisks(instance, cfg): " continuing anyway" % (device.iv_name, node)) result = False + + if instance.disk_template == constants.DT_FILE: + file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) + if not rpc.call_file_storage_dir_remove(instance.primary_node, + file_storage_dir): + logger.Error("could not remove directory '%s'" % file_storage_dir) + result = False + return result @@ -2694,7 +2923,7 @@ class LUCreateInstance(LogicalUnit): HTYPE = constants.HTYPE_INSTANCE _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode", "disk_template", "swap_size", "mode", "start", "vcpus", - "wait_for_sync", "ip_check"] + "wait_for_sync", "ip_check", "mac"] def BuildHooksEnv(self): """Build hooks env. @@ -2720,7 +2949,7 @@ class LUCreateInstance(LogicalUnit): os_type=self.op.os_type, memory=self.op.mem_size, vcpus=self.op.vcpus, - nics=[(self.inst_ip, self.op.bridge)], + nics=[(self.inst_ip, self.op.bridge, self.op.mac)], )) nl = ([self.sstore.GetMasterNode(), self.op.pnode] + @@ -2732,11 +2961,20 @@ class LUCreateInstance(LogicalUnit): """Check prerequisites. 
""" + for attr in ["kernel_path", "initrd_path", "hvm_boot_order"]: + if not hasattr(self.op, attr): + setattr(self.op, attr, None) + if self.op.mode not in (constants.INSTANCE_CREATE, constants.INSTANCE_IMPORT): raise errors.OpPrereqError("Invalid instance creation mode '%s'" % self.op.mode) + if (not self.cfg.GetVGName() and + self.op.disk_template not in constants.DTS_NOT_LVM): + raise errors.OpPrereqError("Cluster does not support lvm-based" + " instances") + if self.op.mode == constants.INSTANCE_IMPORT: src_node = getattr(self.op, "src_node", None) src_path = getattr(self.op, "src_path", None) @@ -2789,9 +3027,18 @@ class LUCreateInstance(LogicalUnit): if self.op.disk_template not in constants.DISK_TEMPLATES: raise errors.OpPrereqError("Invalid disk template name") - if self.op.disk_template == constants.DT_REMOTE_RAID1: + if (self.op.file_driver and + not self.op.file_driver in constants.FILE_DRIVER): + raise errors.OpPrereqError("Invalid file driver name '%s'" % + self.op.file_driver) + + if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir): + raise errors.OpPrereqError("File storage directory not a relative" + " path") + + if self.op.disk_template in constants.DTS_NET_MIRROR: if getattr(self.op, "snode", None) is None: - raise errors.OpPrereqError("The 'remote_raid1' disk template needs" + raise errors.OpPrereqError("The networked disk templates need" " a mirror node") snode_name = self.cfg.ExpandNodeName(self.op.snode) @@ -2803,17 +3050,13 @@ class LUCreateInstance(LogicalUnit): " the primary node.") self.secondaries.append(snode_name) - # Check lv size requirements - nodenames = [pnode.name] + self.secondaries - nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName()) - # Required free disk space as a function of disk and swap space req_size_dict = { - constants.DT_DISKLESS: 0, + constants.DT_DISKLESS: None, constants.DT_PLAIN: self.op.disk_size + self.op.swap_size, - constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2, # 256 MB are added for drbd metadata, 128MB for each drbd device - constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256, + constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256, + constants.DT_FILE: None, } if self.op.disk_template not in req_size_dict: @@ -2822,27 +3065,35 @@ class LUCreateInstance(LogicalUnit): req_size = req_size_dict[self.op.disk_template] - for node in nodenames: - info = nodeinfo.get(node, None) - if not info: - raise errors.OpPrereqError("Cannot get current information" - " from node '%s'" % nodeinfo) - if req_size > info['vg_free']: - raise errors.OpPrereqError("Not enough disk space on target node %s." - " %d MB available, %d MB required" % - (node, info['vg_free'], req_size)) + # Check lv size requirements + if req_size is not None: + nodenames = [pnode.name] + self.secondaries + nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName()) + for node in nodenames: + info = nodeinfo.get(node, None) + if not info: + raise errors.OpPrereqError("Cannot get current information" + " from node '%s'" % nodeinfo) + vg_free = info.get('vg_free', None) + if not isinstance(vg_free, int): + raise errors.OpPrereqError("Can't compute free disk space on" + " node %s" % node) + if req_size > info['vg_free']: + raise errors.OpPrereqError("Not enough disk space on target node %s." 
+ " %d MB available, %d MB required" % + (node, info['vg_free'], req_size)) # os verification - os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name] - if not isinstance(os_obj, objects.OS): + os_obj = rpc.call_os_get(pnode.name, self.op.os_type) + if not os_obj: raise errors.OpPrereqError("OS '%s' not in supported os list for" " primary node" % self.op.os_type) + if self.op.kernel_path == constants.VALUE_NONE: + raise errors.OpPrereqError("Can't set instance kernel to none") + # instance verification - hostname1 = utils.LookupHostname(self.op.instance_name) - if not hostname1: - raise errors.OpPrereqError("Instance name '%s' not found in dns" % - self.op.instance_name) + hostname1 = utils.HostInfo(self.op.instance_name) self.op.instance_name = instance_name = hostname1.name instance_list = self.cfg.GetInstanceList() @@ -2867,11 +3118,15 @@ class LUCreateInstance(LogicalUnit): " adding an instance in start mode") if self.op.ip_check: - command = ["fping", "-q", hostname1.ip] - result = utils.RunCmd(command) - if not result.failed: - raise errors.OpPrereqError("IP address %s of instance %s already" - " in use" % (hostname1.ip, instance_name)) + if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT): + raise errors.OpPrereqError("IP %s of instance %s already in use" % + (hostname1.ip, instance_name)) + + # MAC address verification + if self.op.mac != "auto": + if not utils.IsValidMac(self.op.mac.lower()): + raise errors.OpPrereqError("invalid MAC address specified: %s" % + self.op.mac) # bridge verification bridge = getattr(self.op, "bridge", None) @@ -2885,6 +3140,12 @@ class LUCreateInstance(LogicalUnit): " destination node '%s'" % (self.op.bridge, pnode.name)) + # boot order verification + if self.op.hvm_boot_order is not None: + if len(self.op.hvm_boot_order.strip("acdn")) != 0: + raise errors.OpPrereqError("invalid boot order specified," + " must be one or more of [acdn]") + if self.op.start: self.instance_status = 'up' else: @@ -2897,15 +3158,34 @@ class LUCreateInstance(LogicalUnit): instance = self.op.instance_name pnode_name = self.pnode.name - nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC()) + if self.op.mac == "auto": + mac_address = self.cfg.GenerateMAC() + else: + mac_address = self.op.mac + + nic = objects.NIC(bridge=self.op.bridge, mac=mac_address) if self.inst_ip is not None: nic.ip = self.inst_ip + ht_kind = self.sstore.GetHypervisorType() + if ht_kind in constants.HTS_REQ_PORT: + network_port = self.cfg.AllocatePort() + else: + network_port = None + + # build the full file storage dir path + file_storage_dir = os.path.normpath(os.path.join( + self.sstore.GetFileStorageDir(), + self.op.file_storage_dir, instance)) + + disks = _GenerateDiskTemplate(self.cfg, self.op.disk_template, instance, pnode_name, self.secondaries, self.op.disk_size, - self.op.swap_size) + self.op.swap_size, + file_storage_dir, + self.op.file_driver) iobj = objects.Instance(name=instance, os=self.op.os_type, primary_node=pnode_name, @@ -2914,6 +3194,10 @@ class LUCreateInstance(LogicalUnit): nics=[nic], disks=disks, disk_template=self.op.disk_template, status=self.instance_status, + network_port=network_port, + kernel_path=self.op.kernel_path, + initrd_path=self.op.initrd_path, + hvm_boot_order=self.op.hvm_boot_order, ) feedback_fn("* creating instance disks...") @@ -2926,12 +3210,12 @@ class LUCreateInstance(LogicalUnit): self.cfg.AddInstance(iobj) if self.op.wait_for_sync: - disk_abort = not _WaitForSync(self.cfg, iobj) - elif iobj.disk_template == 
constants.DT_REMOTE_RAID1: + disk_abort = not _WaitForSync(self.cfg, iobj, self.proc) + elif iobj.disk_template in constants.DTS_NET_MIRROR: # make sure the disks are not degraded (still sync-ing is ok) time.sleep(15) feedback_fn("* checking mirrors status") - disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True) + disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True) else: disk_abort = False @@ -3013,213 +3297,10 @@ class LUConnectConsole(NoHooksLU): logger.Debug("connecting to console of %s on %s" % (instance.name, node)) hyper = hypervisor.GetHypervisor() - console_cmd = hyper.GetShellCommandForConsole(instance.name) - # build ssh cmdline - argv = ["ssh", "-q", "-t"] - argv.extend(ssh.KNOWN_HOSTS_OPTS) - argv.extend(ssh.BATCH_MODE_OPTS) - argv.append(node) - argv.append(console_cmd) - return "ssh", argv - - -class LUAddMDDRBDComponent(LogicalUnit): - """Adda new mirror member to an instance's disk. - - """ - HPATH = "mirror-add" - HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name", "remote_node", "disk_name"] - - def BuildHooksEnv(self): - """Build hooks env. - - This runs on the master, the primary and all the secondaries. - - """ - env = { - "NEW_SECONDARY": self.op.remote_node, - "DISK_NAME": self.op.disk_name, - } - env.update(_BuildInstanceHookEnvByObject(self.instance)) - nl = [self.sstore.GetMasterNode(), self.instance.primary_node, - self.op.remote_node,] + list(self.instance.secondary_nodes) - return env, nl, nl - - def CheckPrereq(self): - """Check prerequisites. - - This checks that the instance is in the cluster. - - """ - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.op.instance_name)) - if instance is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.op.instance_name) - self.instance = instance - - remote_node = self.cfg.ExpandNodeName(self.op.remote_node) - if remote_node is None: - raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node) - self.remote_node = remote_node - - if remote_node == instance.primary_node: - raise errors.OpPrereqError("The specified node is the primary node of" - " the instance.") - - if instance.disk_template != constants.DT_REMOTE_RAID1: - raise errors.OpPrereqError("Instance's disk layout is not" - " remote_raid1.") - for disk in instance.disks: - if disk.iv_name == self.op.disk_name: - break - else: - raise errors.OpPrereqError("Can't find this device ('%s') in the" - " instance." 
% self.op.disk_name) - if len(disk.children) > 1: - raise errors.OpPrereqError("The device already has two slave" - " devices.\n" - "This would create a 3-disk raid1" - " which we don't allow.") - self.disk = disk - - def Exec(self, feedback_fn): - """Add the mirror component - - """ - disk = self.disk - instance = self.instance - - remote_node = self.remote_node - lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]] - names = _GenerateUniqueNames(self.cfg, lv_names) - new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node, - remote_node, disk.size, names) - - logger.Info("adding new mirror component on secondary") - #HARDCODE - if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False, - _GetInstanceInfoText(instance)): - raise errors.OpExecError("Failed to create new component on secondary" - " node %s" % remote_node) - - logger.Info("adding new mirror component on primary") - #HARDCODE - if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd, - _GetInstanceInfoText(instance)): - # remove secondary dev - self.cfg.SetDiskID(new_drbd, remote_node) - rpc.call_blockdev_remove(remote_node, new_drbd) - raise errors.OpExecError("Failed to create volume on primary") - - # the device exists now - # call the primary node to add the mirror to md - logger.Info("adding new mirror component to md") - if not rpc.call_blockdev_addchild(instance.primary_node, - disk, new_drbd): - logger.Error("Can't add mirror compoment to md!") - self.cfg.SetDiskID(new_drbd, remote_node) - if not rpc.call_blockdev_remove(remote_node, new_drbd): - logger.Error("Can't rollback on secondary") - self.cfg.SetDiskID(new_drbd, instance.primary_node) - if not rpc.call_blockdev_remove(instance.primary_node, new_drbd): - logger.Error("Can't rollback on primary") - raise errors.OpExecError("Can't add mirror component to md array") - - disk.children.append(new_drbd) - - self.cfg.AddInstance(instance) - - _WaitForSync(self.cfg, instance) - - return 0 - - -class LURemoveMDDRBDComponent(LogicalUnit): - """Remove a component from a remote_raid1 disk. - - """ - HPATH = "mirror-remove" - HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name", "disk_name", "disk_id"] - - def BuildHooksEnv(self): - """Build hooks env. - - This runs on the master, the primary and all the secondaries. - - """ - env = { - "DISK_NAME": self.op.disk_name, - "DISK_ID": self.op.disk_id, - "OLD_SECONDARY": self.old_secondary, - } - env.update(_BuildInstanceHookEnvByObject(self.instance)) - nl = [self.sstore.GetMasterNode(), - self.instance.primary_node] + list(self.instance.secondary_nodes) - return env, nl, nl - - def CheckPrereq(self): - """Check prerequisites. - - This checks that the instance is in the cluster. - - """ - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.op.instance_name)) - if instance is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.op.instance_name) - self.instance = instance + console_cmd = hyper.GetShellCommandForConsole(instance) - if instance.disk_template != constants.DT_REMOTE_RAID1: - raise errors.OpPrereqError("Instance's disk layout is not" - " remote_raid1.") - for disk in instance.disks: - if disk.iv_name == self.op.disk_name: - break - else: - raise errors.OpPrereqError("Can't find this device ('%s') in the" - " instance." 
% self.op.disk_name) - for child in disk.children: - if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id: - break - else: - raise errors.OpPrereqError("Can't find the device with this port.") - - if len(disk.children) < 2: - raise errors.OpPrereqError("Cannot remove the last component from" - " a mirror.") - self.disk = disk - self.child = child - if self.child.logical_id[0] == instance.primary_node: - oid = 1 - else: - oid = 0 - self.old_secondary = self.child.logical_id[oid] - - def Exec(self, feedback_fn): - """Remove the mirror component - - """ - instance = self.instance - disk = self.disk - child = self.child - logger.Info("remove mirror component") - self.cfg.SetDiskID(disk, instance.primary_node) - if not rpc.call_blockdev_removechild(instance.primary_node, - disk, child): - raise errors.OpExecError("Can't remove child from mirror.") - - for node in child.logical_id[:2]: - self.cfg.SetDiskID(child, node) - if not rpc.call_blockdev_remove(node, child): - logger.Error("Warning: failed to remove device from node %s," - " continuing operation." % node) - - disk.children.remove(child) - self.cfg.AddInstance(instance) + # build ssh cmdline + return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True) class LUReplaceDisks(LogicalUnit): @@ -3228,7 +3309,7 @@ class LUReplaceDisks(LogicalUnit): """ HPATH = "mirrors-replace" HTYPE = constants.HTYPE_INSTANCE - _OP_REQP = ["instance_name"] + _OP_REQP = ["instance_name", "mode", "disks"] def BuildHooksEnv(self): """Build hooks env. @@ -3237,12 +3318,17 @@ class LUReplaceDisks(LogicalUnit): """ env = { + "MODE": self.op.mode, "NEW_SECONDARY": self.op.remote_node, "OLD_SECONDARY": self.instance.secondary_nodes[0], } env.update(_BuildInstanceHookEnvByObject(self.instance)) - nl = [self.sstore.GetMasterNode(), - self.instance.primary_node] + list(self.instance.secondary_nodes) + nl = [ + self.sstore.GetMasterNode(), + self.instance.primary_node, + ] + if self.op.remote_node is not None: + nl.append(self.op.remote_node) return env, nl, nl def CheckPrereq(self): @@ -3257,37 +3343,86 @@ class LUReplaceDisks(LogicalUnit): raise errors.OpPrereqError("Instance '%s' not known" % self.op.instance_name) self.instance = instance + self.op.instance_name = instance.name - if instance.disk_template != constants.DT_REMOTE_RAID1: + if instance.disk_template not in constants.DTS_NET_MIRROR: raise errors.OpPrereqError("Instance's disk layout is not" - " remote_raid1.") + " network mirrored.") if len(instance.secondary_nodes) != 1: raise errors.OpPrereqError("The instance has a strange layout," " expected one secondary but found %d" % len(instance.secondary_nodes)) + self.sec_node = instance.secondary_nodes[0] + remote_node = getattr(self.op, "remote_node", None) - if remote_node is None: - remote_node = instance.secondary_nodes[0] - else: + if remote_node is not None: remote_node = self.cfg.ExpandNodeName(remote_node) if remote_node is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node) + self.remote_node_info = self.cfg.GetNodeInfo(remote_node) + else: + self.remote_node_info = None if remote_node == instance.primary_node: raise errors.OpPrereqError("The specified node is the primary node of" " the instance.") + elif remote_node == self.sec_node: + if self.op.mode == constants.REPLACE_DISK_SEC: + # this is for DRBD8, where we can't execute the same mode of + # replacement as for drbd7 (no different port allocated) + raise errors.OpPrereqError("Same secondary given, cannot execute" + " replacement") + # the 
user gave the current secondary, switch to + # 'no-replace-secondary' mode for drbd7 + remote_node = None + if (instance.disk_template == constants.DT_REMOTE_RAID1 and + self.op.mode != constants.REPLACE_DISK_ALL): + raise errors.OpPrereqError("Template 'remote_raid1' only allows all" + " disks replacement, not individual ones") + if instance.disk_template == constants.DT_DRBD8: + if (self.op.mode == constants.REPLACE_DISK_ALL and + remote_node is not None): + # switch to replace secondary mode + self.op.mode = constants.REPLACE_DISK_SEC + + if self.op.mode == constants.REPLACE_DISK_ALL: + raise errors.OpPrereqError("Template 'drbd' only allows primary or" + " secondary disk replacement, not" + " both at once") + elif self.op.mode == constants.REPLACE_DISK_PRI: + if remote_node is not None: + raise errors.OpPrereqError("Template 'drbd' does not allow changing" + " the secondary while doing a primary" + " node disk replacement") + self.tgt_node = instance.primary_node + self.oth_node = instance.secondary_nodes[0] + elif self.op.mode == constants.REPLACE_DISK_SEC: + self.new_node = remote_node # this can be None, in which case + # we don't change the secondary + self.tgt_node = instance.secondary_nodes[0] + self.oth_node = instance.primary_node + else: + raise errors.ProgrammerError("Unhandled disk replace mode") + + for name in self.op.disks: + if instance.FindDisk(name) is None: + raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" % + (name, instance.name)) self.op.remote_node = remote_node - def Exec(self, feedback_fn): + def _ExecRR1(self, feedback_fn): """Replace the disks of an instance. """ instance = self.instance iv_names = {} # start of work - remote_node = self.op.remote_node + if self.op.remote_node is None: + remote_node = self.sec_node + else: + remote_node = self.op.remote_node cfg = self.cfg for dev in instance.disks: size = dev.size @@ -3299,28 +3434,29 @@ class LUReplaceDisks(LogicalUnit): logger.Info("adding new mirror component on secondary for %s" % dev.iv_name) #HARDCODE - if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False, + if not _CreateBlockDevOnSecondary(cfg, remote_node, instance, + new_drbd, False, _GetInstanceInfoText(instance)): - raise errors.OpExecError("Failed to create new component on" - " secondary node %s\n" - "Full abort, cleanup manually!" % + raise errors.OpExecError("Failed to create new component on secondary" + " node %s. Full abort, cleanup manually!" % remote_node) logger.Info("adding new mirror component on primary") #HARDCODE - if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd, + if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, + instance, new_drbd, _GetInstanceInfoText(instance)): # remove secondary dev cfg.SetDiskID(new_drbd, remote_node) rpc.call_blockdev_remove(remote_node, new_drbd) - raise errors.OpExecError("Failed to create volume on primary!\n" - "Full abort, cleanup manually!!") + raise errors.OpExecError("Failed to create volume on primary!" 
+ " Full abort, cleanup manually!!") # the device exists now # call the primary node to add the mirror to md logger.Info("adding new mirror component to md") - if not rpc.call_blockdev_addchild(instance.primary_node, dev, - new_drbd): + if not rpc.call_blockdev_addchildren(instance.primary_node, dev, + [new_drbd]): logger.Error("Can't add mirror compoment to md!") cfg.SetDiskID(new_drbd, remote_node) if not rpc.call_blockdev_remove(remote_node, new_drbd): @@ -3336,7 +3472,7 @@ class LUReplaceDisks(LogicalUnit): # this can fail as the old devices are degraded and _WaitForSync # does a combined result over all disks, so we don't check its # return value - _WaitForSync(cfg, instance, unlock=True) + _WaitForSync(cfg, instance, self.proc, unlock=True) # so check manually all the devices for name in iv_names: @@ -3354,8 +3490,8 @@ class LUReplaceDisks(LogicalUnit): dev, child, new_drbd = iv_names[name] logger.Info("remove mirror %s component" % name) cfg.SetDiskID(dev, instance.primary_node) - if not rpc.call_blockdev_removechild(instance.primary_node, - dev, child): + if not rpc.call_blockdev_removechildren(instance.primary_node, + dev, [child]): logger.Error("Can't remove child from mirror, aborting" " *this device cleanup*.\nYou need to cleanup manually!!") continue @@ -3371,6 +3507,358 @@ class LUReplaceDisks(LogicalUnit): cfg.AddInstance(instance) + def _ExecD8DiskOnly(self, feedback_fn): + """Replace a disk on the primary or secondary for dbrd8. + + The algorithm for replace is quite complicated: + - for each disk to be replaced: + - create new LVs on the target node with unique names + - detach old LVs from the drbd device + - rename old LVs to name_replaced. + - rename new LVs to old LVs + - attach the new LVs (with the old names now) to the drbd device + - wait for sync across all devices + - for each modified disk: + - remove old LVs (which have the name name_replaces.) + + Failures are not very well handled. 
+ + """ + steps_total = 6 + warning, info = (self.proc.LogWarning, self.proc.LogInfo) + instance = self.instance + iv_names = {} + vgname = self.cfg.GetVGName() + # start of work + cfg = self.cfg + tgt_node = self.tgt_node + oth_node = self.oth_node + + # Step: check device activation + self.proc.LogStep(1, steps_total, "check device existence") + info("checking volume groups") + my_vg = cfg.GetVGName() + results = rpc.call_vg_list([oth_node, tgt_node]) + if not results: + raise errors.OpExecError("Can't list volume groups on the nodes") + for node in oth_node, tgt_node: + res = results.get(node, False) + if not res or my_vg not in res: + raise errors.OpExecError("Volume group '%s' not found on %s" % + (my_vg, node)) + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + for node in tgt_node, oth_node: + info("checking %s on %s" % (dev.iv_name, node)) + cfg.SetDiskID(dev, node) + if not rpc.call_blockdev_find(node, dev): + raise errors.OpExecError("Can't find device %s on node %s" % + (dev.iv_name, node)) + + # Step: check other node consistency + self.proc.LogStep(2, steps_total, "check peer consistency") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s consistency on %s" % (dev.iv_name, oth_node)) + if not _CheckDiskConsistency(self.cfg, dev, oth_node, + oth_node==instance.primary_node): + raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe" + " to replace disks on this node (%s)" % + (oth_node, tgt_node)) + + # Step: create new storage + self.proc.LogStep(3, steps_total, "allocate new storage") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + size = dev.size + cfg.SetDiskID(dev, tgt_node) + lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]] + names = _GenerateUniqueNames(cfg, lv_names) + lv_data = objects.Disk(dev_type=constants.LD_LV, size=size, + logical_id=(vgname, names[0])) + lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128, + logical_id=(vgname, names[1])) + new_lvs = [lv_data, lv_meta] + old_lvs = dev.children + iv_names[dev.iv_name] = (dev, old_lvs, new_lvs) + info("creating new local storage on %s for %s" % + (tgt_node, dev.iv_name)) + # since we *always* want to create this LV, we use the + # _Create...OnPrimary (which forces the creation), even if we + # are talking about the secondary node + for new_lv in new_lvs: + if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new LV named '%s' on" + " node '%s'" % + (new_lv.logical_id[1], tgt_node)) + + # Step: for each lv, detach+rename*2+attach + self.proc.LogStep(4, steps_total, "change drbd configuration") + for dev, old_lvs, new_lvs in iv_names.itervalues(): + info("detaching %s drbd from local storage" % dev.iv_name) + if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs): + raise errors.OpExecError("Can't detach drbd from local storage on node" + " %s for device %s" % (tgt_node, dev.iv_name)) + #dev.children = [] + #cfg.Update(instance) + + # ok, we created the new LVs, so now we know we have the needed + # storage; as such, we proceed on the target node to rename + # old_lv to _old, and new_lv to old_lv; note that we rename LVs + # using the assumption that logical_id == physical_id (which in + # turn is the unique_id on that node) + + # FIXME(iustin): use a better name for the replaced LVs + temp_suffix = int(time.time()) + ren_fn = lambda d, suff: (d.physical_id[0], + 
d.physical_id[1] + "_replaced-%s" % suff) + # build the rename list based on what LVs exist on the node + rlist = [] + for to_ren in old_lvs: + find_res = rpc.call_blockdev_find(tgt_node, to_ren) + if find_res is not None: # device exists + rlist.append((to_ren, ren_fn(to_ren, temp_suffix))) + + info("renaming the old LVs on the target node") + if not rpc.call_blockdev_rename(tgt_node, rlist): + raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node) + # now we rename the new LVs to the old LVs + info("renaming the new LVs on the target node") + rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)] + if not rpc.call_blockdev_rename(tgt_node, rlist): + raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node) + + for old, new in zip(old_lvs, new_lvs): + new.logical_id = old.logical_id + cfg.SetDiskID(new, tgt_node) + + for disk in old_lvs: + disk.logical_id = ren_fn(disk, temp_suffix) + cfg.SetDiskID(disk, tgt_node) + + # now that the new lvs have the old name, we can add them to the device + info("adding new mirror component on %s" % tgt_node) + if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs): + for new_lv in new_lvs: + if not rpc.call_blockdev_remove(tgt_node, new_lv): + warning("Can't rollback device %s", hint="manually cleanup unused" + " logical volumes") + raise errors.OpExecError("Can't add local storage to drbd") + + dev.children = new_lvs + cfg.Update(instance) + + # Step: wait for sync + + # this can fail as the old devices are degraded and _WaitForSync + # does a combined result over all disks, so we don't check its + # return value + self.proc.LogStep(5, steps_total, "sync devices") + _WaitForSync(cfg, instance, self.proc, unlock=True) + + # so check manually all the devices + for name, (dev, old_lvs, new_lvs) in iv_names.iteritems(): + cfg.SetDiskID(dev, instance.primary_node) + is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5] + if is_degr: + raise errors.OpExecError("DRBD device %s is degraded!" % name) + + # Step: remove old storage + self.proc.LogStep(6, steps_total, "removing old storage") + for name, (dev, old_lvs, new_lvs) in iv_names.iteritems(): + info("remove logical volumes for %s" % name) + for lv in old_lvs: + cfg.SetDiskID(lv, tgt_node) + if not rpc.call_blockdev_remove(tgt_node, lv): + warning("Can't remove old LV", hint="manually remove unused LVs") + continue + + def _ExecD8Secondary(self, feedback_fn): + """Replace the secondary node for drbd8. + + The algorithm for replace is quite complicated: + - for all disks of the instance: + - create new LVs on the new node with same names + - shutdown the drbd device on the old secondary + - disconnect the drbd network on the primary + - create the drbd device on the new secondary + - network attach the drbd on the primary, using an artifice: + the drbd code for Attach() will connect to the network if it + finds a device which is connected to the good local disks but + not network enabled + - wait for sync across all devices + - remove all disks from the old secondary + + Failures are not very well handled. 
+ + """ + steps_total = 6 + warning, info = (self.proc.LogWarning, self.proc.LogInfo) + instance = self.instance + iv_names = {} + vgname = self.cfg.GetVGName() + # start of work + cfg = self.cfg + old_node = self.tgt_node + new_node = self.new_node + pri_node = instance.primary_node + + # Step: check device activation + self.proc.LogStep(1, steps_total, "check device existence") + info("checking volume groups") + my_vg = cfg.GetVGName() + results = rpc.call_vg_list([pri_node, new_node]) + if not results: + raise errors.OpExecError("Can't list volume groups on the nodes") + for node in pri_node, new_node: + res = results.get(node, False) + if not res or my_vg not in res: + raise errors.OpExecError("Volume group '%s' not found on %s" % + (my_vg, node)) + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s on %s" % (dev.iv_name, pri_node)) + cfg.SetDiskID(dev, pri_node) + if not rpc.call_blockdev_find(pri_node, dev): + raise errors.OpExecError("Can't find device %s on node %s" % + (dev.iv_name, pri_node)) + + # Step: check other node consistency + self.proc.LogStep(2, steps_total, "check peer consistency") + for dev in instance.disks: + if not dev.iv_name in self.op.disks: + continue + info("checking %s consistency on %s" % (dev.iv_name, pri_node)) + if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True): + raise errors.OpExecError("Primary node (%s) has degraded storage," + " unsafe to replace the secondary" % + pri_node) + + # Step: create new storage + self.proc.LogStep(3, steps_total, "allocate new storage") + for dev in instance.disks: + size = dev.size + info("adding new local storage on %s for %s" % (new_node, dev.iv_name)) + # since we *always* want to create this LV, we use the + # _Create...OnPrimary (which forces the creation), even if we + # are talking about the secondary node + for new_lv in dev.children: + if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new LV named '%s' on" + " node '%s'" % + (new_lv.logical_id[1], new_node)) + + iv_names[dev.iv_name] = (dev, dev.children) + + self.proc.LogStep(4, steps_total, "changing drbd configuration") + for dev in instance.disks: + size = dev.size + info("activating a new drbd on %s for %s" % (new_node, dev.iv_name)) + # create new devices on new_node + new_drbd = objects.Disk(dev_type=constants.LD_DRBD8, + logical_id=(pri_node, new_node, + dev.logical_id[2]), + children=dev.children) + if not _CreateBlockDevOnSecondary(cfg, new_node, instance, + new_drbd, False, + _GetInstanceInfoText(instance)): + raise errors.OpExecError("Failed to create new DRBD on" + " node '%s'" % new_node) + + for dev in instance.disks: + # we have new devices, shutdown the drbd on the old secondary + info("shutting down drbd for %s on old node" % dev.iv_name) + cfg.SetDiskID(dev, old_node) + if not rpc.call_blockdev_shutdown(old_node, dev): + warning("Failed to shutdown drbd for %s on old node" % dev.iv_name, + hint="Please cleanup this device manually as soon as possible") + + info("detaching primary drbds from the network (=> standalone)") + done = 0 + for dev in instance.disks: + cfg.SetDiskID(dev, pri_node) + # set the physical (unique in bdev terms) id to None, meaning + # detach from network + dev.physical_id = (None,) * len(dev.physical_id) + # and 'find' the device, which will 'fix' it to match the + # standalone state + if rpc.call_blockdev_find(pri_node, dev): + done += 1 + else: + 
warning("Failed to detach drbd %s from network, unusual case" % + dev.iv_name) + + if not done: + # no detaches succeeded (very unlikely) + raise errors.OpExecError("Can't detach at least one DRBD from old node") + + # if we managed to detach at least one, we update all the disks of + # the instance to point to the new secondary + info("updating instance configuration") + for dev in instance.disks: + dev.logical_id = (pri_node, new_node) + dev.logical_id[2:] + cfg.SetDiskID(dev, pri_node) + cfg.Update(instance) + + # and now perform the drbd attach + info("attaching primary drbds to new secondary (standalone => connected)") + failures = [] + for dev in instance.disks: + info("attaching primary drbd for %s to new secondary node" % dev.iv_name) + # since the attach is smart, it's enough to 'find' the device, + # it will automatically activate the network, if the physical_id + # is correct + cfg.SetDiskID(dev, pri_node) + if not rpc.call_blockdev_find(pri_node, dev): + warning("can't attach drbd %s to new secondary!" % dev.iv_name, + "please do a gnt-instance info to see the status of disks") + + # this can fail as the old devices are degraded and _WaitForSync + # does a combined result over all disks, so we don't check its + # return value + self.proc.LogStep(5, steps_total, "sync devices") + _WaitForSync(cfg, instance, self.proc, unlock=True) + + # so check manually all the devices + for name, (dev, old_lvs) in iv_names.iteritems(): + cfg.SetDiskID(dev, pri_node) + is_degr = rpc.call_blockdev_find(pri_node, dev)[5] + if is_degr: + raise errors.OpExecError("DRBD device %s is degraded!" % name) + + self.proc.LogStep(6, steps_total, "removing old storage") + for name, (dev, old_lvs) in iv_names.iteritems(): + info("remove logical volumes for %s" % name) + for lv in old_lvs: + cfg.SetDiskID(lv, old_node) + if not rpc.call_blockdev_remove(old_node, lv): + warning("Can't remove LV on old secondary", + hint="Cleanup stale volumes by hand") + + def Exec(self, feedback_fn): + """Execute disk replacement. + + This dispatches the disk replacement to the appropriate handler. + + """ + instance = self.instance + if instance.disk_template == constants.DT_REMOTE_RAID1: + fn = self._ExecRR1 + elif instance.disk_template == constants.DT_DRBD8: + if self.op.remote_node is None: + fn = self._ExecD8DiskOnly + else: + fn = self._ExecD8Secondary + else: + raise errors.ProgrammerError("Unhandled disk replacement case") + return fn(feedback_fn) + class LUQueryInstanceData(NoHooksLU): """Query runtime instance data. 
@@ -3393,7 +3881,7 @@ class LUQueryInstanceData(NoHooksLU): instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name)) if instance is None: raise errors.OpPrereqError("No such instance name '%s'" % name) - self.wanted_instances.append(instance) + self.wanted_instances.append(instance) else: self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name in self.cfg.GetInstanceList()] @@ -3406,7 +3894,7 @@ class LUQueryInstanceData(NoHooksLU): """ self.cfg.SetDiskID(dev, instance.primary_node) dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev) - if dev.dev_type == "drbd": + if dev.dev_type in constants.LDS_DRBD: # we change the snode then (otherwise we use the one passed in) if dev.logical_id[0] == instance.primary_node: snode = dev.logical_id[1] @@ -3465,6 +3953,11 @@ class LUQueryInstanceData(NoHooksLU): "memory": instance.memory, "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics], "disks": disks, + "network_port": instance.network_port, + "vcpus": instance.vcpus, + "kernel_path": instance.kernel_path, + "initrd_path": instance.initrd_path, + "hvm_boot_order": instance.hvm_boot_order, } result[instance.name] = idict @@ -3472,7 +3965,7 @@ class LUQueryInstanceData(NoHooksLU): return result -class LUSetInstanceParms(LogicalUnit): +class LUSetInstanceParams(LogicalUnit): """Modifies an instances's parameters. """ @@ -3491,7 +3984,7 @@ class LUSetInstanceParms(LogicalUnit): args['memory'] = self.mem if self.vcpus: args['vcpus'] = self.vcpus - if self.do_ip or self.do_bridge: + if self.do_ip or self.do_bridge or self.mac: if self.do_ip: ip = self.ip else: @@ -3500,7 +3993,11 @@ class LUSetInstanceParms(LogicalUnit): bridge = self.bridge else: bridge = self.instance.nics[0].bridge - args['nics'] = [(ip, bridge)] + if self.mac: + mac = self.mac + else: + mac = self.instance.nics[0].mac + args['nics'] = [(ip, bridge, mac)] env = _BuildInstanceHookEnvByObject(self.instance, override=args) nl = [self.sstore.GetMasterNode(), self.instance.primary_node] + list(self.instance.secondary_nodes) @@ -3515,8 +4012,14 @@ class LUSetInstanceParms(LogicalUnit): self.mem = getattr(self.op, "mem", None) self.vcpus = getattr(self.op, "vcpus", None) self.ip = getattr(self.op, "ip", None) + self.mac = getattr(self.op, "mac", None) self.bridge = getattr(self.op, "bridge", None) - if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4: + self.kernel_path = getattr(self.op, "kernel_path", None) + self.initrd_path = getattr(self.op, "initrd_path", None) + self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None) + all_params = [self.mem, self.vcpus, self.ip, self.bridge, self.mac, + self.kernel_path, self.initrd_path, self.hvm_boot_order] + if all_params.count(None) == len(all_params): raise errors.OpPrereqError("No changes submitted") if self.mem is not None: try: @@ -3538,6 +4041,42 @@ class LUSetInstanceParms(LogicalUnit): else: self.do_ip = False self.do_bridge = (self.bridge is not None) + if self.mac is not None: + if self.cfg.IsMacInUse(self.mac): + raise errors.OpPrereqError('MAC address %s already in use in cluster' % + self.mac) + if not utils.IsValidMac(self.mac): + raise errors.OpPrereqError('Invalid MAC address %s' % self.mac) + + if self.kernel_path is not None: + self.do_kernel_path = True + if self.kernel_path == constants.VALUE_NONE: + raise errors.OpPrereqError("Can't set instance to no kernel") + + if self.kernel_path != constants.VALUE_DEFAULT: + if not os.path.isabs(self.kernel_path): + raise errors.OpPrereqError("The kernel path must be an 
absolute" + " filename") + else: + self.do_kernel_path = False + + if self.initrd_path is not None: + self.do_initrd_path = True + if self.initrd_path not in (constants.VALUE_NONE, + constants.VALUE_DEFAULT): + if not os.path.isabs(self.initrd_path): + raise errors.OpPrereqError("The initrd path must be an absolute" + " filename") + else: + self.do_initrd_path = False + + # boot order verification + if self.hvm_boot_order is not None: + if self.hvm_boot_order != constants.VALUE_DEFAULT: + if len(self.hvm_boot_order.strip("acdn")) != 0: + raise errors.OpPrereqError("invalid boot order specified," + " must be one or more of [acdn]" + " or 'default'") instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) @@ -3567,6 +4106,21 @@ class LUSetInstanceParms(LogicalUnit): if self.bridge: instance.nics[0].bridge = self.bridge result.append(("bridge", self.bridge)) + if self.mac: + instance.nics[0].mac = self.mac + result.append(("mac", self.mac)) + if self.do_kernel_path: + instance.kernel_path = self.kernel_path + result.append(("kernel_path", self.kernel_path)) + if self.do_initrd_path: + instance.initrd_path = self.initrd_path + result.append(("initrd_path", self.initrd_path)) + if self.hvm_boot_order: + if self.hvm_boot_order == constants.VALUE_DEFAULT: + instance.hvm_boot_order = None + else: + instance.hvm_boot_order = self.hvm_boot_order + result.append(("hvm_boot_order", self.hvm_boot_order)) self.cfg.AddInstance(instance) @@ -3648,10 +4202,11 @@ class LUExportInstance(LogicalUnit): instance = self.instance dst_node = self.dst_node src_node = instance.primary_node - # shutdown the instance, unless requested not to do so if self.op.shutdown: - op = opcodes.OpShutdownInstance(instance_name=instance.name) - self.processor.ChainOpCode(op, feedback_fn) + # shutdown the instance, but not the disks + if not rpc.call_instance_shutdown(src_node, instance): + raise errors.OpExecError("Could not shutdown instance %s on node %s" % + (instance.name, source_node)) vgname = self.cfg.GetVGName() @@ -3667,17 +4222,17 @@ class LUExportInstance(LogicalUnit): logger.Error("could not snapshot block device %s on node %s" % (disk.logical_id[1], src_node)) else: - new_dev = objects.Disk(dev_type="lvm", size=disk.size, + new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size, logical_id=(vgname, new_dev_name), physical_id=(vgname, new_dev_name), iv_name=disk.iv_name) snap_disks.append(new_dev) finally: - if self.op.shutdown: - op = opcodes.OpStartupInstance(instance_name=instance.name, - force=False) - self.processor.ChainOpCode(op, feedback_fn) + if self.op.shutdown and instance.status == "up": + if not rpc.call_instance_start(src_node, instance, None): + _ShutdownInstanceDisks(instance, self.cfg) + raise errors.OpExecError("Could not start instance") # TODO: check for size @@ -3703,7 +4258,7 @@ class LUExportInstance(LogicalUnit): # substitutes an empty list with the full cluster node list. 
if nodelist: op = opcodes.OpQueryExports(nodes=nodelist) - exportlist = self.processor.ChainOpCode(op, feedback_fn) + exportlist = self.proc.ChainOpCode(op) for node in exportlist: if instance.name in exportlist[node]: if not rpc.call_export_remove(node, instance.name): @@ -3731,7 +4286,7 @@ class TagsLU(NoHooksLU): self.op.name = name self.target = self.cfg.GetNodeInfo(name) elif self.op.kind == constants.TAG_INSTANCE: - name = self.cfg.ExpandInstanceName(name) + name = self.cfg.ExpandInstanceName(self.op.name) if name is None: raise errors.OpPrereqError("Invalid instance name (%s)" % (self.op.name,)) @@ -3755,11 +4310,47 @@ class LUGetTags(TagsLU): return self.target.GetTags() -class LUAddTag(TagsLU): +class LUSearchTags(NoHooksLU): + """Searches the tags for a given pattern. + + """ + _OP_REQP = ["pattern"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks the pattern passed for validity by compiling it. + + """ + try: + self.re = re.compile(self.op.pattern) + except re.error, err: + raise errors.OpPrereqError("Invalid search pattern '%s': %s" % + (self.op.pattern, err)) + + def Exec(self, feedback_fn): + """Returns the tag list. + + """ + cfg = self.cfg + tgts = [("/cluster", cfg.GetClusterInfo())] + ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()] + tgts.extend([("/instances/%s" % i.name, i) for i in ilist]) + nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()] + tgts.extend([("/nodes/%s" % n.name, n) for n in nlist]) + results = [] + for path, target in tgts: + for tag in target.GetTags(): + if self.re.search(tag): + results.append((path, tag)) + return results + + +class LUAddTags(TagsLU): """Sets a tag on a given object. """ - _OP_REQP = ["kind", "name", "tag"] + _OP_REQP = ["kind", "name", "tags"] def CheckPrereq(self): """Check prerequisites. @@ -3768,14 +4359,16 @@ class LUAddTag(TagsLU): """ TagsLU.CheckPrereq(self) - objects.TaggableObject.ValidateTag(self.op.tag) + for tag in self.op.tags: + objects.TaggableObject.ValidateTag(tag) def Exec(self, feedback_fn): """Sets the tag. """ try: - self.target.AddTag(self.op.tag) + for tag in self.op.tags: + self.target.AddTag(tag) except errors.TagError, err: raise errors.OpExecError("Error while setting tag: %s" % str(err)) try: @@ -3786,11 +4379,11 @@ class LUAddTag(TagsLU): " aborted. Please retry.") -class LUDelTag(TagsLU): - """Delete a tag from a given object. +class LUDelTags(TagsLU): + """Delete a list of tags from a given object. """ - _OP_REQP = ["kind", "name", "tag"] + _OP_REQP = ["kind", "name", "tags"] def CheckPrereq(self): """Check prerequisites. @@ -3799,18 +4392,62 @@ class LUDelTag(TagsLU): """ TagsLU.CheckPrereq(self) - objects.TaggableObject.ValidateTag(self.op.tag) - if self.op.tag not in self.target.GetTags(): - raise errors.OpPrereqError("Tag not found") + for tag in self.op.tags: + objects.TaggableObject.ValidateTag(tag) + del_tags = frozenset(self.op.tags) + cur_tags = self.target.GetTags() + if not del_tags <= cur_tags: + diff_tags = del_tags - cur_tags + diff_names = ["'%s'" % tag for tag in diff_tags] + diff_names.sort() + raise errors.OpPrereqError("Tag(s) %s not found" % + (",".join(diff_names))) def Exec(self, feedback_fn): """Remove the tag from the object. """ - self.target.RemoveTag(self.op.tag) + for tag in self.op.tags: + self.target.RemoveTag(tag) try: self.cfg.Update(self.target) except errors.ConfigurationError: raise errors.OpRetryError("There has been a modification to the" " config file and the operation has been" " aborted. 
Please retry.") + +class LUTestDelay(NoHooksLU): + """Sleep for a specified amount of time. + + This LU sleeps on the master and/or nodes for a specified amoutn of + time. + + """ + _OP_REQP = ["duration", "on_master", "on_nodes"] + + def CheckPrereq(self): + """Check prerequisites. + + This checks that we have a good list of nodes and/or the duration + is valid. + + """ + + if self.op.on_nodes: + self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes) + + def Exec(self, feedback_fn): + """Do the actual sleep. + + """ + if self.op.on_master: + if not utils.TestDelay(self.op.duration): + raise errors.OpExecError("Error during master delay test") + if self.op.on_nodes: + result = rpc.call_test_delay(self.op.on_nodes, self.op.duration) + if not result: + raise errors.OpExecError("Complete failure from rpc call") + for node, node_result in result.items(): + if not node_result: + raise errors.OpExecError("Failure during rpc call to node %s," + " result: %s" % (node, node_result))