X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/397693d324502596c287010f3bd50c27f9358687..57c7bc57723715fb20c46cf83337558d90c28054:/lib/backend.py diff --git a/lib/backend.py b/lib/backend.py index 8f53364..ed60340 100644 --- a/lib/backend.py +++ b/lib/backend.py @@ -28,7 +28,7 @@ """ -# pylint: disable-msg=E1103 +# pylint: disable=E1103 # E1103: %s %r has no %r member (but some types could not be # inferred), because the _TryOSFromDisk returns either (True, os_obj) @@ -60,6 +60,8 @@ from ganeti import ssconf from ganeti import serializer from ganeti import netutils from ganeti import runtime +from ganeti import mcpu +from ganeti import compat _BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id" @@ -196,6 +198,8 @@ def _BuildUploadFileList(): constants.SSH_KNOWN_HOSTS_FILE, constants.VNC_PASSWORD_FILE, constants.RAPI_CERT_FILE, + constants.SPICE_CERT_FILE, + constants.SPICE_CACERT_FILE, constants.RAPI_USERS_FILE, constants.CONFD_HMAC_KEY, constants.CLUSTER_DOMAIN_SECRET_FILE, @@ -203,7 +207,7 @@ def _BuildUploadFileList(): for hv_name in constants.HYPER_TYPES: hv_class = hypervisor.GetHypervisorClass(hv_name) - allowed_files.update(hv_class.GetAncillaryFiles()) + allowed_files.update(hv_class.GetAncillaryFiles()[0]) return frozenset(allowed_files) @@ -229,7 +233,8 @@ def GetMasterInfo(): for consumption here or from the node daemon. @rtype: tuple - @return: master_netdev, master_ip, master_name, primary_ip_family + @return: master_netdev, master_ip, master_name, primary_ip_family, + master_netmask @raise RPCFail: in case of errors """ @@ -237,125 +242,222 @@ def GetMasterInfo(): cfg = _GetConfig() master_netdev = cfg.GetMasterNetdev() master_ip = cfg.GetMasterIP() + master_netmask = cfg.GetMasterNetmask() master_node = cfg.GetMasterNode() primary_ip_family = cfg.GetPrimaryIPFamily() except errors.ConfigurationError, err: _Fail("Cluster configuration incomplete: %s", err, exc=True) - return (master_netdev, master_ip, master_node, primary_ip_family) + return (master_netdev, master_ip, master_node, primary_ip_family, + master_netmask) -def StartMaster(start_daemons, no_voting): - """Activate local node as master node. +def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn): + """Decorator that runs hooks before and after the decorated function. - The function will either try activate the IP address of the master - (unless someone else has it) or also start the master daemons, based - on the start_daemons parameter. + @type hook_opcode: string + @param hook_opcode: opcode of the hook + @type hooks_path: string + @param hooks_path: path of the hooks + @type env_builder_fn: function + @param env_builder_fn: function that returns a dictionary containing the + environment variables for the hooks. Will get all the parameters of the + decorated function. 
+ @raise RPCFail: in case of pre-hook failure - @type start_daemons: boolean - @param start_daemons: whether to start the master daemons - (ganeti-masterd and ganeti-rapi), or (if false) activate the - master ip - @type no_voting: boolean - @param no_voting: whether to start ganeti-masterd without a node vote - (if start_daemons is True), but still non-interactively - @rtype: None + """ + def decorator(fn): + def wrapper(*args, **kwargs): + _, myself = ssconf.GetMasterAndMyself() + nodes = ([myself], [myself]) # these hooks run locally + + env_fn = compat.partial(env_builder_fn, *args, **kwargs) + + cfg = _GetConfig() + hr = HooksRunner() + hm = mcpu.HooksMaster(hook_opcode, hooks_path, nodes, hr.RunLocalHooks, + None, env_fn, logging.warning, cfg.GetClusterName(), + cfg.GetMasterNode()) + + hm.RunPhase(constants.HOOKS_PHASE_PRE) + result = fn(*args, **kwargs) + hm.RunPhase(constants.HOOKS_PHASE_POST) + + return result + return wrapper + return decorator + + +def _BuildMasterIpEnv(master_params, use_external_mip_script=None): + """Builds environment variables for master IP hooks. + + @type master_params: L{objects.MasterNetworkParameters} + @param master_params: network parameters of the master + @type use_external_mip_script: boolean + @param use_external_mip_script: whether to use an external master IP + address setup script (unused, but necessary per the implementation of the + _RunLocalHooks decorator) """ - # GetMasterInfo will raise an exception if not able to return data - master_netdev, master_ip, _, family = GetMasterInfo() + # pylint: disable=W0613 + ver = netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family) + env = { + "MASTER_NETDEV": master_params.netdev, + "MASTER_IP": master_params.ip, + "MASTER_NETMASK": master_params.netmask, + "CLUSTER_IP_VERSION": str(ver), + } - err_msgs = [] - # either start the master and rapi daemons - if start_daemons: - if no_voting: - masterd_args = "--no-voting --yes-do-it" - else: - masterd_args = "" + return env - env = { - "EXTRA_MASTERD_ARGS": masterd_args, - } - result = utils.RunCmd([constants.DAEMON_UTIL, "start-master"], env=env) - if result.failed: - msg = "Can't start Ganeti master: %s" % result.output - logging.error(msg) - err_msgs.append(msg) - # or activate the IP - else: - if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT): - if netutils.IPAddress.Own(master_ip): - # we already have the ip: - logging.debug("Master IP already configured, doing nothing") - else: - msg = "Someone else has the master ip, not activating" - logging.error(msg) - err_msgs.append(msg) +@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup", + _BuildMasterIpEnv) +def ActivateMasterIp(master_params, use_external_mip_script): + """Activate the IP address of the master daemon. 
+ + @type master_params: L{objects.MasterNetworkParameters} + @param master_params: network parameters of the master + @type use_external_mip_script: boolean + @param use_external_mip_script: whether to use an external master IP + address setup script + + """ + # pylint: disable=W0613 + err_msg = None + if netutils.TcpPing(master_params.ip, constants.DEFAULT_NODED_PORT): + if netutils.IPAddress.Own(master_params.ip): + # we already have the ip: + logging.debug("Master IP already configured, doing nothing") else: - ipcls = netutils.IP4Address - if family == netutils.IP6Address.family: - ipcls = netutils.IP6Address - - result = utils.RunCmd(["ip", "address", "add", - "%s/%d" % (master_ip, ipcls.iplen), - "dev", master_netdev, "label", - "%s:0" % master_netdev]) - if result.failed: - msg = "Can't activate master IP: %s" % result.output - logging.error(msg) - err_msgs.append(msg) + err_msg = "Someone else has the master ip, not activating" + logging.error(err_msg) + else: + ipcls = netutils.IPAddress.GetClassFromIpFamily(master_params.ip_family) + + result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add", + "%s/%s" % (master_params.ip, master_params.netmask), + "dev", master_params.netdev, "label", + "%s:0" % master_params.netdev]) + if result.failed: + err_msg = "Can't activate master IP: %s" % result.output + logging.error(err_msg) + else: # we ignore the exit code of the following cmds if ipcls == netutils.IP4Address: - utils.RunCmd(["arping", "-q", "-U", "-c 3", "-I", master_netdev, "-s", - master_ip, master_ip]) + utils.RunCmd(["arping", "-q", "-U", "-c 3", "-I", master_params.netdev, + "-s", master_params.ip, master_params.ip]) elif ipcls == netutils.IP6Address: try: - utils.RunCmd(["ndisc6", "-q", "-r 3", master_ip, master_netdev]) + utils.RunCmd(["ndisc6", "-q", "-r 3", master_params.ip, + master_params.netdev]) except errors.OpExecError: # TODO: Better error reporting logging.warning("Can't execute ndisc6, please install if missing") - if err_msgs: - _Fail("; ".join(err_msgs)) + if err_msg: + _Fail(err_msg) -def StopMaster(stop_daemons): - """Deactivate this node as master. +def StartMasterDaemons(no_voting): + """Activate local node as master node. - The function will always try to deactivate the IP address of the - master. It will also stop the master daemons depending on the - stop_daemons parameter. + The function will start the master daemons (ganeti-masterd and ganeti-rapi). 
- @type stop_daemons: boolean - @param stop_daemons: whether to also stop the master daemons - (ganeti-masterd and ganeti-rapi) + @type no_voting: boolean + @param no_voting: whether to start ganeti-masterd without a node vote + but still non-interactively @rtype: None """ - # TODO: log and report back to the caller the error failures; we - # need to decide in which case we fail the RPC for this - # GetMasterInfo will raise an exception if not able to return data - master_netdev, master_ip, _, family = GetMasterInfo() + if no_voting: + masterd_args = "--no-voting --yes-do-it" + else: + masterd_args = "" - ipcls = netutils.IP4Address - if family == netutils.IP6Address.family: - ipcls = netutils.IP6Address + env = { + "EXTRA_MASTERD_ARGS": masterd_args, + } - result = utils.RunCmd(["ip", "address", "del", - "%s/%d" % (master_ip, ipcls.iplen), - "dev", master_netdev]) + result = utils.RunCmd([constants.DAEMON_UTIL, "start-master"], env=env) + if result.failed: + msg = "Can't start Ganeti master: %s" % result.output + logging.error(msg) + _Fail(msg) + + +@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown", + _BuildMasterIpEnv) +def DeactivateMasterIp(master_params, use_external_mip_script): + """Deactivate the master IP on this node. + + @type master_params: L{objects.MasterNetworkParameters} + @param master_params: network parameters of the master + @type use_external_mip_script: boolean + @param use_external_mip_script: whether to use an external master IP + address setup script + + """ + # pylint: disable=W0613 + # TODO: log and report back to the caller the error failures; we + # need to decide in which case we fail the RPC for this + + result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del", + "%s/%s" % (master_params.ip, master_params.netmask), + "dev", master_params.netdev]) if result.failed: logging.error("Can't remove the master IP, error: %s", result.output) # but otherwise ignore the failure - if stop_daemons: - result = utils.RunCmd([constants.DAEMON_UTIL, "stop-master"]) - if result.failed: - logging.error("Could not stop Ganeti master, command %s had exitcode %s" - " and error %s", - result.cmd, result.exit_code, result.output) + +def StopMasterDaemons(): + """Stop the master daemons on this node. + + Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node. + + @rtype: None + + """ + # TODO: log and report back to the caller the error failures; we + # need to decide in which case we fail the RPC for this + + result = utils.RunCmd([constants.DAEMON_UTIL, "stop-master"]) + if result.failed: + logging.error("Could not stop Ganeti master, command %s had exitcode %s" + " and error %s", + result.cmd, result.exit_code, result.output) + + +def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev): + """Change the netmask of the master IP. 
+ + @param old_netmask: the old value of the netmask + @param netmask: the new value of the netmask + @param master_ip: the master IP + @param master_netdev: the master network device + + """ + if old_netmask == netmask: + return + + if not netutils.IPAddress.Own(master_ip): + _Fail("The master IP address is not up, not attempting to change its" + " netmask") + + result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add", + "%s/%s" % (master_ip, netmask), + "dev", master_netdev, "label", + "%s:0" % master_netdev]) + if result.failed: + _Fail("Could not set the new netmask on the master IP address") + + result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del", + "%s/%s" % (master_ip, old_netmask), + "dev", master_netdev, "label", + "%s:0" % master_netdev]) + if result.failed: + _Fail("Could not bring down the master IP address with the old netmask") def EtcHostsModify(mode, host, ip): @@ -411,8 +513,10 @@ def LeaveCluster(modify_ssh_setup): try: utils.RemoveFile(constants.CONFD_HMAC_KEY) utils.RemoveFile(constants.RAPI_CERT_FILE) + utils.RemoveFile(constants.SPICE_CERT_FILE) + utils.RemoveFile(constants.SPICE_CACERT_FILE) utils.RemoveFile(constants.NODED_CERT_FILE) - except: # pylint: disable-msg=W0702 + except: # pylint: disable=W0702 logging.exception("Error while removing cluster secrets") result = utils.RunCmd([constants.DAEMON_UTIL, "stop", constants.CONFD]) @@ -421,7 +525,7 @@ def LeaveCluster(modify_ssh_setup): result.cmd, result.exit_code, result.output) # Raise a custom exception (handled in ganeti-noded) - raise errors.QuitGanetiException(True, 'Shutdown scheduled') + raise errors.QuitGanetiException(True, "Shutdown scheduled") def GetNodeInfo(vgname, hypervisor_type): @@ -439,6 +543,7 @@ def GetNodeInfo(vgname, hypervisor_type): - memory_dom0 is the memory allocated for domain0 in MiB - memory_free is the currently available (free) ram in MiB - memory_total is the total number of ram in MiB + - hv_version: the hypervisor version, if available """ outputarray = {} @@ -449,8 +554,8 @@ def GetNodeInfo(vgname, hypervisor_type): if vginfo: vg_free = int(round(vginfo[0][0], 0)) vg_size = int(round(vginfo[0][1], 0)) - outputarray['vg_size'] = vg_size - outputarray['vg_free'] = vg_free + outputarray["vg_size"] = vg_size + outputarray["vg_free"] = vg_free if hypervisor_type is not None: hyper = hypervisor.GetHypervisor(hypervisor_type) @@ -506,17 +611,39 @@ def VerifyNode(what, cluster_name): val = "Error while checking hypervisor: %s" % str(err) tmp[hv_name] = val + if constants.NV_HVPARAMS in what and vm_capable: + result[constants.NV_HVPARAMS] = tmp = [] + for source, hv_name, hvparms in what[constants.NV_HVPARAMS]: + try: + logging.info("Validating hv %s, %s", hv_name, hvparms) + hypervisor.GetHypervisor(hv_name).ValidateParameters(hvparms) + except errors.HypervisorError, err: + tmp.append((source, hv_name, str(err))) + if constants.NV_FILELIST in what: result[constants.NV_FILELIST] = utils.FingerprintFiles( what[constants.NV_FILELIST]) if constants.NV_NODELIST in what: - result[constants.NV_NODELIST] = tmp = {} - random.shuffle(what[constants.NV_NODELIST]) - for node in what[constants.NV_NODELIST]: + (nodes, bynode) = what[constants.NV_NODELIST] + + # Add nodes from other groups (different for each node) + try: + nodes.extend(bynode[my_name]) + except KeyError: + pass + + # Use a random order + random.shuffle(nodes) + + # Try to contact all nodes + val = {} + for node in nodes: success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node) if not success: 
- tmp[node] = message + val[node] = message + + result[constants.NV_NODELIST] = val if constants.NV_NODENETTEST in what: result[constants.NV_NODENETTEST] = tmp = {} @@ -552,6 +679,11 @@ def VerifyNode(what, cluster_name): result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port, source=source) + if constants.NV_USERSCRIPTS in what: + result[constants.NV_USERSCRIPTS] = \ + [script for script in what[constants.NV_USERSCRIPTS] + if not (os.path.exists(script) and os.access(script, os.X_OK))] + if constants.NV_OOB_PATHS in what: result[constants.NV_OOB_PATHS] = tmp = [] for path in what[constants.NV_OOB_PATHS]: @@ -635,9 +767,51 @@ def VerifyNode(what, cluster_name): if constants.NV_OSLIST in what and vm_capable: result[constants.NV_OSLIST] = DiagnoseOS() + if constants.NV_BRIDGES in what and vm_capable: + result[constants.NV_BRIDGES] = [bridge + for bridge in what[constants.NV_BRIDGES] + if not utils.BridgeExists(bridge)] return result +def GetBlockDevSizes(devices): + """Return the size of the given block devices + + @type devices: list + @param devices: list of block device nodes to query + @rtype: dict + @return: + dictionary of all block devices under /dev (key). The value is their + size in MiB. + + {'/dev/disk/by-uuid/123456-12321231-312312-312': 124} + + """ + DEV_PREFIX = "/dev/" + blockdevs = {} + + for devpath in devices: + if not utils.IsBelowDir(DEV_PREFIX, devpath): + continue + + try: + st = os.stat(devpath) + except EnvironmentError, err: + logging.warning("Error stat()'ing device %s: %s", devpath, str(err)) + continue + + if stat.S_ISBLK(st.st_mode): + result = utils.RunCmd(["blockdev", "--getsize64", devpath]) + if result.failed: + # We don't want to fail, just do not list this device as available + logging.warning("Cannot get size for block device %s", devpath) + continue + + size = int(result.stdout) / (1024 * 1024) + blockdevs[devpath] = size + return blockdevs + + def GetVolumeList(vg_names): """Compute list of logical volumes and their size. 
@@ -656,7 +830,7 @@ def GetVolumeList(vg_names): """ lvs = {} - sep = '|' + sep = "|" if not vg_names: vg_names = [] result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix", @@ -672,14 +846,14 @@ def GetVolumeList(vg_names): logging.error("Invalid line returned from lvs output: '%s'", line) continue vg_name, name, size, attr = match.groups() - inactive = attr[4] == '-' - online = attr[5] == 'o' - virtual = attr[0] == 'v' + inactive = attr[4] == "-" + online = attr[5] == "o" + virtual = attr[0] == "v" if virtual: # we don't want to report such volumes as existing, since they # don't really hold data continue - lvs[vg_name+"/"+name] = (size, inactive, online) + lvs[vg_name + "/" + name] = (size, inactive, online) return lvs @@ -722,20 +896,20 @@ def NodeVolumes(): result.output) def parse_dev(dev): - return dev.split('(')[0] + return dev.split("(")[0] def handle_dev(dev): return [parse_dev(x) for x in dev.split(",")] def map_line(line): line = [v.strip() for v in line] - return [{'name': line[0], 'size': line[1], - 'dev': dev, 'vg': line[3]} for dev in handle_dev(line[2])] + return [{"name": line[0], "size": line[1], + "dev": dev, "vg": line[3]} for dev in handle_dev(line[2])] all_devs = [] for line in result.stdout.splitlines(): - if line.count('|') >= 3: - all_devs.extend(map_line(line.split('|'))) + if line.count("|") >= 3: + all_devs.extend(map_line(line.split("|"))) else: logging.warning("Strange line in the output from lvs: '%s'", line) return all_devs @@ -800,9 +974,9 @@ def GetInstanceInfo(instance, hname): iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance) if iinfo is not None: - output['memory'] = iinfo[2] - output['state'] = iinfo[4] - output['time'] = iinfo[5] + output["memory"] = iinfo[2] + output["state"] = iinfo[4] + output["time"] = iinfo[5] return output @@ -856,16 +1030,16 @@ def GetAllInstancesInfo(hypervisor_list): if iinfo: for name, _, memory, vcpus, state, times in iinfo: value = { - 'memory': memory, - 'vcpus': vcpus, - 'state': state, - 'time': times, + "memory": memory, + "vcpus": vcpus, + "state": state, + "time": times, } if name in output: # we only check static parameters, like memory and vcpus, # and not state and time which can change between the # invocations of the different hypervisors - for key in 'memory', 'vcpus': + for key in "memory", "vcpus": if value[key] != output[name][key]: _Fail("Instance %s is running twice" " with different parameters", name) @@ -874,7 +1048,7 @@ def GetAllInstancesInfo(hypervisor_list): return output -def _InstanceLogName(kind, os_name, instance): +def _InstanceLogName(kind, os_name, instance, component): """Compute the OS log filename for a given instance and operation. The instance name and os name are passed in as strings since not all @@ -886,11 +1060,19 @@ def _InstanceLogName(kind, os_name, instance): @param os_name: the os name @type instance: string @param instance: the name of the instance being imported/added/etc. 
+ @type component: string or None + @param component: the name of the component of the instance being + transferred """ # TODO: Use tempfile.mkstemp to create unique filename - base = ("%s-%s-%s-%s.log" % - (kind, os_name, instance, utils.TimestampForFilename())) + if component: + assert "/" not in component + c_msg = "-%s" % component + else: + c_msg = "" + base = ("%s-%s-%s%s-%s.log" % + (kind, os_name, instance, c_msg, utils.TimestampForFilename())) return utils.PathJoin(constants.LOG_OS_DIR, base) @@ -910,12 +1092,12 @@ def InstanceOsAdd(instance, reinstall, debug): create_env = OSEnvironment(instance, inst_os, debug) if reinstall: - create_env['INSTANCE_REINSTALL'] = "1" + create_env["INSTANCE_REINSTALL"] = "1" - logfile = _InstanceLogName("add", instance.os, instance.name) + logfile = _InstanceLogName("add", instance.os, instance.name, None) result = utils.RunCmd([inst_os.create_script], env=create_env, - cwd=inst_os.path, output=logfile,) + cwd=inst_os.path, output=logfile, reset_env=True) if result.failed: logging.error("os create command '%s' returned error: %s, logfile: %s," " output: %s", result.cmd, result.fail_reason, logfile, @@ -942,13 +1124,13 @@ def RunRenameInstance(instance, old_name, debug): inst_os = OSFromDisk(instance.os) rename_env = OSEnvironment(instance, inst_os, debug) - rename_env['OLD_INSTANCE_NAME'] = old_name + rename_env["OLD_INSTANCE_NAME"] = old_name logfile = _InstanceLogName("rename", instance.os, - "%s-%s" % (old_name, instance.name)) + "%s-%s" % (old_name, instance.name), None) result = utils.RunCmd([inst_os.rename_script], env=rename_env, - cwd=inst_os.path, output=logfile) + cwd=inst_os.path, output=logfile, reset_env=True) if result.failed: logging.error("os create command '%s' returned error: %s output: %s", @@ -1035,11 +1217,13 @@ def _GatherAndLinkBlockDevs(instance): return block_devices -def StartInstance(instance): +def StartInstance(instance, startup_paused): """Start an instance. @type instance: L{objects.Instance} @param instance: the instance object + @type startup_paused: bool + @param instance: pause instance at startup? @rtype: None """ @@ -1052,7 +1236,7 @@ def StartInstance(instance): try: block_devices = _GatherAndLinkBlockDevs(instance) hyper = hypervisor.GetHypervisor(instance.hypervisor) - hyper.StartInstance(instance, block_devices) + hyper.StartInstance(instance, block_devices, startup_paused) except errors.BlockDeviceError, err: _Fail("Block device error: %s", err, exc=True) except errors.HypervisorError, err: @@ -1164,7 +1348,7 @@ def InstanceReboot(instance, reboot_type, shutdown_timeout): elif reboot_type == constants.INSTANCE_REBOOT_HARD: try: InstanceShutdown(instance, shutdown_timeout) - return StartInstance(instance) + return StartInstance(instance, False) except errors.HypervisorError, err: _Fail("Failed to hard reboot instance %s: %s", instance.name, err) else: @@ -1197,14 +1381,25 @@ def AcceptInstance(instance, info, target): @param target: target host (usually ip), on this node """ + # TODO: why is this required only for DTS_EXT_MIRROR? 
+ if instance.disk_template in constants.DTS_EXT_MIRROR: + # Create the symlinks, as the disks are not active + # in any way + try: + _GatherAndLinkBlockDevs(instance) + except errors.BlockDeviceError, err: + _Fail("Block device error: %s", err, exc=True) + hyper = hypervisor.GetHypervisor(instance.hypervisor) try: hyper.AcceptInstance(instance, info, target) except errors.HypervisorError, err: + if instance.disk_template in constants.DTS_EXT_MIRROR: + _RemoveBlockDevLinks(instance.name, instance.disks) _Fail("Failed to accept instance: %s", err, exc=True) -def FinalizeMigration(instance, info, success): +def FinalizeMigrationDst(instance, info, success): """Finalize any preparation to accept an instance. @type instance: L{objects.Instance} @@ -1217,9 +1412,9 @@ def FinalizeMigration(instance, info, success): """ hyper = hypervisor.GetHypervisor(instance.hypervisor) try: - hyper.FinalizeMigration(instance, info, success) + hyper.FinalizeMigrationDst(instance, info, success) except errors.HypervisorError, err: - _Fail("Failed to finalize migration: %s", err, exc=True) + _Fail("Failed to finalize migration on the target node: %s", err, exc=True) def MigrateInstance(instance, target, live): @@ -1232,10 +1427,7 @@ def MigrateInstance(instance, target, live): @type live: boolean @param live: whether the migration should be done live or not (the interpretation of this parameter is left to the hypervisor) - @rtype: tuple - @return: a tuple of (success, msg) where: - - succes is a boolean denoting the success/failure of the operation - - msg is a string with details in case of failure + @raise RPCFail: if migration fails for some reason """ hyper = hypervisor.GetHypervisor(instance.hypervisor) @@ -1246,6 +1438,46 @@ def MigrateInstance(instance, target, live): _Fail("Failed to migrate instance: %s", err, exc=True) +def FinalizeMigrationSource(instance, success, live): + """Finalize the instance migration on the source node. + + @type instance: L{objects.Instance} + @param instance: the instance definition of the migrated instance + @type success: bool + @param success: whether the migration succeeded or not + @type live: bool + @param live: whether the user requested a live migration or not + @raise RPCFail: If the execution fails for some reason + + """ + hyper = hypervisor.GetHypervisor(instance.hypervisor) + + try: + hyper.FinalizeMigrationSource(instance, success, live) + except Exception, err: # pylint: disable=W0703 + _Fail("Failed to finalize the migration on the source node: %s", err, + exc=True) + + +def GetMigrationStatus(instance): + """Get the migration status + + @type instance: L{objects.Instance} + @param instance: the instance that is being migrated + @rtype: L{objects.MigrationStatus} + @return: the status of the current migration (one of + L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional + progress info that can be retrieved from the hypervisor + @raise RPCFail: If the migration status cannot be retrieved + + """ + hyper = hypervisor.GetHypervisor(instance.hypervisor) + try: + return hyper.GetMigrationStatus(instance) + except Exception, err: # pylint: disable=W0703 + _Fail("Failed to get migration status: %s", err, exc=True) + + def BlockdevCreate(disk, size, owner, on_primary, info): """Creates a block device for an instance. @@ -1267,8 +1499,8 @@ def BlockdevCreate(disk, size, owner, on_primary, info): it's not required to return anything. 
""" - # TODO: remove the obsolete 'size' argument - # pylint: disable-msg=W0613 + # TODO: remove the obsolete "size" argument + # pylint: disable=W0613 clist = [] if disk.children: for child in disk.children: @@ -1280,7 +1512,7 @@ def BlockdevCreate(disk, size, owner, on_primary, info): # we need the children open in case the device itself has to # be assembled try: - # pylint: disable-msg=E1103 + # pylint: disable=E1103 crdev.Open() except errors.BlockDeviceError, err: _Fail("Can't make child '%s' read-write: %s", child, err) @@ -1483,7 +1715,7 @@ def _RecursiveAssembleBD(disk, owner, as_primary): return result -def BlockdevAssemble(disk, owner, as_primary): +def BlockdevAssemble(disk, owner, as_primary, idx): """Activate a block device for an instance. This is a wrapper over _RecursiveAssembleBD. @@ -1496,10 +1728,14 @@ def BlockdevAssemble(disk, owner, as_primary): try: result = _RecursiveAssembleBD(disk, owner, as_primary) if isinstance(result, bdev.BlockDev): - # pylint: disable-msg=E1103 + # pylint: disable=E1103 result = result.dev_path + if as_primary: + _SymlinkBlockDev(owner, result, idx) except errors.BlockDeviceError, err: _Fail("Error while assembling disk: %s", err, exc=True) + except OSError, err: + _Fail("Error while symlinking disk: %s", err, exc=True) return result @@ -1763,7 +1999,7 @@ def BlockdevExport(disk, dest_node, dest_path, cluster_name): destcmd) # all commands have been checked, so we're safe to combine them - command = '|'.join([expcmd, utils.ShellQuoteArgs(remotecmd)]) + command = "|".join([expcmd, utils.ShellQuoteArgs(remotecmd)]) result = utils.RunCmd(["bash", "-c", command]) @@ -1784,10 +2020,10 @@ def UploadFile(file_name, data, mode, uid, gid, atime, mtime): @param data: the new contents of the file @type mode: int @param mode: the mode to give the file (can be None) - @type uid: int - @param uid: the owner of the file (can be -1 for default) - @type gid: int - @param gid: the group of the file (can be -1 for default) + @type uid: string + @param uid: the owner of the file + @type gid: string + @param gid: the group of the file @type atime: float @param atime: the atime to set on the file (can be None) @type mtime: float @@ -1804,6 +2040,13 @@ def UploadFile(file_name, data, mode, uid, gid, atime, mtime): raw_data = _Decompress(data) + if not (isinstance(uid, basestring) and isinstance(gid, basestring)): + _Fail("Invalid username/groupname type") + + getents = runtime.GetEnts() + uid = getents.LookupUser(uid) + gid = getents.LookupGroup(gid) + utils.SafeWriteFile(file_name, None, data=raw_data, mode=mode, uid=uid, gid=gid, atime=atime, mtime=mtime) @@ -1850,7 +2093,7 @@ def _ErrnoOrStr(err): @param err: the exception to format """ - if hasattr(err, 'errno'): + if hasattr(err, "errno"): detail = errno.errorcode[err.errno] else: detail = str(err) @@ -1976,23 +2219,28 @@ def _TryOSFromDisk(name, base_dir=None): return False, ("API version mismatch for path '%s': found %s, want %s." 
% (os_dir, api_versions, constants.OS_API_VERSIONS)) - # OS Files dictionary, we will populate it with the absolute path names - os_files = dict.fromkeys(constants.OS_SCRIPTS) + # OS Files dictionary, we will populate it with the absolute path + # names; if the value is True, then it is a required file, otherwise + # an optional one + os_files = dict.fromkeys(constants.OS_SCRIPTS, True) if max(api_versions) >= constants.OS_API_V15: - os_files[constants.OS_VARIANTS_FILE] = '' + os_files[constants.OS_VARIANTS_FILE] = False if max(api_versions) >= constants.OS_API_V20: - os_files[constants.OS_PARAMETERS_FILE] = '' + os_files[constants.OS_PARAMETERS_FILE] = True else: del os_files[constants.OS_SCRIPT_VERIFY] - for filename in os_files: + for (filename, required) in os_files.items(): os_files[filename] = utils.PathJoin(os_dir, filename) try: st = os.stat(os_files[filename]) except EnvironmentError, err: + if err.errno == errno.ENOENT and not required: + del os_files[filename] + continue return False, ("File '%s' under path '%s' is missing (%s)" % (filename, os_dir, _ErrnoOrStr(err))) @@ -2011,10 +2259,10 @@ def _TryOSFromDisk(name, base_dir=None): try: variants = utils.ReadFile(variants_file).splitlines() except EnvironmentError, err: - return False, ("Error while reading the OS variants file at %s: %s" % - (variants_file, _ErrnoOrStr(err))) - if not variants: - return False, ("No supported os variant found") + # we accept missing files, but not other errors + if err.errno != errno.ENOENT: + return False, ("Error while reading the OS variants file at %s: %s" % + (variants_file, _ErrnoOrStr(err))) parameters = [] if constants.OS_PARAMETERS_FILE in os_files: @@ -2086,20 +2334,22 @@ def OSCoreEnv(os_name, inst_os, os_params, debug=0): result = {} api_version = \ max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions)) - result['OS_API_VERSION'] = '%d' % api_version - result['OS_NAME'] = inst_os.name - result['DEBUG_LEVEL'] = '%d' % debug + result["OS_API_VERSION"] = "%d" % api_version + result["OS_NAME"] = inst_os.name + result["DEBUG_LEVEL"] = "%d" % debug # OS variants - if api_version >= constants.OS_API_V15: + if api_version >= constants.OS_API_V15 and inst_os.supported_variants: variant = objects.OS.GetVariant(os_name) if not variant: variant = inst_os.supported_variants[0] - result['OS_VARIANT'] = variant + else: + variant = "" + result["OS_VARIANT"] = variant # OS params for pname, pvalue in os_params.items(): - result['OSP_%s' % pname.upper()] = pvalue + result["OSP_%s" % pname.upper()] = pvalue return result @@ -2121,39 +2371,41 @@ def OSEnvironment(instance, inst_os, debug=0): """ result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug) - for attr in ["name", "os", "uuid", "ctime", "mtime"]: + for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]: result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr)) - result['HYPERVISOR'] = instance.hypervisor - result['DISK_COUNT'] = '%d' % len(instance.disks) - result['NIC_COUNT'] = '%d' % len(instance.nics) + result["HYPERVISOR"] = instance.hypervisor + result["DISK_COUNT"] = "%d" % len(instance.disks) + result["NIC_COUNT"] = "%d" % len(instance.nics) + result["INSTANCE_SECONDARY_NODES"] = \ + ("%s" % " ".join(instance.secondary_nodes)) # Disks for idx, disk in enumerate(instance.disks): real_disk = _OpenRealBD(disk) - result['DISK_%d_PATH' % idx] = real_disk.dev_path - result['DISK_%d_ACCESS' % idx] = disk.mode + result["DISK_%d_PATH" % idx] = real_disk.dev_path + result["DISK_%d_ACCESS" % idx] = 
disk.mode if constants.HV_DISK_TYPE in instance.hvparams: - result['DISK_%d_FRONTEND_TYPE' % idx] = \ + result["DISK_%d_FRONTEND_TYPE" % idx] = \ instance.hvparams[constants.HV_DISK_TYPE] if disk.dev_type in constants.LDS_BLOCK: - result['DISK_%d_BACKEND_TYPE' % idx] = 'block' + result["DISK_%d_BACKEND_TYPE" % idx] = "block" elif disk.dev_type == constants.LD_FILE: - result['DISK_%d_BACKEND_TYPE' % idx] = \ - 'file:%s' % disk.physical_id[0] + result["DISK_%d_BACKEND_TYPE" % idx] = \ + "file:%s" % disk.physical_id[0] # NICs for idx, nic in enumerate(instance.nics): - result['NIC_%d_MAC' % idx] = nic.mac + result["NIC_%d_MAC" % idx] = nic.mac if nic.ip: - result['NIC_%d_IP' % idx] = nic.ip - result['NIC_%d_MODE' % idx] = nic.nicparams[constants.NIC_MODE] + result["NIC_%d_IP" % idx] = nic.ip + result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE] if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED: - result['NIC_%d_BRIDGE' % idx] = nic.nicparams[constants.NIC_LINK] + result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK] if nic.nicparams[constants.NIC_LINK]: - result['NIC_%d_LINK' % idx] = nic.nicparams[constants.NIC_LINK] + result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK] if constants.HV_NIC_TYPE in instance.hvparams: - result['NIC_%d_FRONTEND_TYPE' % idx] = \ + result["NIC_%d_FRONTEND_TYPE" % idx] = \ instance.hvparams[constants.HV_NIC_TYPE] # HV/BE params @@ -2164,7 +2416,7 @@ def OSEnvironment(instance, inst_os, debug=0): return result -def BlockdevGrow(disk, amount): +def BlockdevGrow(disk, amount, dryrun): """Grow a stack of block devices. This function is called recursively, with the childrens being the @@ -2172,10 +2424,14 @@ def BlockdevGrow(disk, amount): @type disk: L{objects.Disk} @param disk: the disk to be grown + @type amount: integer + @param amount: the amount (in mebibytes) to grow with + @type dryrun: boolean + @param dryrun: whether to execute the operation in simulation mode + only, without actually increasing the size @rtype: (status, result) - @return: a tuple with the status of the operation - (True/False), and the errors message if status - is False + @return: a tuple with the status of the operation (True/False), and + the errors message if status is False """ r_dev = _RecursiveFindBD(disk) @@ -2183,7 +2439,7 @@ def BlockdevGrow(disk, amount): _Fail("Cannot find block device %s", disk) try: - r_dev.Grow(amount) + r_dev.Grow(amount, dryrun) except errors.BlockDeviceError, err: _Fail("Failed to grow block device: %s", err, exc=True) @@ -2237,45 +2493,46 @@ def FinalizeExport(instance, snap_disks): config = objects.SerializableConfigParser() config.add_section(constants.INISECT_EXP) - config.set(constants.INISECT_EXP, 'version', '0') - config.set(constants.INISECT_EXP, 'timestamp', '%d' % int(time.time())) - config.set(constants.INISECT_EXP, 'source', instance.primary_node) - config.set(constants.INISECT_EXP, 'os', instance.os) - config.set(constants.INISECT_EXP, 'compression', 'gzip') + config.set(constants.INISECT_EXP, "version", "0") + config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time())) + config.set(constants.INISECT_EXP, "source", instance.primary_node) + config.set(constants.INISECT_EXP, "os", instance.os) + config.set(constants.INISECT_EXP, "compression", "none") config.add_section(constants.INISECT_INS) - config.set(constants.INISECT_INS, 'name', instance.name) - config.set(constants.INISECT_INS, 'memory', '%d' % + config.set(constants.INISECT_INS, "name", instance.name) + 
config.set(constants.INISECT_INS, "memory", "%d" % instance.beparams[constants.BE_MEMORY]) - config.set(constants.INISECT_INS, 'vcpus', '%d' % + config.set(constants.INISECT_INS, "vcpus", "%d" % instance.beparams[constants.BE_VCPUS]) - config.set(constants.INISECT_INS, 'disk_template', instance.disk_template) - config.set(constants.INISECT_INS, 'hypervisor', instance.hypervisor) + config.set(constants.INISECT_INS, "disk_template", instance.disk_template) + config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor) + config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags())) nic_total = 0 for nic_count, nic in enumerate(instance.nics): nic_total += 1 - config.set(constants.INISECT_INS, 'nic%d_mac' % - nic_count, '%s' % nic.mac) - config.set(constants.INISECT_INS, 'nic%d_ip' % nic_count, '%s' % nic.ip) + config.set(constants.INISECT_INS, "nic%d_mac" % + nic_count, "%s" % nic.mac) + config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip) for param in constants.NICS_PARAMETER_TYPES: - config.set(constants.INISECT_INS, 'nic%d_%s' % (nic_count, param), - '%s' % nic.nicparams.get(param, None)) + config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param), + "%s" % nic.nicparams.get(param, None)) # TODO: redundant: on load can read nics until it doesn't exist - config.set(constants.INISECT_INS, 'nic_count' , '%d' % nic_total) + config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total) disk_total = 0 for disk_count, disk in enumerate(snap_disks): if disk: disk_total += 1 - config.set(constants.INISECT_INS, 'disk%d_ivname' % disk_count, - ('%s' % disk.iv_name)) - config.set(constants.INISECT_INS, 'disk%d_dump' % disk_count, - ('%s' % disk.physical_id[1])) - config.set(constants.INISECT_INS, 'disk%d_size' % disk_count, - ('%d' % disk.size)) + config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count, + ("%s" % disk.iv_name)) + config.set(constants.INISECT_INS, "disk%d_dump" % disk_count, + ("%s" % disk.physical_id[1])) + config.set(constants.INISECT_INS, "disk%d_size" % disk_count, + ("%d" % disk.size)) - config.set(constants.INISECT_INS, 'disk_count' , '%d' % disk_total) + config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total) # New-style hypervisor/backend parameters @@ -2391,15 +2648,15 @@ def BlockdevRename(devlist): _Fail("; ".join(msgs)) -def _TransformFileStorageDir(file_storage_dir): +def _TransformFileStorageDir(fs_dir): """Checks whether given file_storage_dir is valid. - Checks wheter the given file_storage_dir is within the cluster-wide - default file_storage_dir stored in SimpleStore. Only paths under that - directory are allowed. + Checks wheter the given fs_dir is within the cluster-wide default + file_storage_dir or the shared_file_storage_dir, which are stored in + SimpleStore. Only paths under those directories are allowed. 
- @type file_storage_dir: str - @param file_storage_dir: the path to check + @type fs_dir: str + @param fs_dir: the path to check @return: the normalized path if valid, None otherwise @@ -2407,13 +2664,15 @@ def _TransformFileStorageDir(file_storage_dir): if not constants.ENABLE_FILE_STORAGE: _Fail("File storage disabled at configure time") cfg = _GetConfig() - file_storage_dir = os.path.normpath(file_storage_dir) - base_file_storage_dir = cfg.GetFileStorageDir() - if (os.path.commonprefix([file_storage_dir, base_file_storage_dir]) != - base_file_storage_dir): + fs_dir = os.path.normpath(fs_dir) + base_fstore = cfg.GetFileStorageDir() + base_shared = cfg.GetSharedFileStorageDir() + if not (utils.IsBelowDir(base_fstore, fs_dir) or + utils.IsBelowDir(base_shared, fs_dir)): _Fail("File storage directory '%s' is not under base file" - " storage directory '%s'", file_storage_dir, base_file_storage_dir) - return file_storage_dir + " storage directory '%s' or shared storage directory '%s'", + fs_dir, base_fstore, base_shared) + return fs_dir def CreateFileStorageDir(file_storage_dir): @@ -2550,7 +2809,10 @@ def JobQueueRename(old, new): _EnsureJobQueueFile(old) _EnsureJobQueueFile(new) - utils.RenameFile(old, new, mkdir=True) + getents = runtime.GetEnts() + + utils.RenameFile(old, new, mkdir=True, mkdir_mode=0700, + dir_uid=getents.masterd_uid, dir_gid=getents.masterd_gid) def BlockdevClose(instance_name, disks): @@ -2661,7 +2923,7 @@ def ValidateOS(required, osname, checks, osparams): validate_env = OSCoreEnv(osname, tbv, osparams) result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env, - cwd=tbv.path) + cwd=tbv.path, reset_env=True) if result.failed: logging.error("os validate command '%s' returned error: %s output: %s", result.cmd, result.fail_reason, result.output) @@ -2776,12 +3038,12 @@ def _GetImportExportIoCommand(instance, mode, ieio, ieargs): if not utils.IsNormAbsPath(filename): _Fail("Path '%s' is not normalized or absolute", filename) - directory = os.path.normpath(os.path.dirname(filename)) + real_filename = os.path.realpath(filename) + directory = os.path.dirname(real_filename) - if (os.path.commonprefix([constants.EXPORT_DIR, directory]) != - constants.EXPORT_DIR): - _Fail("File '%s' is not under exports directory '%s'", - filename, constants.EXPORT_DIR) + if not utils.IsBelowDir(constants.EXPORT_DIR, real_filename): + _Fail("File '%s' is not under exports directory '%s': %s", + filename, constants.EXPORT_DIR, real_filename) # Create directory utils.Makedirs(directory, mode=0750) @@ -2873,7 +3135,8 @@ def _CreateImportExportStatusDir(prefix): (prefix, utils.TimestampForFilename()))) -def StartImportExportDaemon(mode, opts, host, port, instance, ieio, ieioargs): +def StartImportExportDaemon(mode, opts, host, port, instance, component, + ieio, ieioargs): """Starts an import or export daemon. @param mode: Import/output mode @@ -2885,6 +3148,9 @@ def StartImportExportDaemon(mode, opts, host, port, instance, ieio, ieioargs): @param port: Remote port for export (None for import) @type instance: L{objects.Instance} @param instance: Instance object + @type component: string + @param component: which part of the instance is transferred now, + e.g. 
'disk/0' @param ieio: Input/output type @param ieioargs: Input/output arguments @@ -2924,7 +3190,7 @@ def StartImportExportDaemon(mode, opts, host, port, instance, ieio, ieioargs): if not os.path.exists(i): _Fail("File '%s' does not exist" % i) - status_dir = _CreateImportExportStatusDir(prefix) + status_dir = _CreateImportExportStatusDir("%s-%s" % (prefix, component)) try: status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE) pid_file = utils.PathJoin(status_dir, _IES_PID_FILE) @@ -2973,7 +3239,16 @@ def StartImportExportDaemon(mode, opts, host, port, instance, ieio, ieioargs): if cmd_suffix: cmd.append("--cmd-suffix=%s" % cmd_suffix) - logfile = _InstanceLogName(prefix, instance.os, instance.name) + if mode == constants.IEM_EXPORT: + # Retry connection a few times when connecting to remote peer + cmd.append("--connect-retries=%s" % constants.RIE_CONNECT_RETRIES) + cmd.append("--connect-timeout=%s" % constants.RIE_CONNECT_ATTEMPT_TIMEOUT) + elif opts.connect_timeout is not None: + assert mode == constants.IEM_IMPORT + # Overall timeout for establishing connection while listening + cmd.append("--connect-timeout=%s" % opts.connect_timeout) + + logfile = _InstanceLogName(prefix, instance.os, instance.name, component) # TODO: Once _InstanceLogName uses tempfile.mkstemp, StartDaemon has # support for receiving a file descriptor for output @@ -3210,7 +3485,7 @@ def PowercycleNode(hypervisor_type): # ensure the child is running on ram try: utils.Mlockall() - except Exception: # pylint: disable-msg=W0703 + except Exception: # pylint: disable=W0703 pass time.sleep(5) hyper.PowercycleNode() @@ -3235,7 +3510,21 @@ class HooksRunner(object): hooks_base_dir = constants.HOOKS_BASE_DIR # yeah, _BASE_DIR is not valid for attributes, we use it like a # constant - self._BASE_DIR = hooks_base_dir # pylint: disable-msg=C0103 + self._BASE_DIR = hooks_base_dir # pylint: disable=C0103 + + def RunLocalHooks(self, node_list, hpath, phase, env): + """Check that the hooks will be run only locally and then run them. + + """ + assert len(node_list) == 1 + node = node_list[0] + _, myself = ssconf.GetMasterAndMyself() + assert node == myself + + results = self.RunHooks(hpath, phase, env) + + # Return values in the form expected by HooksMaster + return {node: (None, False, results)} def RunHooks(self, hpath, phase, env): """Run the scripts in the hooks directory. @@ -3266,7 +3555,6 @@ class HooksRunner(object): else: _Fail("Unknown hooks phase '%s'", phase) - subdir = "%s-%s.d" % (hpath, suffix) dir_name = utils.PathJoin(self._BASE_DIR, subdir)
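
Below is a minimal, self-contained sketch (not part of the patch) of the pre/post hook decorator pattern that RunLocalHooks introduces above. The runner name run_hooks and the phase constants here are illustrative only; Ganeti's actual implementation builds the hook environment lazily with compat.partial and dispatches through mcpu.HooksMaster and HooksRunner.RunLocalHooks, as shown in the diff.

# Illustrative sketch of a decorator that runs hooks before and after the
# decorated function.  run_hooks(), PRE and POST are hypothetical stand-ins;
# the real code delegates to mcpu.HooksMaster / HooksRunner.

import functools

PRE = "pre"
POST = "post"


def run_hooks(phase, env):
    """Placeholder hook runner.

    A real runner would execute the scripts of a phase-specific hooks
    directory with ``env`` exported into their environment.
    """
    print("running %s hooks with env %s" % (phase, env))


def with_local_hooks(env_builder_fn):
    """Run hooks before and after the decorated function.

    ``env_builder_fn`` receives the same arguments as the decorated
    function and returns the environment dictionary passed to the hooks.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            env = env_builder_fn(*args, **kwargs)
            run_hooks(PRE, env)      # a failing pre-hook would abort here
            result = fn(*args, **kwargs)
            run_hooks(POST, env)     # post-hook failures are only logged
            return result
        return wrapper
    return decorator


@with_local_hooks(lambda ip, netdev: {"MASTER_IP": ip, "MASTER_NETDEV": netdev})
def activate_master_ip(ip, netdev):
    print("adding %s on %s" % (ip, netdev))


if __name__ == "__main__":
    activate_master_ip("192.0.2.1", "eth0")

Building the environment from the decorated function's own arguments keeps the hook contract next to the function it wraps, which is why ActivateMasterIp and DeactivateMasterIp in the patch only need to declare _BuildMasterIpEnv once.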