X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/0188611b9cf332ff83f95e0bdbdcbd74c946465c..86aa9ba32f810956948ff4e578c828351998a82c:/lib/backend.py

diff --git a/lib/backend.py b/lib/backend.py
index 912cd8c..ccb6a17 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -64,6 +64,7 @@ from ganeti import mcpu
 from ganeti import compat
 from ganeti import pathutils
 from ganeti import vcluster
+from ganeti import ht


 _BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
@@ -87,6 +88,19 @@ _LVSLINE_REGEX = re.compile("^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")
 _MASTER_START = "start"
 _MASTER_STOP = "stop"

+#: Maximum file permissions for remote command directory and executables
+_RCMD_MAX_MODE = (stat.S_IRWXU |
+                  stat.S_IRGRP | stat.S_IXGRP |
+                  stat.S_IROTH | stat.S_IXOTH)
+
+#: Delay before returning an error for remote commands
+_RCMD_INVALID_DELAY = 10
+
+#: How long to wait to acquire lock for remote commands (shorter than
+#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
+#: command requests arrive
+_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
+

 class RPCFail(Exception):
   """Class denoting RPC failure.
@@ -200,7 +214,7 @@ def _BuildUploadFileList():
   """
   allowed_files = set([
     pathutils.CLUSTER_CONF_FILE,
-    constants.ETC_HOSTS,
+    pathutils.ETC_HOSTS,
     pathutils.SSH_KNOWN_HOSTS_FILE,
     pathutils.VNC_PASSWORD_FILE,
     pathutils.RAPI_CERT_FILE,
@@ -215,6 +229,9 @@ def _BuildUploadFileList():
     hv_class = hypervisor.GetHypervisorClass(hv_name)
     allowed_files.update(hv_class.GetAncillaryFiles()[0])

+  assert pathutils.FILE_STORAGE_PATHS_FILE not in allowed_files, \
+    "Allowed file storage paths should never be uploaded via RPC"
+
   return frozenset(allowed_files)


@@ -644,8 +661,11 @@ def VerifyNode(what, cluster_name):
         tmp.append((source, hv_name, str(err)))

   if constants.NV_FILELIST in what:
-    result[constants.NV_FILELIST] = utils.FingerprintFiles(
-      what[constants.NV_FILELIST])
+    fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
+                                              what[constants.NV_FILELIST]))
+    result[constants.NV_FILELIST] = \
+      dict((vcluster.MakeVirtualPath(key), value)
+           for (key, value) in fingerprints.items())

   if constants.NV_NODELIST in what:
     (nodes, bynode) = what[constants.NV_NODELIST]
@@ -705,7 +725,7 @@ def VerifyNode(what, cluster_name):
   if constants.NV_USERSCRIPTS in what:
     result[constants.NV_USERSCRIPTS] = \
       [script for script in what[constants.NV_USERSCRIPTS]
-       if not (os.path.exists(script) and os.access(script, os.X_OK))]
+       if not utils.IsExecutable(script)]

   if constants.NV_OOB_PATHS in what:
     result[constants.NV_OOB_PATHS] = tmp = []
@@ -794,6 +814,11 @@ def VerifyNode(what, cluster_name):
     result[constants.NV_BRIDGES] = [bridge
                                     for bridge in what[constants.NV_BRIDGES]
                                     if not utils.BridgeExists(bridge)]
+
+  if what.get(constants.NV_FILE_STORAGE_PATHS) == my_name:
+    result[constants.NV_FILE_STORAGE_PATHS] = \
+      bdev.ComputeWrongFileStoragePaths()
+
   return result


@@ -2128,15 +2153,6 @@ def RunOob(oob_program, command, node, timeout):
   return result.stdout


-def WriteSsconfFiles(values):
-  """Update all ssconf files.
-
-  Wrapper around the SimpleStore.WriteFiles.
-
-  """
-  ssconf.SimpleStore().WriteFiles(values)
-
-
 def _OSOndiskAPIVersion(os_dir):
   """Compute and return the API version of a given OS.

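For context on the restricted-command constants introduced at the top of this patch, here is a minimal standalone sketch (not part of backend.py) of the permission test that _RCMD_MAX_MODE supports; the helper name too_permissive is made up, and the actual check lives in _CommonRestrictedCmdCheck further down in the patch.

# Standalone sketch, not part of the patch: _RCMD_MAX_MODE equals 0755, so
# any path with extra mode bits (group/other write, setuid/setgid/sticky)
# is rejected -- e.g. a 0775 directory fails, a 0755 one passes.
import os
import stat

_RCMD_MAX_MODE = (stat.S_IRWXU |
                  stat.S_IRGRP | stat.S_IXGRP |
                  stat.S_IROTH | stat.S_IXOTH)

def too_permissive(path):
  # True if 'path' carries any permission bit outside the 0755 mask
  st = os.stat(path)
  return bool(stat.S_IMODE(st.st_mode) & ~_RCMD_MAX_MODE)
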
@@ -2294,7 +2310,8 @@ def _TryOSFromDisk(name, base_dir=None):
   if constants.OS_VARIANTS_FILE in os_files:
     variants_file = os_files[constants.OS_VARIANTS_FILE]
     try:
-      variants = utils.ReadFile(variants_file).splitlines()
+      variants = \
+        utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
     except EnvironmentError, err:
       # we accept missing files, but not other errors
       if err.errno != errno.ENOENT:
@@ -2446,6 +2463,8 @@ def OSEnvironment(instance, inst_os, debug=0):
       result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
     if nic.nicparams[constants.NIC_LINK]:
       result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
+    if nic.network:
+      result["NIC_%d_NETWORK" % idx] = nic.network
     if constants.HV_NIC_TYPE in instance.hvparams:
       result["NIC_%d_FRONTEND_TYPE" % idx] = \
         instance.hvparams[constants.HV_NIC_TYPE]
@@ -2519,6 +2538,32 @@ def BlockdevSnapshot(disk):
           disk.unique_id, disk.dev_type)


+def BlockdevSetInfo(disk, info):
+  """Sets 'metadata' information on block devices.
+
+  This function sets 'info' metadata on block devices. Initial
+  information is set at device creation; this function should be used
+  for example after renames.
+
+  @type disk: L{objects.Disk}
+  @param disk: the disk to be changed
+  @type info: string
+  @param info: new 'info' metadata
+  @rtype: (status, result)
+  @return: a tuple with the status of the operation (True/False), and
+    the error message if status is False
+
+  """
+  r_dev = _RecursiveFindBD(disk)
+  if r_dev is None:
+    _Fail("Cannot find block device %s", disk)
+
+  try:
+    r_dev.SetInfo(info)
+  except errors.BlockDeviceError, err:
+    _Fail("Failed to set information on block device: %s", err, exc=True)
+
+
 def FinalizeExport(instance, snap_disks):
   """Write out the export configuration information.

@@ -2565,6 +2610,8 @@ def FinalizeExport(instance, snap_disks):
     config.set(constants.INISECT_INS, "nic%d_mac" %
                nic_count, "%s" % nic.mac)
     config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
+    config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
+               "%s" % nic.network)
     for param in constants.NICS_PARAMETER_TYPES:
       config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
                  "%s" % nic.nicparams.get(param, None))
@@ -2711,18 +2758,13 @@ def _TransformFileStorageDir(fs_dir):
   @return: the normalized path if valid, None otherwise

   """
-  if not constants.ENABLE_FILE_STORAGE:
+  if not (constants.ENABLE_FILE_STORAGE or
+          constants.ENABLE_SHARED_FILE_STORAGE):
     _Fail("File storage disabled at configure time")
-  cfg = _GetConfig()
-  fs_dir = os.path.normpath(fs_dir)
-  base_fstore = cfg.GetFileStorageDir()
-  base_shared = cfg.GetSharedFileStorageDir()
-  if not (utils.IsBelowDir(base_fstore, fs_dir) or
-          utils.IsBelowDir(base_shared, fs_dir)):
-    _Fail("File storage directory '%s' is not under base file"
-          " storage directory '%s' or shared storage directory '%s'",
-          fs_dir, base_fstore, base_shared)
-  return fs_dir
+
+  bdev.CheckFileStoragePath(fs_dir)
+
+  return os.path.normpath(fs_dir)


 def CreateFileStorageDir(file_storage_dir):
@@ -2813,12 +2855,9 @@ def _EnsureJobQueueFile(file_name):
   @raises RPCFail: if the file is not valid

   """
-  queue_dir = os.path.normpath(pathutils.QUEUE_DIR)
-  result = (os.path.commonprefix([queue_dir, file_name]) == queue_dir)
-
-  if not result:
+  if not utils.IsBelowDir(pathutils.QUEUE_DIR, file_name):
     _Fail("Passed job queue file '%s' does not belong to"
-          " the queue directory '%s'", file_name, queue_dir)
+          " the queue directory '%s'", file_name, pathutils.QUEUE_DIR)


 def JobQueueUpdate(file_name, content):
@@ -3546,6 +3585,209 @@ def PowercycleNode(hypervisor_type):
   hyper.PowercycleNode()


+def _VerifyRestrictedCmdName(cmd):
+  """Verifies a remote command name.
+
+  @type cmd: string
+  @param cmd: Command name
+  @rtype: tuple; (boolean, string or None)
+  @return: The tuple's first element is the status; if C{False}, the second
+    element is an error message string, otherwise it's C{None}
+
+  """
+  if not cmd.strip():
+    return (False, "Missing command name")
+
+  if os.path.basename(cmd) != cmd:
+    return (False, "Invalid command name")
+
+  if not constants.EXT_PLUGIN_MASK.match(cmd):
+    return (False, "Command name contains forbidden characters")
+
+  return (True, None)
+
+
+def _CommonRestrictedCmdCheck(path, owner):
+  """Common checks for remote command file system directories and files.
+
+  @type path: string
+  @param path: Path to check
+  @param owner: C{None} or tuple containing UID and GID
+  @rtype: tuple; (boolean, string or C{os.stat} result)
+  @return: The tuple's first element is the status; if C{False}, the second
+    element is an error message string, otherwise it's the result of C{os.stat}
+
+  """
+  if owner is None:
+    # Default to root as owner
+    owner = (0, 0)
+
+  try:
+    st = os.stat(path)
+  except EnvironmentError, err:
+    return (False, "Can't stat(2) '%s': %s" % (path, err))
+
+  if stat.S_IMODE(st.st_mode) & (~_RCMD_MAX_MODE):
+    return (False, "Permissions on '%s' are too permissive" % path)
+
+  if (st.st_uid, st.st_gid) != owner:
+    (owner_uid, owner_gid) = owner
+    return (False, "'%s' is not owned by %s:%s" % (path, owner_uid, owner_gid))
+
+  return (True, st)
+
+
+def _VerifyRestrictedCmdDirectory(path, _owner=None):
+  """Verifies remote command directory.
+
+  @type path: string
+  @param path: Path to check
+  @rtype: tuple; (boolean, string or None)
+  @return: The tuple's first element is the status; if C{False}, the second
+    element is an error message string, otherwise it's C{None}
+
+  """
+  (status, value) = _CommonRestrictedCmdCheck(path, _owner)
+
+  if not status:
+    return (False, value)
+
+  if not stat.S_ISDIR(value.st_mode):
+    return (False, "Path '%s' is not a directory" % path)
+
+  return (True, None)
+
+
+def _VerifyRestrictedCmd(path, cmd, _owner=None):
+  """Verifies a whole remote command and returns its executable filename.
+
+  @type path: string
+  @param path: Directory containing remote commands
+  @type cmd: string
+  @param cmd: Command name
+  @rtype: tuple; (boolean, string)
+  @return: The tuple's first element is the status; if C{False}, the second
+    element is an error message string, otherwise the second element is the
+    absolute path to the executable
+
+  """
+  executable = utils.PathJoin(path, cmd)
+
+  (status, msg) = _CommonRestrictedCmdCheck(executable, _owner)
+
+  if not status:
+    return (False, msg)
+
+  if not utils.IsExecutable(executable):
+    return (False, "access(2) thinks '%s' can't be executed" % executable)
+
+  return (True, executable)
+
+
+def _PrepareRestrictedCmd(path, cmd,
+                          _verify_dir=_VerifyRestrictedCmdDirectory,
+                          _verify_name=_VerifyRestrictedCmdName,
+                          _verify_cmd=_VerifyRestrictedCmd):
+  """Performs a number of tests on a remote command.
+
+  @type path: string
+  @param path: Directory containing remote commands
+  @type cmd: string
+  @param cmd: Command name
+  @return: Same as L{_VerifyRestrictedCmd}
+
+  """
+  # Verify the directory first
+  (status, msg) = _verify_dir(path)
+  if status:
+    # Check command if everything was alright
+    (status, msg) = _verify_name(cmd)
+
+  if not status:
+    return (False, msg)
+
+  # Check actual executable
+  return _verify_cmd(path, cmd)
+
+
+def RunRestrictedCmd(cmd,
+                     _lock_timeout=_RCMD_LOCK_TIMEOUT,
+                     _lock_file=pathutils.RESTRICTED_COMMANDS_LOCK_FILE,
+                     _path=pathutils.RESTRICTED_COMMANDS_DIR,
+                     _sleep_fn=time.sleep,
+                     _prepare_fn=_PrepareRestrictedCmd,
+                     _runcmd_fn=utils.RunCmd,
+                     _enabled=constants.ENABLE_RESTRICTED_COMMANDS):
+  """Executes a remote command after performing strict tests.
+
+  @type cmd: string
+  @param cmd: Command name
+  @rtype: string
+  @return: Command output
+  @raise RPCFail: In case of an error
+
+  """
+  logging.info("Preparing to run remote command '%s'", cmd)
+
+  if not _enabled:
+    _Fail("Remote commands disabled at configure time")
+
+  lock = None
+  try:
+    cmdresult = None
+    try:
+      lock = utils.FileLock.Open(_lock_file)
+      lock.Exclusive(blocking=True, timeout=_lock_timeout)
+
+      (status, value) = _prepare_fn(_path, cmd)
+
+      if status:
+        cmdresult = _runcmd_fn([value], env={}, reset_env=True,
+                               postfork_fn=lambda _: lock.Unlock())
+      else:
+        logging.error(value)
+    except Exception:  # pylint: disable=W0703
+      # Keep original error in log
+      logging.exception("Caught exception")
+
+    if cmdresult is None:
+      logging.info("Sleeping for %0.1f seconds before returning",
+                   _RCMD_INVALID_DELAY)
+      _sleep_fn(_RCMD_INVALID_DELAY)
+
+      # Do not include original error message in returned error
+      _Fail("Executing command '%s' failed" % cmd)
+    elif cmdresult.failed or cmdresult.fail_reason:
+      _Fail("Remote command '%s' failed: %s; output: %s",
+            cmd, cmdresult.fail_reason, cmdresult.output)
+    else:
+      return cmdresult.output
+  finally:
+    if lock is not None:
+      # Release lock at last
+      lock.Close()
+      lock = None
+
+
+def SetWatcherPause(until, _filename=pathutils.WATCHER_PAUSEFILE):
+  """Creates or removes the watcher pause file.
+
+  @type until: None or number
+  @param until: Unix timestamp saying until when the watcher shouldn't run
+
+  """
+  if until is None:
+    logging.info("Received request to no longer pause watcher")
+    utils.RemoveFile(_filename)
+  else:
+    logging.info("Received request to pause watcher until %s", until)
+
+    if not ht.TNumber(until):
+      _Fail("Duration must be numeric")
+
+    utils.WriteFile(_filename, data="%d\n" % (until, ), mode=0644)
+
+
 class HooksRunner(object):
   """Hook runner.
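For reference, a minimal sketch (not part of the patch) of how the verification chain behind RunRestrictedCmd might be exercised on its own, assuming the patched ganeti tree is on the Python path; the command name "cleanup" and the _owner override are illustrative only, since by default both the directory and the executable must be owned by root:root and be at most mode 0755.

# Standalone sketch: checks a command under RESTRICTED_COMMANDS_DIR without
# executing it, by driving _PrepareRestrictedCmd through its test-injection
# keyword arguments.
import functools
import os

from ganeti import backend
from ganeti import pathutils

owner = (os.getuid(), os.getgid())  # relax the root-only ownership check
(status, value) = backend._PrepareRestrictedCmd(
  pathutils.RESTRICTED_COMMANDS_DIR, "cleanup",  # "cleanup" is made up
  _verify_dir=functools.partial(backend._VerifyRestrictedCmdDirectory,
                                _owner=owner),
  _verify_cmd=functools.partial(backend._VerifyRestrictedCmd, _owner=owner))

if status:
  print "would run %s" % value    # absolute path to the executable
else:
  print "rejected: %s" % value    # error message from the failed check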