#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import serializer
from ganeti import netutils
from ganeti import runtime
-from ganeti import mcpu
from ganeti import compat
from ganeti import pathutils
from ganeti import vcluster
+from ganeti import ht
+from ganeti import hooksmaster
_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
-_ALLOWED_CLEAN_DIRS = frozenset([
+_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
pathutils.DATA_DIR,
pathutils.JOB_QUEUE_ARCHIVE_DIR,
pathutils.QUEUE_DIR,
_MASTER_START = "start"
_MASTER_STOP = "stop"
+#: Maximum file permissions for restricted command directory and executables
+_RCMD_MAX_MODE = (stat.S_IRWXU |
+ stat.S_IRGRP | stat.S_IXGRP |
+ stat.S_IROTH | stat.S_IXOTH)
+
+#: Delay before returning an error for restricted commands
+_RCMD_INVALID_DELAY = 10
+
+#: How long to wait to acquire lock for restricted commands (shorter than
+#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
+#: command requests arrive
+_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
+
class RPCFail(Exception):
"""Class denoting RPC failure.
"""
+def _GetInstReasonFilename(instance_name):
+  """Path of the file containing the reason of the instance status change.
+
+  @type instance_name: string
+  @param instance_name: The name of the instance
+  @rtype: string
+  @return: The path of the file
+
+  """
+  # One reason file per instance, named after the instance itself
+  return utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)
+
+
+def _StoreInstReasonTrail(instance_name, trail):
+  """Serialize a reason trail related to an instance change of state to file.
+
+  The exact location of the file depends on the name of the instance and on
+  the configuration of the Ganeti cluster defined at deploy time.
+
+  @type instance_name: string
+  @param instance_name: The name of the instance
+  @type trail: list
+  @param trail: reason trail to be serialized to the file as JSON
+  @rtype: None
+
+  """
+  json = serializer.DumpJson(trail)
+  filename = _GetInstReasonFilename(instance_name)
+  utils.WriteFile(filename, data=json)
+
+
def _Fail(msg, *args, **kwargs):
"""Log an error and the raise an RPCFail exception.
cfg = _GetConfig()
hr = HooksRunner()
- hm = mcpu.HooksMaster(hook_opcode, hooks_path, nodes, hr.RunLocalHooks,
- None, env_fn, logging.warning, cfg.GetClusterName(),
- cfg.GetMasterNode())
-
+ hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
+ hr.RunLocalHooks, None, env_fn,
+ logging.warning, cfg.GetClusterName(),
+ cfg.GetMasterNode())
hm.RunPhase(constants.HOOKS_PHASE_PRE)
result = fn(*args, **kwargs)
hm.RunPhase(constants.HOOKS_PHASE_POST)
result = utils.RunCmd([setup_script, action], env=env, reset_env=True)
if result.failed:
- _Fail("Failed to %s the master IP. Script return value: %s" %
- (action, result.exit_code), log=True)
+ _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
+ (action, result.exit_code, result.output), log=True)
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
raise errors.QuitGanetiException(True, "Shutdown scheduled")
-def _GetVgInfo(name):
+def _GetVgInfo(name, excl_stor):
"""Retrieves information about a LVM volume group.
"""
# TODO: GetVGInfo supports returning information for multiple VGs at once
- vginfo = bdev.LogicalVolume.GetVGInfo([name])
+ vginfo = bdev.LogicalVolume.GetVGInfo([name], excl_stor)
if vginfo:
vg_free = int(round(vginfo[0][0], 0))
vg_size = int(round(vginfo[0][1], 0))
return map(fn, names)
-def GetNodeInfo(vg_names, hv_names):
+def GetNodeInfo(vg_names, hv_names, excl_stor):
"""Gives back a hash with different information about the node.
@type vg_names: list of string
@param vg_names: Names of the volume groups to ask for disk space information
@type hv_names: list of string
@param hv_names: Names of the hypervisors to ask for node information
+ @type excl_stor: boolean
+ @param excl_stor: Whether exclusive_storage is active
@rtype: tuple; (string, None/dict, None/dict)
@return: Tuple containing boot ID, volume group information and hypervisor
information
"""
bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
- vg_info = _GetNamedNodeInfo(vg_names, _GetVgInfo)
+ vg_info = _GetNamedNodeInfo(vg_names, (lambda vg: _GetVgInfo(vg, excl_stor)))
hv_info = _GetNamedNodeInfo(hv_names, _GetHvInfo)
return (bootid, vg_info, hv_info)
+def _CheckExclusivePvs(pvi_list):
+  """Check that PVs are not shared among LVs
+
+  @type pvi_list: list of L{objects.LvmPvInfo} objects
+  @param pvi_list: information about the PVs
+
+  @rtype: list of tuples (string, list of strings)
+  @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])
+
+  """
+  res = []
+  for pvi in pvi_list:
+    # A PV hosting more than one LV violates the exclusive-storage assumption
+    if len(pvi.lv_list) > 1:
+      res.append((pvi.name, pvi.lv_list))
+  return res
+
+
def VerifyNode(what, cluster_name):
"""Verify the status of the local node.
if constants.NV_USERSCRIPTS in what:
result[constants.NV_USERSCRIPTS] = \
[script for script in what[constants.NV_USERSCRIPTS]
- if not (os.path.exists(script) and os.access(script, os.X_OK))]
+ if not utils.IsExecutable(script)]
if constants.NV_OOB_PATHS in what:
result[constants.NV_OOB_PATHS] = tmp = []
result[constants.NV_VGLIST] = utils.ListVolumeGroups()
if constants.NV_PVLIST in what and vm_capable:
- result[constants.NV_PVLIST] = \
- bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
- filter_allocatable=False)
+ check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
+ val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
+ filter_allocatable=False,
+ include_lvs=check_exclusive_pvs)
+ if check_exclusive_pvs:
+ result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
+ for pvi in val:
+ # Avoid sending useless data on the wire
+ pvi.lv_list = []
+ result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)
if constants.NV_VERSION in what:
result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
result[constants.NV_BRIDGES] = [bridge
for bridge in what[constants.NV_BRIDGES]
if not utils.BridgeExists(bridge)]
+
+ if what.get(constants.NV_FILE_STORAGE_PATHS) == my_name:
+ result[constants.NV_FILE_STORAGE_PATHS] = \
+ bdev.ComputeWrongFileStoragePaths()
+
return result
" log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
-def _GetBlockDevSymlinkPath(instance_name, idx):
- return utils.PathJoin(pathutils.DISK_LINKS_DIR, "%s%s%d" %
- (instance_name, constants.DISK_SEPARATOR, idx))
+def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
+  """Returns symlink path for block device.
+
+  @type instance_name: string
+  @param instance_name: name of the instance owning the disk
+  @type idx: int
+  @param idx: index of the disk within the instance
+  @type _dir: string
+  @param _dir: base directory override, intended for tests only
+  @rtype: string
+  @return: symlink path of the form "<dir>/<instance><separator><idx>"
+
+  """
+  if _dir is None:
+    _dir = pathutils.DISK_LINKS_DIR
+
+  return utils.PathJoin(_dir,
+                        ("%s%s%s" %
+                         (instance_name, constants.DISK_SEPARATOR, idx)))
def _SymlinkBlockDev(instance_name, device_path, idx):
return block_devices
-def StartInstance(instance, startup_paused):
+def StartInstance(instance, startup_paused, reason, store_reason=True):
"""Start an instance.
@type instance: L{objects.Instance}
@param instance: the instance object
@type startup_paused: bool
@param instance: pause instance at startup?
+ @type reason: list of reasons
+ @param reason: the reason trail for this startup
+ @type store_reason: boolean
+ @param store_reason: whether to store the shutdown reason trail on file
@rtype: None
"""
block_devices = _GatherAndLinkBlockDevs(instance)
hyper = hypervisor.GetHypervisor(instance.hypervisor)
hyper.StartInstance(instance, block_devices, startup_paused)
+ if store_reason:
+ _StoreInstReasonTrail(instance.name, reason)
except errors.BlockDeviceError, err:
_Fail("Block device error: %s", err, exc=True)
except errors.HypervisorError, err:
_Fail("Hypervisor error: %s", err, exc=True)
-def InstanceShutdown(instance, timeout):
+def InstanceShutdown(instance, timeout, reason, store_reason=True):
"""Shut an instance down.
@note: this functions uses polling with a hardcoded timeout.
@param instance: the instance object
@type timeout: integer
@param timeout: maximum timeout for soft shutdown
+ @type reason: list of reasons
+ @param reason: the reason trail for this shutdown
+ @type store_reason: boolean
+ @param store_reason: whether to store the shutdown reason trail on file
@rtype: None
"""
try:
hyper.StopInstance(instance, retry=self.tried_once)
+ if store_reason:
+ _StoreInstReasonTrail(instance.name, reason)
except errors.HypervisorError, err:
if iname not in hyper.ListInstances():
# if the instance is no longer existing, consider this a
_RemoveBlockDevLinks(iname, instance.disks)
-def InstanceReboot(instance, reboot_type, shutdown_timeout):
+def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
"""Reboot an instance.
@type instance: L{objects.Instance}
instance (instead of a call_instance_reboot RPC)
@type shutdown_timeout: integer
@param shutdown_timeout: maximum timeout for soft shutdown
+ @type reason: list of reasons
+ @param reason: the reason trail for this reboot
@rtype: None
"""
_Fail("Failed to soft reboot instance %s: %s", instance.name, err)
elif reboot_type == constants.INSTANCE_REBOOT_HARD:
try:
- InstanceShutdown(instance, shutdown_timeout)
- return StartInstance(instance, False)
+ InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
+ result = StartInstance(instance, False, reason, store_reason=False)
+ _StoreInstReasonTrail(instance.name, reason)
+ return result
except errors.HypervisorError, err:
_Fail("Failed to hard reboot instance %s: %s", instance.name, err)
else:
_Fail("Failed to get migration status: %s", err, exc=True)
-def BlockdevCreate(disk, size, owner, on_primary, info):
+def HotplugDevice(instance, action, dev_type, device, extra, seq):
+  """Hotplug a device
+
+  Hotplug is currently supported only for KVM Hypervisor.
+
+  @type instance: L{objects.Instance}
+  @param instance: the instance to which we hotplug a device
+  @type action: string
+  @param action: the hotplug action to perform
+  @type dev_type: string
+  @param dev_type: the device type to hotplug
+  @type device: either L{objects.NIC} or L{objects.Disk}
+  @param device: the device object to hotplug
+  @type extra: string
+  @param extra: extra info used by hotplug code (e.g. disk link)
+  @type seq: int
+  @param seq: the index of the device from master perspective
+  @raise RPCFail: in case instance does not have KVM hypervisor
+
+  """
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  try:
+    # Fail early if the hypervisor cannot hotplug this action/device type
+    hyper.VerifyHotplugSupport(instance, action, dev_type)
+  except errors.HotplugError, err:
+    _Fail("Hotplug is not supported: %s", err)
+
+  if action == constants.HOTPLUG_ACTION_ADD:
+    fn = hyper.HotAddDevice
+  elif action == constants.HOTPLUG_ACTION_REMOVE:
+    fn = hyper.HotDelDevice
+  elif action == constants.HOTPLUG_ACTION_MODIFY:
+    fn = hyper.HotModDevice
+  else:
+    # All known actions are handled above; this assert documents that an
+    # unknown action is a programming error (with -O it surfaces as a
+    # NameError on "fn" below instead)
+    assert action in constants.HOTPLUG_ALL_ACTIONS
+
+  return fn(instance, dev_type, device, extra, seq)
+
+
+def HotplugSupported(instance):
+  """Checks if hotplug is generally supported.
+
+  @type instance: L{objects.Instance}
+  @param instance: the instance to check
+  @raise RPCFail: if the instance's hypervisor does not support hotplug
+
+  """
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  try:
+    hyper.HotplugSupported(instance)
+  except errors.HotplugError, err:
+    _Fail("Hotplug is not supported: %s", err)
+
+
+def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
"""Creates a block device for an instance.
@type disk: L{objects.Disk}
@type info: string
@param info: string that will be sent to the physical device
creation, used for example to set (LVM) tags on LVs
+ @type excl_stor: boolean
+ @param excl_stor: Whether exclusive_storage is active
@return: the new unique_id of the device (this can sometime be
computed only after creation), or None. On secondary nodes,
clist.append(crdev)
try:
- device = bdev.Create(disk, clist)
+ device = bdev.Create(disk, clist, excl_stor)
except errors.BlockDeviceError, err:
_Fail("Can't create block device: %s", err)
rdev = None
if rdev is not None:
r_path = rdev.dev_path
- try:
- rdev.Remove()
- except errors.BlockDeviceError, err:
- msgs.append(str(err))
+
+ def _TryRemove():
+ try:
+ rdev.Remove()
+ return []
+ except errors.BlockDeviceError, err:
+ return [str(err)]
+
+ msgs.extend(utils.SimpleRetry([], _TryRemove,
+ constants.DISK_REMOVE_RETRY_INTERVAL,
+ constants.DISK_REMOVE_RETRY_TIMEOUT))
+
if not msgs:
DevCacheManager.RemoveCache(r_path)
This is a wrapper over _RecursiveAssembleBD.
@rtype: str or boolean
- @return: a C{/dev/...} path for primary nodes, and
- C{True} for secondary nodes
+ @return: a tuple with the C{/dev/...} path and the created symlink
+ for primary nodes, and (C{True}, C{True}) for secondary nodes
"""
try:
result = _RecursiveAssembleBD(disk, owner, as_primary)
if isinstance(result, bdev.BlockDev):
# pylint: disable=E1103
- result = result.dev_path
+ dev_path = result.dev_path
+ link_name = None
if as_primary:
- _SymlinkBlockDev(owner, result, idx)
+ link_name = _SymlinkBlockDev(owner, dev_path, idx)
+ elif result:
+ return result, result
+ else:
+ _Fail("Unexpected result from _RecursiveAssembleBD")
except errors.BlockDeviceError, err:
_Fail("Error while assembling disk: %s", err, exc=True)
except OSError, err:
_Fail("Error while symlinking disk: %s", err, exc=True)
- return result
+ return dev_path, link_name
def BlockdevShutdown(disk):
real_disk = _OpenRealBD(disk)
result["DISK_%d_PATH" % idx] = real_disk.dev_path
result["DISK_%d_ACCESS" % idx] = disk.mode
+ result["DISK_%d_UUID" % idx] = disk.uuid
+ if disk.name:
+ result["DISK_%d_NAME" % idx] = disk.name
if constants.HV_DISK_TYPE in instance.hvparams:
result["DISK_%d_FRONTEND_TYPE" % idx] = \
instance.hvparams[constants.HV_DISK_TYPE]
# NICs
for idx, nic in enumerate(instance.nics):
result["NIC_%d_MAC" % idx] = nic.mac
+ result["NIC_%d_UUID" % idx] = nic.uuid
+ if nic.name:
+ result["NIC_%d_NAME" % idx] = nic.name
if nic.ip:
result["NIC_%d_IP" % idx] = nic.ip
result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
if nic.nicparams[constants.NIC_LINK]:
result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
+ if nic.netinfo:
+ nobj = objects.Network.FromDict(nic.netinfo)
+ result.update(nobj.HooksDict("NIC_%d_" % idx))
if constants.HV_NIC_TYPE in instance.hvparams:
result["NIC_%d_FRONTEND_TYPE" % idx] = \
instance.hvparams[constants.HV_NIC_TYPE]
return result
+def DiagnoseExtStorage(top_dirs=None):
+  """Compute the validity for all ExtStorage Providers.
+
+  @type top_dirs: list
+  @param top_dirs: the list of directories in which to
+      search (if not given defaults to
+      L{pathutils.ES_SEARCH_PATH})
+  @rtype: list of L{objects.ExtStorage}
+  @return: a list of tuples (name, path, status, diagnose, parameters)
+      for all (potential) ExtStorage Providers under all
+      search paths, where:
+          - name is the (potential) ExtStorage Provider
+          - path is the full path to the ExtStorage Provider
+          - status True/False is the validity of the ExtStorage Provider
+          - diagnose is the error message for an invalid ExtStorage Provider,
+            otherwise empty
+          - parameters is a list of (name, help) parameters, if any
+
+  """
+  if top_dirs is None:
+    top_dirs = pathutils.ES_SEARCH_PATH
+
+  result = []
+  for dir_name in top_dirs:
+    if os.path.isdir(dir_name):
+      try:
+        f_names = utils.ListVisibleFiles(dir_name)
+      except EnvironmentError, err:
+        logging.exception("Can't list the ExtStorage directory %s: %s",
+                          dir_name, err)
+        # NOTE(review): this "break" also aborts scanning the *remaining*
+        # search paths; "continue" would keep scanning other dirs -- confirm
+        # this is intentional
+        break
+      for name in f_names:
+        es_path = utils.PathJoin(dir_name, name)
+        status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
+        if status:
+          diagnose = ""
+          parameters = es_inst.supported_parameters
+        else:
+          # On failure, es_inst carries the error message instead of the
+          # provider object
+          diagnose = es_inst
+          parameters = []
+        result.append((name, es_path, status, diagnose, parameters))
+
+  return result
+
+
def BlockdevGrow(disk, amount, dryrun, backingstore):
"""Grow a stack of block devices.
config.set(constants.INISECT_INS, "nic%d_mac" %
nic_count, "%s" % nic.mac)
config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
+ config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
+ "%s" % nic.network)
for param in constants.NICS_PARAMETER_TYPES:
config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
"%s" % nic.nicparams.get(param, None))
if not (constants.ENABLE_FILE_STORAGE or
constants.ENABLE_SHARED_FILE_STORAGE):
_Fail("File storage disabled at configure time")
- cfg = _GetConfig()
- fs_dir = os.path.normpath(fs_dir)
- base_fstore = cfg.GetFileStorageDir()
- base_shared = cfg.GetSharedFileStorageDir()
- if not (utils.IsBelowDir(base_fstore, fs_dir) or
- utils.IsBelowDir(base_shared, fs_dir)):
- _Fail("File storage directory '%s' is not under base file"
- " storage directory '%s' or shared storage directory '%s'",
- fs_dir, base_fstore, base_shared)
- return fs_dir
+
+ bdev.CheckFileStoragePath(fs_dir)
+
+ return os.path.normpath(fs_dir)
def CreateFileStorageDir(file_storage_dir):
# Write and replace the file atomically
utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
- gid=getents.masterd_gid)
+ gid=getents.daemons_gid, mode=constants.JOB_QUEUE_FILES_PERMS)
def JobQueueRename(old, new):
getents = runtime.GetEnts()
- utils.RenameFile(old, new, mkdir=True, mkdir_mode=0700,
- dir_uid=getents.masterd_uid, dir_gid=getents.masterd_gid)
+ utils.RenameFile(old, new, mkdir=True, mkdir_mode=0750,
+ dir_uid=getents.masterd_uid, dir_gid=getents.daemons_gid)
def BlockdevClose(instance_name, disks):
for rd in bdevs:
stats = rd.GetProcStatus()
- all_connected = (all_connected and
- (stats.is_connected or stats.is_in_resync))
+ if multimaster:
+ # In the multimaster case we have to wait explicitly until
+ # the resource is Connected and UpToDate/UpToDate, because
+ # we promote *both nodes* to primary directly afterwards.
+ # Being in resync is not enough, since there is a race during which we
+ # may promote a node with an Outdated disk to primary, effectively
+ # tearing down the connection.
+ all_connected = (all_connected and
+ stats.is_connected and
+ stats.is_disk_uptodate and
+ stats.peer_disk_uptodate)
+ else:
+ all_connected = (all_connected and
+ (stats.is_connected or stats.is_in_resync))
if stats.is_standalone:
# peer had different config info and this node became
hyper.PowercycleNode()
+def _VerifyRestrictedCmdName(cmd):
+  """Verifies a restricted command name.
+
+  @type cmd: string
+  @param cmd: Command name
+  @rtype: tuple; (boolean, string or None)
+  @return: The tuple's first element is the status; if C{False}, the second
+    element is an error message string, otherwise it's C{None}
+
+  """
+  if not cmd.strip():
+    return (False, "Missing command name")
+
+  # Reject anything containing a path component (guards against directory
+  # traversal, e.g. "../../bin/sh")
+  if os.path.basename(cmd) != cmd:
+    return (False, "Invalid command name")
+
+  if not constants.EXT_PLUGIN_MASK.match(cmd):
+    return (False, "Command name contains forbidden characters")
+
+  return (True, None)
+
+
+def _CommonRestrictedCmdCheck(path, owner):
+  """Common checks for restricted command file system directories and files.
+
+  @type path: string
+  @param path: Path to check
+  @param owner: C{None} or tuple containing UID and GID
+  @rtype: tuple; (boolean, string or C{os.stat} result)
+  @return: The tuple's first element is the status; if C{False}, the second
+    element is an error message string, otherwise it's the result of C{os.stat}
+
+  """
+  if owner is None:
+    # Default to root as owner
+    owner = (0, 0)
+
+  try:
+    st = os.stat(path)
+  except EnvironmentError, err:
+    return (False, "Can't stat(2) '%s': %s" % (path, err))
+
+  # Reject if any permission bit outside the allowed maximum is set (that
+  # includes group/other write bits and setuid/setgid/sticky bits)
+  if stat.S_IMODE(st.st_mode) & (~_RCMD_MAX_MODE):
+    return (False, "Permissions on '%s' are too permissive" % path)
+
+  if (st.st_uid, st.st_gid) != owner:
+    (owner_uid, owner_gid) = owner
+    return (False, "'%s' is not owned by %s:%s" % (path, owner_uid, owner_gid))
+
+  return (True, st)
+
+
+def _VerifyRestrictedCmdDirectory(path, _owner=None):
+  """Verifies restricted command directory.
+
+  @type path: string
+  @param path: Path to check
+  @param _owner: Expected (UID, GID) owner tuple, for tests only
+  @rtype: tuple; (boolean, string or None)
+  @return: The tuple's first element is the status; if C{False}, the second
+    element is an error message string, otherwise it's C{None}
+
+  """
+  (status, value) = _CommonRestrictedCmdCheck(path, _owner)
+
+  if not status:
+    return (False, value)
+
+  # Common check passed, so "value" is the os.stat result here
+  if not stat.S_ISDIR(value.st_mode):
+    return (False, "Path '%s' is not a directory" % path)
+
+  return (True, None)
+
+
+def _VerifyRestrictedCmd(path, cmd, _owner=None):
+  """Verifies a whole restricted command and returns its executable filename.
+
+  @type path: string
+  @param path: Directory containing restricted commands
+  @type cmd: string
+  @param cmd: Command name
+  @param _owner: Expected (UID, GID) owner tuple, for tests only
+  @rtype: tuple; (boolean, string)
+  @return: The tuple's first element is the status; if C{False}, the second
+    element is an error message string, otherwise the second element is the
+    absolute path to the executable
+
+  """
+  executable = utils.PathJoin(path, cmd)
+
+  (status, msg) = _CommonRestrictedCmdCheck(executable, _owner)
+
+  if not status:
+    return (False, msg)
+
+  # Ownership and permissions are fine; additionally require the execute bit
+  if not utils.IsExecutable(executable):
+    return (False, "access(2) thinks '%s' can't be executed" % executable)
+
+  return (True, executable)
+
+
+def _PrepareRestrictedCmd(path, cmd,
+                          _verify_dir=_VerifyRestrictedCmdDirectory,
+                          _verify_name=_VerifyRestrictedCmdName,
+                          _verify_cmd=_VerifyRestrictedCmd):
+  """Performs a number of tests on a restricted command.
+
+  @type path: string
+  @param path: Directory containing restricted commands
+  @type cmd: string
+  @param cmd: Command name
+  @param _verify_dir: Callable verifying the directory, for tests only
+  @param _verify_name: Callable verifying the command name, for tests only
+  @param _verify_cmd: Callable verifying the executable, for tests only
+  @return: Same as L{_VerifyRestrictedCmd}
+
+  """
+  # Verify the directory first
+  (status, msg) = _verify_dir(path)
+  if status:
+    # Check command if everything was alright
+    (status, msg) = _verify_name(cmd)
+
+  if not status:
+    return (False, msg)
+
+  # Check actual executable
+  return _verify_cmd(path, cmd)
+
+
+def RunRestrictedCmd(cmd,
+                     _lock_timeout=_RCMD_LOCK_TIMEOUT,
+                     _lock_file=pathutils.RESTRICTED_COMMANDS_LOCK_FILE,
+                     _path=pathutils.RESTRICTED_COMMANDS_DIR,
+                     _sleep_fn=time.sleep,
+                     _prepare_fn=_PrepareRestrictedCmd,
+                     _runcmd_fn=utils.RunCmd,
+                     _enabled=constants.ENABLE_RESTRICTED_COMMANDS):
+  """Executes a restricted command after performing strict tests.
+
+  @type cmd: string
+  @param cmd: Command name
+  @rtype: string
+  @return: Command output
+  @raise RPCFail: In case of an error
+
+  """
+  logging.info("Preparing to run restricted command '%s'", cmd)
+
+  if not _enabled:
+    _Fail("Restricted commands disabled at configure time")
+
+  lock = None
+  try:
+    cmdresult = None
+    try:
+      lock = utils.FileLock.Open(_lock_file)
+      lock.Exclusive(blocking=True, timeout=_lock_timeout)
+
+      (status, value) = _prepare_fn(_path, cmd)
+
+      if status:
+        # Release the lock right after the child process has been forked, so
+        # that other command requests are not blocked for the whole runtime
+        # of the executed command
+        cmdresult = _runcmd_fn([value], env={}, reset_env=True,
+                               postfork_fn=lambda _: lock.Unlock())
+      else:
+        logging.error(value)
+    except Exception: # pylint: disable=W0703
+      # Keep original error in log
+      logging.exception("Caught exception")
+
+    if cmdresult is None:
+      # Constant delay before failing, see L{_RCMD_INVALID_DELAY}
+      logging.info("Sleeping for %0.1f seconds before returning",
+                   _RCMD_INVALID_DELAY)
+      _sleep_fn(_RCMD_INVALID_DELAY)
+
+      # Do not include original error message in returned error
+      _Fail("Executing command '%s' failed" % cmd)
+    elif cmdresult.failed or cmdresult.fail_reason:
+      _Fail("Restricted command '%s' failed: %s; output: %s",
+            cmd, cmdresult.fail_reason, cmdresult.output)
+    else:
+      return cmdresult.output
+  finally:
+    if lock is not None:
+      # Release lock at last
+      lock.Close()
+      lock = None
+
+
+def SetWatcherPause(until, _filename=pathutils.WATCHER_PAUSEFILE):
+  """Creates or removes the watcher pause file.
+
+  @type until: None or number
+  @param until: Unix timestamp saying until when the watcher shouldn't run
+  @param _filename: Path of the pause file, for tests only
+  @rtype: None
+
+  """
+  if until is None:
+    logging.info("Received request to no longer pause watcher")
+    utils.RemoveFile(_filename)
+  else:
+    logging.info("Received request to pause watcher until %s", until)
+
+    if not ht.TNumber(until):
+      _Fail("Duration must be numeric")
+
+    # World-readable (0644) so other processes can inspect the timestamp
+    utils.WriteFile(_filename, data="%d\n" % (until, ), mode=0644)
+
+
class HooksRunner(object):
"""Hook runner.