import os.path
import shutil
import time
import tempfile
import stat
import errno
import re
import subprocess
+import random
from ganeti import logger
from ganeti import errors
from ganeti import ssconf
+def _GetSshRunner():
+  """Return a fresh ssh.SshRunner instance.
+
+  """
+  return ssh.SshRunner()
+
+
def StartMaster():
"""Activate local node as master node.
def StopMaster():
"""Deactivate this node as master.
- This does two things:
- - run the master stop script
- - remove link to master cron script.
+ This runs the master stop script.
"""
result = utils.RunCmd([constants.MASTER_SCRIPT, "-d", "stop"])
if os.path.isfile(full_name) and not os.path.islink(full_name):
utils.RemoveFile(full_name)
-
try:
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
except errors.OpExecError, err:
if 'nodelist' in what:
result['nodelist'] = {}
+ random.shuffle(what['nodelist'])
for node in what['nodelist']:
- success, message = ssh.VerifyNodeHostname(node)
+ success, message = _GetSshRunner().VerifyNodeHostname(node)
if not success:
result['nodelist'][node] = message
return result
"""Compute list of logical volumes and their size.
Returns:
- dictionary of all partions (key) with their size:
- test1: 20.06MiB
+      dictionary of all partitions (key) with their size (in MiB), inactive
+      and online status:
+      {'test1': ('20.06', True, True)}
"""
- result = utils.RunCmd(["lvs", "--noheadings", "--units=m",
- "-oname,size", vg_name])
+ lvs = {}
+ sep = '|'
+ result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
+ "--separator=%s" % sep,
+ "-olv_name,lv_size,lv_attr", vg_name])
if result.failed:
logger.Error("Failed to list logical volumes, lvs output: %s" %
result.output)
- return {}
+ return result.output
- lvlist = [line.split() for line in result.output.splitlines()]
- return dict(lvlist)
+ for line in result.stdout.splitlines():
+ line = line.strip().rstrip(sep)
+ name, size, attr = line.split(sep)
+ if len(attr) != 6:
+ attr = '------'
+ inactive = attr[4] == '-'
+ online = attr[5] == 'o'
+ lvs[name] = (size, inactive, online)
+
+ return lvs
def ListVolumeGroups():
'vg': line[3].strip(),
}
- return [map_line(line.split('|')) for line in result.output.splitlines()]
+ return [map_line(line.split('|')) for line in result.stdout.splitlines()]
def BridgesExist(bridges_list):
logfile)
result = utils.RunCmd(command)
-
if result.failed:
- logger.Error("os create command '%s' returned error: %s"
+ logger.Error("os create command '%s' returned error: %s, logfile: %s,"
" output: %s" %
- (command, result.fail_reason, result.output))
+ (command, result.fail_reason, logfile, result.output))
return False
return True
vg_free is the free size of the volume group in MiB
pv_count are the number of physical disks in that vg
+ If an error occurs during gathering of data, we return the same dict
+ with keys all set to None.
+
"""
+ retdic = dict.fromkeys(["vg_size", "vg_free", "pv_count"])
+
retval = utils.RunCmd(["vgs", "-ovg_size,vg_free,pv_count", "--noheadings",
"--nosuffix", "--units=m", "--separator=:", vg_name])
if retval.failed:
errmsg = "volume group %s not present" % vg_name
logger.Error(errmsg)
- raise errors.LVMError(errmsg)
- valarr = retval.stdout.strip().split(':')
- retdic = {
- "vg_size": int(round(float(valarr[0]), 0)),
- "vg_free": int(round(float(valarr[1]), 0)),
- "pv_count": int(valarr[2]),
- }
+ return retdic
+ valarr = retval.stdout.strip().rstrip(':').split(':')
+ if len(valarr) == 3:
+ try:
+ retdic = {
+ "vg_size": int(round(float(valarr[0]), 0)),
+ "vg_free": int(round(float(valarr[1]), 0)),
+ "pv_count": int(valarr[2]),
+ }
+ except ValueError, err:
+ logger.Error("Fail to parse vgs output: %s" % str(err))
+ else:
+ logger.Error("vgs output has the wrong number of fields (expected"
+ " three): %s" % str(valarr))
return retdic
return True
-def CreateBlockDevice(disk, size, on_primary, info):
+def CreateBlockDevice(disk, size, owner, on_primary, info):
"""Creates a block device for an instance.
Args:
- bdev: a ganeti.objects.Disk object
- size: the size of the physical underlying devices
- do_open: if the device should be `Assemble()`-d and
- `Open()`-ed after creation
+ disk: a ganeti.objects.Disk object
+ size: the size of the physical underlying device
+ owner: a string with the name of the instance
+ on_primary: a boolean indicating if it is the primary node or not
+ info: string that will be sent to the physical device creation
Returns:
the new unique_id of the device (this can sometime be
clist = []
if disk.children:
for child in disk.children:
- crdev = _RecursiveAssembleBD(child, on_primary)
+ crdev = _RecursiveAssembleBD(child, owner, on_primary)
if on_primary or disk.AssembleOnSecondary():
# we need the children open in case the device itself has to
# be assembled
crdev.Open()
- else:
- crdev.Close()
clist.append(crdev)
try:
device = bdev.FindDevice(disk.dev_type, disk.physical_id, clist)
raise ValueError("Can't create child device for %s, %s" %
(disk, size))
if on_primary or disk.AssembleOnSecondary():
- device.Assemble()
+ if not device.Assemble():
+ errorstring = "Can't assemble device after creation"
+ logger.Error(errorstring)
+ raise errors.BlockDeviceError("%s, very unusual event - check the node"
+ " daemon logs" % errorstring)
device.SetSyncSpeed(constants.SYNC_SPEED)
if on_primary or disk.OpenOnSecondary():
device.Open(force=True)
+ DevCacheManager.UpdateCache(device.dev_path, owner,
+ on_primary, disk.iv_name)
device.SetInfo(info)
logger.Info("Can't attach to device %s in remove" % disk)
rdev = None
if rdev is not None:
+ r_path = rdev.dev_path
result = rdev.Remove()
+ if result:
+ DevCacheManager.RemoveCache(r_path)
else:
result = True
if disk.children:
return result
-def _RecursiveAssembleBD(disk, as_primary):
+def _RecursiveAssembleBD(disk, owner, as_primary):
"""Activate a block device for an instance.
This is run on the primary and secondary nodes for an instance.
"""
children = []
if disk.children:
+ mcn = disk.ChildrenNeeded()
+ if mcn == -1:
+ mcn = 0 # max number of Nones allowed
+ else:
+ mcn = len(disk.children) - mcn # max number of Nones
for chld_disk in disk.children:
- children.append(_RecursiveAssembleBD(chld_disk, as_primary))
+ try:
+ cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
+ except errors.BlockDeviceError, err:
+ if children.count(None) >= mcn:
+ raise
+ cdev = None
+ logger.Debug("Error in child activation: %s" % str(err))
+ children.append(cdev)
if as_primary or disk.AssembleOnSecondary():
r_dev = bdev.AttachOrAssemble(disk.dev_type, disk.physical_id, children)
result = r_dev
if as_primary or disk.OpenOnSecondary():
r_dev.Open()
- else:
- r_dev.Close()
+ DevCacheManager.UpdateCache(r_dev.dev_path, owner,
+ as_primary, disk.iv_name)
+
else:
result = True
return result
-def AssembleBlockDevice(disk, as_primary):
+def AssembleBlockDevice(disk, owner, as_primary):
"""Activate a block device for an instance.
This is a wrapper over _RecursiveAssembleBD.
True for secondary nodes
"""
- result = _RecursiveAssembleBD(disk, as_primary)
+ result = _RecursiveAssembleBD(disk, owner, as_primary)
if isinstance(result, bdev.BlockDev):
result = result.dev_path
return result
"""
r_dev = _RecursiveFindBD(disk)
if r_dev is not None:
+ r_path = r_dev.dev_path
result = r_dev.Shutdown()
+ if result:
+ DevCacheManager.RemoveCache(r_path)
else:
result = True
if disk.children:
return False
new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
if new_bdevs.count(None) > 0:
- logger.Error("Can't find new device(s) to add")
+ logger.Error("Can't find new device(s) to add: %s:%s" %
+ (new_bdevs, new_cdevs))
return False
parent_bdev.AddChildren(new_bdevs)
return True
"""
parent_bdev = _RecursiveFindBD(parent_cdev)
if parent_bdev is None:
+ logger.Error("Can't find parent in remove children: %s" % parent_cdev)
return False
- new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
- if new_bdevs.count(None) > 0:
- return False
- parent_bdev.RemoveChildren(new_bdevs)
+ devs = []
+ for disk in new_cdevs:
+ rpath = disk.StaticDevPath()
+ if rpath is None:
+ bd = _RecursiveFindBD(disk)
+ if bd is None:
+ logger.Error("Can't find dynamic device %s while removing children" %
+ disk)
+ return False
+ else:
+ devs.append(bd.dev_path)
+ else:
+ devs.append(rpath)
+ parent_bdev.RemoveChildren(devs)
return True
rbd = _RecursiveFindBD(disk)
if rbd is None:
return rbd
- sync_p, est_t, is_degr = rbd.GetSyncStatus()
- return rbd.dev_path, rbd.major, rbd.minor, sync_p, est_t, is_degr
+ return (rbd.dev_path, rbd.major, rbd.minor) + rbd.GetSyncStatus()
def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
file_name)
return False
- allowed_files = [constants.CLUSTER_CONF_FILE, "/etc/hosts",
- constants.SSH_KNOWN_HOSTS_FILE]
+ allowed_files = [
+ constants.CLUSTER_CONF_FILE,
+ constants.ETC_HOSTS,
+ constants.SSH_KNOWN_HOSTS_FILE,
+ ]
allowed_files.extend(ssconf.SimpleStore().GetFileList())
if file_name not in allowed_files:
logger.Error("Filename passed to UploadFile not in allowed"
" upload targets: '%s'" % file_name)
return False
- dir_name, small_name = os.path.split(file_name)
- fd, new_name = tempfile.mkstemp('.new', small_name, dir_name)
- # here we need to make sure we remove the temp file, if any error
- # leaves it in place
- try:
- os.chown(new_name, uid, gid)
- os.chmod(new_name, mode)
- os.write(fd, data)
- os.fsync(fd)
- os.utime(new_name, (atime, mtime))
- os.rename(new_name, file_name)
- finally:
- os.close(fd)
- utils.RemoveFile(new_name)
+ utils.WriteFile(file_name, data=data, mode=mode, uid=uid, gid=gid,
+ atime=atime, mtime=mtime)
return True
detail = str(err)
return detail
-def _OSSearch(name, search_path=None):
- """Search for OSes with the given name in the search_path.
-
- Args:
- name: The name of the OS to look for
- search_path: List of dirs to search (defaults to constants.OS_SEARCH_PATH)
-
- Returns:
- The base_dir the OS resides in
-
- """
-
- if search_path is None:
- search_path = constants.OS_SEARCH_PATH
-
- for dir in search_path:
- t_os_dir = os.path.sep.join([dir, name])
- if os.path.isdir(t_os_dir):
- return dir
-
- return None
def _OSOndiskVersion(name, os_dir):
"""Compute and return the API version of a given OS.
case when this is not a valid OS name.
"""
-
api_file = os.path.sep.join([os_dir, "ganeti_api_version"])
try:
def DiagnoseOS(top_dirs=None):
"""Compute the validity for all OSes.
- For each name in all the given top directories (if not given defaults i
- to constants.OS_SEARCH_PATH it will return an object. If this is a valid
- os, the object will be an instance of the object.OS class. If not,
- it will be an instance of errors.InvalidOS and this signifies that
- this name does not correspond to a valid OS.
+ Returns an OS object for each name in all the given top directories
+ (if not given defaults to constants.OS_SEARCH_PATH)
Returns:
- list of objects
+ list of OS objects
"""
if top_dirs is None:
top_dirs = constants.OS_SEARCH_PATH
result = []
- for dir in top_dirs:
- if os.path.isdir(dir):
+ for dir_name in top_dirs:
+ if os.path.isdir(dir_name):
try:
- f_names = utils.ListVisibleFiles(dir)
+ f_names = utils.ListVisibleFiles(dir_name)
except EnvironmentError, err:
- logger.Error("Can't list the OS directory %s: %s" % (dir,str(err)))
+ logger.Error("Can't list the OS directory %s: %s" %
+ (dir_name, str(err)))
break
for name in f_names:
try:
- os_inst = OSFromDisk(name, base_dir=dir)
+ os_inst = OSFromDisk(name, base_dir=dir_name)
result.append(os_inst)
except errors.InvalidOS, err:
- result.append(err)
+ result.append(objects.OS.FromInvalidOS(err))
return result
"""
if base_dir is None:
- base_dir = _OSSearch(name)
-
- if base_dir is None:
- raise errors.InvalidOS(name, None, "OS dir not found in search path")
+ os_dir = utils.FindFile(name, constants.OS_SEARCH_PATH, os.path.isdir)
+ if os_dir is None:
+ raise errors.InvalidOS(name, None, "OS dir not found in search path")
+ else:
+ os_dir = os.path.sep.join([base_dir, name])
- os_dir = os.path.sep.join([base_dir, name])
api_version = _OSOndiskVersion(name, os_dir)
if api_version != constants.OS_API_VERSION:
script)
- return objects.OS(name=name, path=os_dir,
+ return objects.OS(name=name, path=os_dir, status=constants.OS_VALID_STATUS,
create_script=os_scripts['create'],
export_script=os_scripts['export'],
import_script=os_scripts['import'],
return None
else:
raise errors.ProgrammerError("Cannot snapshot non-lvm block device"
- "'%s' of type '%s'" %
+ " '%s' of type '%s'" %
(disk.unique_id, disk.dev_type))
destcmd = utils.BuildShellCmd("mkdir -p %s && cat > %s/%s",
destdir, destdir, destfile)
- remotecmd = ssh.BuildSSHCmd(dest_node, constants.GANETI_RUNAS, destcmd)
-
-
+ remotecmd = _GetSshRunner().BuildCmd(dest_node, constants.GANETI_RUNAS,
+ destcmd)
# all commands have been checked, so we're safe to combine them
command = '|'.join([expcmd, comprcmd, utils.ShellQuoteArgs(remotecmd)])
config.set(constants.INISECT_INS, 'memory', '%d' % instance.memory)
config.set(constants.INISECT_INS, 'vcpus', '%d' % instance.vcpus)
config.set(constants.INISECT_INS, 'disk_template', instance.disk_template)
+
+ nic_count = 0
for nic_count, nic in enumerate(instance.nics):
config.set(constants.INISECT_INS, 'nic%d_mac' %
nic_count, '%s' % nic.mac)
config.set(constants.INISECT_INS, 'nic%d_ip' % nic_count, '%s' % nic.ip)
+ config.set(constants.INISECT_INS, 'nic%d_bridge' % nic_count, '%s' % nic.bridge)
# TODO: redundant: on load can read nics until it doesn't exist
config.set(constants.INISECT_INS, 'nic_count' , '%d' % nic_count)
+ disk_count = 0
for disk_count, disk in enumerate(snap_disks):
config.set(constants.INISECT_INS, 'disk%d_ivname' % disk_count,
('%s' % disk.iv_name))
os.mkdir(constants.LOG_OS_DIR, 0750)
destcmd = utils.BuildShellCmd('cat %s', src_image)
- remotecmd = ssh.BuildSSHCmd(src_node, constants.GANETI_RUNAS, destcmd)
+ remotecmd = _GetSshRunner().BuildCmd(src_node, constants.GANETI_RUNAS,
+ destcmd)
comprcmd = "gunzip"
impcmd = utils.BuildShellCmd("(cd %s; %s -i %s -b %s -s %s &>%s)",
return True
+def RenameBlockDevices(devlist):
+  """Rename a list of block devices.
+
+  The devlist argument is a list of tuples (disk, new_unique_id). The
+  return value will be a combined boolean result (True only if all
+  renames succeeded).
+
+  """
+  result = True
+  for disk, unique_id in devlist:
+    dev = _RecursiveFindBD(disk)
+    # a device we cannot find counts as a failed rename, but we keep
+    # going so the remaining devices still get renamed
+    if dev is None:
+      result = False
+      continue
+    try:
+      old_rpath = dev.dev_path
+      dev.Rename(unique_id)
+      new_rpath = dev.dev_path
+      if old_rpath != new_rpath:
+        DevCacheManager.RemoveCache(old_rpath)
+        # FIXME: we should add the new cache information here, like:
+        # DevCacheManager.UpdateCache(new_rpath, owner, ...)
+        # but we don't have the owner here - maybe parse from existing
+        # cache? for now, we only lose lvm data when we rename, which
+        # is less critical than DRBD or MD
+    except errors.BlockDeviceError, err:
+      logger.Error("Can't rename device '%s' to '%s': %s" %
+                   (dev, unique_id, err))
+      result = False
+  return result
+
+
+def _TransformFileStorageDir(file_storage_dir):
+  """Checks whether given file_storage_dir is valid.
+
+  Checks whether the given file_storage_dir is within the cluster-wide
+  default file_storage_dir stored in SimpleStore. Only paths under that
+  directory are allowed.
+
+  Args:
+    file_storage_dir: string with path
+
+  Returns:
+    normalized file_storage_dir (string) if valid, None otherwise
+
+  """
+  file_storage_dir = os.path.normpath(file_storage_dir)
+  base_file_storage_dir = ssconf.SimpleStore().GetFileStorageDir()
+  # os.path.commonprefix compares character-wise, so a sibling such as
+  # "<base>-evil" would wrongly pass; compare path components instead
+  if (file_storage_dir != base_file_storage_dir and
+      not file_storage_dir.startswith(base_file_storage_dir + os.sep)):
+    logger.Error("file storage directory '%s' is not under base file"
+                 " storage directory '%s'" %
+                 (file_storage_dir, base_file_storage_dir))
+    return None
+  return file_storage_dir
+
+
+def CreateFileStorageDir(file_storage_dir):
+  """Create file storage directory.
+
+  Args:
+    file_storage_dir: string containing the path
+
+  Returns:
+    tuple with first element a boolean indicating whether dir
+    creation was successful or not
+
+  """
+  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
+  # the trailing commas build the 1-tuple result expected by callers
+  result = True,
+  if not file_storage_dir:
+    result = False,
+  else:
+    if os.path.exists(file_storage_dir):
+      if not os.path.isdir(file_storage_dir):
+        logger.Error("'%s' is not a directory" % file_storage_dir)
+        result = False,
+    else:
+      try:
+        os.makedirs(file_storage_dir, 0750)
+      except OSError, err:
+        logger.Error("Cannot create file storage directory '%s': %s" %
+                     (file_storage_dir, err))
+        result = False,
+  return result
+
+
+def RemoveFileStorageDir(file_storage_dir):
+  """Remove file storage directory.
+
+  Remove it only if it's empty. If not log an error and return.
+
+  Args:
+    file_storage_dir: string containing the path
+
+  Returns:
+    tuple with first element a boolean indicating whether dir
+    removal was successful or not
+
+  """
+  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
+  # the trailing commas build the 1-tuple result expected by callers
+  result = True,
+  if not file_storage_dir:
+    result = False,
+  else:
+    if os.path.exists(file_storage_dir):
+      if not os.path.isdir(file_storage_dir):
+        logger.Error("'%s' is not a directory" % file_storage_dir)
+        result = False,
+      # deletes dir only if empty, otherwise we want to return False
+      # NOTE(review): the rmdir below also runs when the path is not a
+      # directory, logging a second error for the same path - confirm
+      # whether that double log is intended
+      try:
+        os.rmdir(file_storage_dir)
+      except OSError, err:
+        logger.Error("Cannot remove file storage directory '%s': %s" %
+                     (file_storage_dir, err))
+        result = False,
+  return result
+
+
+def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
+  """Rename the file storage directory.
+
+  Args:
+    old_file_storage_dir: string containing the old path
+    new_file_storage_dir: string containing the new path
+
+  Returns:
+    tuple with first element a boolean indicating whether dir
+    rename was successful or not
+
+  """
+  old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
+  new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
+  # the trailing commas build the 1-tuple result expected by callers
+  result = True,
+  if not old_file_storage_dir or not new_file_storage_dir:
+    result = False,
+  else:
+    if not os.path.exists(new_file_storage_dir):
+      if os.path.isdir(old_file_storage_dir):
+        try:
+          os.rename(old_file_storage_dir, new_file_storage_dir)
+        except OSError, err:
+          logger.Error("Cannot rename '%s' to '%s': %s"
+                       % (old_file_storage_dir, new_file_storage_dir, err))
+          result = False,
+      else:
+        logger.Error("'%s' is not a directory" % old_file_storage_dir)
+        result = False,
+    else:
+      if os.path.exists(old_file_storage_dir):
+        # parenthesize the format args: the original bound '%' to the
+        # first path only (TypeError: not enough arguments for format
+        # string) and passed the second path as an extra positional
+        # argument to logger.Error
+        logger.Error("Cannot rename '%s' to '%s'. Both locations exist." %
+                     (old_file_storage_dir, new_file_storage_dir))
+        result = False,
+  return result
+
+
class HooksRunner(object):
"""Hook runner.
Args:
- hooks_base_dir: if not None, this overrides the
constants.HOOKS_BASE_DIR (useful for unittests)
- - logs_base_dir: if not None, this overrides the
- constants.LOG_HOOKS_DIR (useful for unittests)
- - logging: enable or disable logging of script output
"""
if hooks_base_dir is None:
"""Exec one hook script.
Args:
- - phase: the phase
- script: the full path to the script
- env: the environment with which to exec the script
fdstdin = open("/dev/null", "r")
child = subprocess.Popen([script], stdin=fdstdin, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=True,
- shell=False, cwd="/",env=env)
+ shell=False, cwd="/", env=env)
output = ""
try:
output = child.stdout.read(4096)
rr.append(("%s/%s" % (subdir, relname), rrval, output))
return rr
+
+
+class IAllocatorRunner(object):
+  """IAllocator runner.
+
+  This class is instantiated on the node side (ganeti-noded) and not on
+  the master side.
+
+  """
+  def Run(self, name, idata):
+    """Run an iallocator script.
+
+    Args:
+      name: name of the allocator script, looked up in
+        constants.IALLOCATOR_SEARCH_PATH
+      idata: string with the allocation request; it is written to a
+        temporary file whose name is passed as the script's single
+        argument
+
+    Return value: tuple of:
+      - run status (one of the IARUN_ constants)
+      - stdout
+      - stderr
+      - fail reason (as from utils.RunResult)
+
+    """
+    alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
+                                  os.path.isfile)
+    if alloc_script is None:
+      return (constants.IARUN_NOTFOUND, None, None, None)
+
+    # NOTE(review): this patch removes "import tempfile" at the top of
+    # the file while this call still needs it - confirm the import is
+    # kept, otherwise this raises NameError at runtime
+    fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
+    try:
+      os.write(fd, idata)
+      os.close(fd)
+      result = utils.RunCmd([alloc_script, fin_name])
+      if result.failed:
+        return (constants.IARUN_FAILURE, result.stdout, result.stderr,
+                result.fail_reason)
+    finally:
+      # the input file is removed whether the script succeeds or fails
+      os.unlink(fin_name)
+
+    return (constants.IARUN_SUCCESS, result.stdout, result.stderr, None)
+
+
+class DevCacheManager(object):
+  """Simple class for managing a cache of block device information.
+
+  """
+  _DEV_PREFIX = "/dev/"
+  _ROOT_DIR = constants.BDEV_CACHE_DIR
+
+  @classmethod
+  def _ConvertPath(cls, dev_path):
+    """Converts a /dev/name path to the cache file name.
+
+    This replaces slashes with underscores and strips the /dev
+    prefix. It then returns the full path to the cache file
+
+    """
+    if dev_path.startswith(cls._DEV_PREFIX):
+      dev_path = dev_path[len(cls._DEV_PREFIX):]
+    dev_path = dev_path.replace("/", "_")
+    fpath = "%s/bdev_%s" % (cls._ROOT_DIR, dev_path)
+    return fpath
+
+  @classmethod
+  def UpdateCache(cls, dev_path, owner, on_primary, iv_name):
+    """Updates the cache information for a given device.
+
+    Args:
+      dev_path: the path of the device
+      owner: string with the name of the owning instance
+      on_primary: whether this is the instance's primary node
+      iv_name: the instance-visible name of the device, or None
+
+    """
+    if dev_path is None:
+      logger.Error("DevCacheManager.UpdateCache got a None dev_path")
+      return
+    fpath = cls._ConvertPath(dev_path)
+    if on_primary:
+      state = "primary"
+    else:
+      state = "secondary"
+    if iv_name is None:
+      iv_name = "not_visible"
+    fdata = "%s %s %s\n" % (str(owner), state, iv_name)
+    try:
+      utils.WriteFile(fpath, data=fdata)
+    except EnvironmentError, err:
+      logger.Error("Can't update bdev cache for %s, error %s" %
+                   (dev_path, str(err)))
+
+  @classmethod
+  def RemoveCache(cls, dev_path):
+    """Remove data for a dev_path.
+
+    """
+    if dev_path is None:
+      logger.Error("DevCacheManager.RemoveCache got a None dev_path")
+      return
+    fpath = cls._ConvertPath(dev_path)
+    try:
+      utils.RemoveFile(fpath)
+    except EnvironmentError, err:
+      # fixed copy-pasted message: this is a cache removal failure,
+      # not an update failure
+      logger.Error("Can't remove bdev cache for %s, error %s" %
+                   (dev_path, str(err)))