4 # Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Block device abstraction"""
31 from ganeti import utils
32 from ganeti import errors
33 from ganeti import constants
34 from ganeti import objects
35 from ganeti import compat
36 from ganeti import pathutils
37 from ganeti import serializer
38 from ganeti.storage import drbd
39 from ganeti.storage import base
class RbdShowmappedJsonError(Exception):
  """`rbd showmapped' JSON formatting error Exception class.

  Raised internally when the JSON output of `rbd showmapped' cannot be
  used, so callers can fall back to plain-text parsing.

  """
49 def _CheckResult(result):
50 """Throws an error if the given result is a failed one.
52 @param result: result from RunCmd
56 base.ThrowError("Command: %s error: %s - %s",
57 result.cmd, result.fail_reason, result.output)
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  """
  # NOTE(review): the initialization of 'paths' (the static set of
  # forbidden directories) appears elided from this transcription —
  # confirm against the full source.
  # Add "/usr"-style prefixed variants of common binary/library dirs.
  for prefix in ["", "/usr", "/usr/local"]:
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
                     ["bin", "lib", "lib32", "lib64", "sbin"]))
  # Normalize everything and return an immutable, duplicate-free set.
  return compat.UniqueFrozenset(map(os.path.normpath, paths))
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
  def _Check(path):
    # A path is wrong if it is relative, is itself a forbidden prefix,
    # or lies below one of the forbidden prefixes.  (As transcribed the
    # predicate body was orphaned at function level without this inner
    # function, leaving 'path' undefined.)
    return (not os.path.isabs(path) or
            path in _forbidden or
            compat.any(utils.IsBelowDir(p, path) for p in _forbidden))

  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  @param _filename: path of the allowed-paths file (overridable for tests)

  """
  # Load the configured paths and cross-check them against the forbidden
  # prefixes.
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
                                      " got '%s'" % path)

  # Accept the path as soon as it lies below one of the allowed entries;
  # relative entries in the allowed list are ignored with a log message.
  # (As transcribed this loop's for/continue/break/else scaffolding was
  # elided, leaving orphan statements.)
  for i in allowed:
    if not os.path.isabs(i):
      logging.info("Ignoring relative path '%s' for file storage", i)
      continue

    if utils.IsBelowDir(i, path):
      break
  else:
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
                                      " storage" % path)
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
  # A missing or unreadable file simply means no paths are allowed.
  # (As transcribed the 'try:' line and the empty-list fallback were
  # elided, leaving an orphan 'except' clause.)
  try:
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    return []

  return utils.FilterEmptyLinesAndComments(contents)
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)

  # Reject paths under well-known system prefixes before checking the
  # whitelist.  (The continuation line terminating this raise was elided
  # in the transcription.)
  if _ComputeWrongFileStoragePaths([path]):
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
                                      path)

  _CheckFileStoragePath(path, allowed)
class LogicalVolume(base.BlockDev):
  """Logical Volume block device.

  """
  # Valid characters for VG/LV names; restricted names/substrings are
  # rejected by _ValidateName below.
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  # Matches "device(extent)" entries in the lvs "devices" field.
  _PARSE_PV_DEV_RE = re.compile("^([^ ()]+)\([0-9]+\)$")
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])

  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self._vg_name, self._lv_name = unique_id
    self._ValidateName(self._vg_name)
    self._ValidateName(self._lv_name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    # Considered degraded until Attach() proves otherwise.
    self._degraded = True
    self.major = self.minor = self.pe_size = self.stripe_count = None

  # NOTE(review): a @staticmethod decorator appears elided here.
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty

    """
    assert len(pvs_info) > 0
    smallest = min([pv.size for pv in pvs_info])
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)

  # NOTE(review): a @staticmethod decorator appears elided here.
  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @param size: LV size in MiB
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @return: number of PVs needed

    """
    assert len(pvs_info) > 0
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
    return int(math.ceil(float(size) / pv_size))

  # NOTE(review): a @staticmethod decorator appears elided here.
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    """
    empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
    if max_pvs is not None:
      empty_pvs = empty_pvs[:max_pvs]
    return map((lambda pv: pv.name), empty_pvs)

  # NOTE(review): a @classmethod decorator appears elided here.
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new logical volume.

    """
    # NOTE(review): several lines of this method appear elided from the
    # transcription (call continuations, if/else scaffolding, 'break');
    # fragments are kept verbatim below — confirm against the full source.
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
      msg = "No (empty) PVs found"
      msg = "Can't compute PV info for vg %s" % vg_name
    # Prefer PVs with the most free space.
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)
    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      base.ThrowError("Some of your PVs have the invalid character ':' in their"
                      " name, this is not supported - please filter them out"
                      " in lvm.conf using either 'filter' or 'preferred_names'")
    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    stripes = min(current_pvs, desired_stripes)
    (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
    req_pvs = cls._ComputeNumPvs(size, pvs_info)
    if spindles < req_pvs:
      base.ThrowError("Requested number of spindles (%s) is not enough for"
                      " a disk of %d MB (at least %d spindles needed)",
                      spindles, size, req_pvs)
    pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
    current_pvs = len(pvlist)
    if current_pvs < req_pvs:
      base.ThrowError("Not enough empty PVs (spindles) to create a disk of %d"
                      " MB: %d available, %d needed",
                      size, current_pvs, req_pvs)
    assert current_pvs == len(pvlist)
    if stripes > current_pvs:
      # No warning issued for this, as it's no surprise
      stripes = current_pvs
    if stripes < desired_stripes:
      logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                      " available.", desired_stripes, vg_name, current_pvs)
    free_size = sum([pv.free for pv in pvs_info])
    # The size constraint should have been checked from the master before
    # calling the create function.
      base.ThrowError("Not enough free space: required %s,"
                      " available %s", size, free_size)
    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
    base.ThrowError("LV create failed (%s): %s",
                    result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)

  # NOTE(review): a @staticmethod decorator appears elided here.
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM Volume infos using lvm_cmd

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of dicts each with the parsed fields

    """
    # NOTE(review): separator definition, failure guard and final return
    # appear elided here.
      raise errors.ProgrammerError("No fields specified")
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]
    result = utils.RunCmd(cmd)
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))
    for line in result.stdout.splitlines():
      splitted_fields = line.strip().split(sep)
      if len(fields) != len(splitted_fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
      data.append(splitted_fields)

  # NOTE(review): a @classmethod decorator appears elided here.
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs
    @param include_lvs: whether to include a list of LVs hosted on each PV

    @return: list of objects.LvmPvInfo objects

    """
    # We request "lv_name" field only if we care about LVs, so we don't get
    # a long list of entries with many duplicates unless we really have to.
    # The duplicate "pv_name" field will be ignored.
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size", lvfield])
    except errors.GenericError, err:
      logging.error("Can't get PV information: %s", err)
    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
    info.sort(key=(lambda i: (i[0], i[5])))
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
      # Beware of duplicates (check before inserting)
      if lastpvi and lastpvi.name == pv_name:
        if include_lvs and lv_name:
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
            lastpvi.lv_list.append(lv_name)
        # NOTE(review): the 'else' branch building 'lvl' appears partly
        # elided here.
        if include_lvs and lv_name:
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                                    size=float(pv_size), free=float(pv_free),
                                    attributes=pv_attr, lv_list=lvl)

  # NOTE(review): a @classmethod decorator appears elided here.
  def _GetExclusiveStorageVgFree(cls, vg_name):
    """Return the free disk space in the given VG, in exclusive storage mode.

    @type vg_name: string
    @param vg_name: VG name
    @return: free space in MiB

    """
    pvs_info = cls.GetPVInfo([vg_name])
    # Free space is the standard PV size times the number of still-empty PVs.
    pv_size = cls._GetStdPvSize(pvs_info)
    num_pvs = len(cls._GetEmptyPvNames(pvs_info))
    return pv_size * num_pvs

  # NOTE(review): a @classmethod decorator appears elided here.
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
    """Get the free space info for specific VGs.

    @param vg_names: list of volume group names, if empty all will be returned
    @param excl_stor: whether exclusive_storage is enabled
    @param filter_readonly: whether to skip over readonly VGs

    @return: list of tuples (free_space, total_size, name) with free_space in

    """
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
    except errors.GenericError, err:
      logging.error("Can't get VG information: %s", err)
    for vg_name, vg_free, vg_attr, vg_size in info:
      # (possibly) skip over vgs which are not writable
      if filter_readonly and vg_attr[0] == "r":
      # (possibly) skip over vgs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
      # Exclusive storage needs a different concept of free space
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
        assert es_free <= vg_free
      data.append((float(vg_free), float(vg_size), vg_name))

  # NOTE(review): a @classmethod decorator appears elided here.
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    if (not cls._VALID_NAME_RE.match(name) or
        name in cls._INVALID_NAMES or
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
      base.ThrowError("Invalid LVM name '%s'", name)

  # NOTE(review): 'def Remove(self):' appears elided here.
    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
                           (self._vg_name, self._lv_name)])
      base.ThrowError("Can't lvremove: %s - %s",
                      result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this logical volume.

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      # NOTE(review): "to to" in the message below looks like a typo in
      # a runtime string; left untouched here.
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
      base.ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)

  # NOTE(review): a @classmethod decorator appears elided here.
  def _ParseLvInfoLine(cls, line, sep):
    """Parse one line of the lvs output used in L{_GetLvInfo}.

    """
    # NOTE(review): several guards and 'try:' lines appear elided below;
    # the orphan except clauses are kept verbatim.
    elems = line.strip().rstrip(sep).split(sep)
      base.ThrowError("Can't parse LVS output, len(%s) != 6", str(elems))
    (status, major, minor, pe_size, stripes, pvs) = elems
      base.ThrowError("lvs lv_attr is not at least 6 characters (%s)", status)
    except (TypeError, ValueError), err:
      base.ThrowError("lvs major/minor cannot be parsed: %s", str(err))
      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      base.ThrowError("Can't parse vg extent size: %s", err)
      stripes = int(stripes)
    except (TypeError, ValueError), err:
      base.ThrowError("Can't parse the number of stripes: %s", err)
    for pv in pvs.split(","):
      m = re.match(cls._PARSE_PV_DEV_RE, pv)
        base.ThrowError("Can't parse this device list: %s", pvs)
      pv_names.append(m.group(1))
    assert len(pv_names) > 0
    return (status, major, minor, pe_size, stripes, pv_names)

  # NOTE(review): a @classmethod decorator appears elided here.
  def _GetLvInfo(cls, dev_path, _run_cmd=utils.RunCmd):
    """Get info about the given existing LV to be used.

    """
    result = _run_cmd(["lvs", "--noheadings", "--separator=%s" % sep,
                       "--units=k", "--nosuffix",
                       "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                       "vg_extent_size,stripes,devices", dev_path])
      base.ThrowError("Can't find LV %s: %s, %s",
                      dev_path, result.fail_reason, result.output)
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      base.ThrowError("Can't parse LVS output, no lines? Got '%s'", str(out))
    (status, major, minor, pe_size, stripes, more_pvs) = \
      cls._ParseLvInfoLine(line, sep)
    pv_names.update(more_pvs)
    return (status, major, minor, pe_size, stripes, pv_names)

  # NOTE(review): 'def Attach(self):' appears elided here.
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be

    """
    self.attached = False
      (status, major, minor, pe_size, stripes, pv_names) = \
        self._GetLvInfo(self.dev_path)
    except errors.BlockDeviceError:
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. doesn't backing
    self.pv_names = pv_names

  # NOTE(review): 'def Assemble(self):' appears elided here.
    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
      base.ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)

  # NOTE(review): 'def Shutdown(self):' appears elided here.
    """Shutdown the device.

    This is a no-op for the LV device type, as we don't deactivate the

    """

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (read from it return I/O error). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    # NOTE(review): the if/else selecting between these branches appears
    # elided here.
      ldisk_status = constants.LDS_FAULTY
      ldisk_status = constants.LDS_OKAY
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the LV device type.

    """

  # NOTE(review): 'def Close(self):' appears elided here.
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the LV device type.

    """

  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # remove existing snapshot if found
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
    base.IgnoreError(snap.Remove)

    vg_info = self.GetVGInfo([self._vg_name], False)
      base.ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
      base.ThrowError("Not enough free space: required %s,"
                      " available %s", size, free_size)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)

  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    """
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
      raw_tags = result.stdout.strip()
      for tag in raw_tags.split(","):
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                   tag.strip(), self.dev_path]))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    base.BlockDev.SetInfo(self, text)
    self._RemoveOldInfo()

    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        base.ThrowError("Can't attach to LV during Grow()")
    full_stripe_size = self.pe_size * self.stripe_count
    rest = amount % full_stripe_size
      amount += full_stripe_size - rest
    cmd = ["lvextend", "-L", "+%dk" % amount]
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    for alloc_policy in "contiguous", "cling", "normal":
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
      if not result.failed:
    base.ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)

  def GetActualSpindles(self):
    """Return the number of spindles used.

    """
    assert self.attached, "BlockDevice not attached in GetActualSpindles()"
    return len(self.pv_names)
class FileStorage(base.BlockDev):
  """File device.

  This class represents a file storage backend device.

  The unique_id for the file device is a (file_driver, file_path) tuple.

  """
  def __init__(self, unique_id, children, size, params):
    """Initializes a file device backend.

    """
    # NOTE(review): the guard (likely 'if children:') before this raise
    # appears elided; as transcribed the raise is unconditional.
    raise errors.BlockDeviceError("Invalid setup for file device")
    super(FileStorage, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.driver = unique_id[0]
    self.dev_path = unique_id[1]
    CheckFileStoragePath(self.dev_path)

  # NOTE(review): 'def Assemble(self):' appears elided here.
    """Assemble the device.

    Checks whether the file device exists, raises BlockDeviceError otherwise.

    """
    if not os.path.exists(self.dev_path):
      base.ThrowError("File device '%s' does not exist" % self.dev_path)

  # NOTE(review): 'def Shutdown(self):' appears elided here.
    """Shutdown the device.

    This is a no-op for the file type, as we don't deactivate
    the file on shutdown.

    """

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the file type.

    """

  # NOTE(review): 'def Close(self):' appears elided here.
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the file type.

    """

  # NOTE(review): 'def Remove(self):' appears elided here.
    """Remove the file backing the block device.

    @rtype: boolean
    @return: True if the removal was successful

    """
    # NOTE(review): the 'try:'/'except OSError, err:' scaffolding and the
    # 'return True' appear elided.
      os.remove(self.dev_path)
      if err.errno != errno.ENOENT:
        base.ThrowError("Can't remove file '%s': %s", self.dev_path, err)

  def Rename(self, new_id):
    # TODO: implement rename for file-based storage
    base.ThrowError("Rename is not supported for file-based storage")

  def Grow(self, amount, dryrun, backingstore):
    """Grow the file.

    @param amount: the amount (in mebibytes) to grow with

    """
    # Check that the file exists
    current_size = self.GetActualSize()
    new_size = current_size + amount * 1024 * 1024
    assert new_size > current_size, "Cannot Grow with a negative amount"
    # We can't really simulate the growth
    # NOTE(review): the 'try:' and truncate/close calls appear elided here.
    f = open(self.dev_path, "a+")
    except EnvironmentError, err:
      # NOTE(review): '%' below looks like it should be '%s' — runtime
      # string left unchanged.
      base.ThrowError("Error in file growth: %", str(err))

  # NOTE(review): 'def Attach(self):' appears elided here.
    """Attach to an existing file.

    Check if this file already exists.

    @rtype: boolean
    @return: True if file exists

    """
    self.attached = os.path.exists(self.dev_path)

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    # NOTE(review): 'try:'/'except'/'return st.st_size' appear elided here.
      st = os.stat(self.dev_path)
      base.ThrowError("Can't stat %s: %s", self.dev_path, err)

  # NOTE(review): a @classmethod decorator appears elided here.
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new file.

    @param size: the size of file in MiB

    @rtype: L{bdev.FileStorage}
    @return: an instance of FileStorage

    """
    # NOTE(review): the 'if excl_stor:' guard appears elided before this
    # raise.
      raise errors.ProgrammerError("FileStorage device requested with"
                                   " exclusive_storage")
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    dev_path = unique_id[1]

    CheckFileStoragePath(dev_path)

    # NOTE(review): the 'try:' and f.close() appear elided here.
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
      f = os.fdopen(fd, "w")
      f.truncate(size * 1024 * 1024)
    except EnvironmentError, err:
      if err.errno == errno.EEXIST:
        base.ThrowError("File already existing: %s", dev_path)
      base.ThrowError("Error in file creation: %", str(err))

    return FileStorage(unique_id, children, size, params)
class PersistentBlockDevice(base.BlockDev):
  """A block device with persistent node

  May be either directly attached, or exposed through DM (e.g. dm-multipath).
  udev helpers are probably required to give persistent, human-friendly

  For the time being, pathnames are required to lie under /dev.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to a static block device.

    The unique_id is a path under /dev.

    """
    # NOTE(review): the continuation line closing this super() call
    # appears elided.
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.dev_path = unique_id[1]
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
      raise ValueError("Full path '%s' lies outside /dev" %
                       os.path.realpath(self.dev_path))
    # TODO: this is just a safety guard checking that we only deal with devices
    # we know how to handle. In the future this will be integrated with
    # external storage backends and possible values will probably be collected
    # from the cluster configuration.
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
      raise ValueError("Got persistent block device of invalid type: %s" %
    self.major = self.minor = None

  # NOTE(review): a @classmethod decorator appears elided here.
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new device

    This is a noop, we only return a PersistentBlockDevice instance

    """
    # NOTE(review): the 'if excl_stor:' guard appears elided before this
    # raise.
      raise errors.ProgrammerError("Persistent block device requested with"
                                   " exclusive_storage")
    return PersistentBlockDevice(unique_id, children, 0, params)

  def Rename(self, new_id):
    """Rename this device.

    """
    base.ThrowError("Rename is not supported for PersistentBlockDev storage")

  # NOTE(review): 'def Attach(self):' appears elided here.
    """Attach to an existing block device.

    """
    self.attached = False
    # NOTE(review): 'try:'/'except OSError, err:' and the early returns
    # appear elided below.
      st = os.stat(self.dev_path)
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)

  # NOTE(review): 'def Assemble(self):' appears elided here.
    """Assemble the device.

    """

  # NOTE(review): 'def Shutdown(self):' appears elided here.
    """Shutdown the device.

    """

  def Open(self, force=False):
    """Make the device ready for I/O.

    """

  # NOTE(review): 'def Close(self):' appears elided here.
    """Notifies that the device will no longer be used for I/O.

    """

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    base.ThrowError("Grow is not supported for PersistentBlockDev storage")
1030 class RADOSBlockDevice(base.BlockDev):
1031 """A RADOS Block Device (rbd).
1033 This class implements the RADOS Block Device for the backend. You need
1034 the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
1035 this to be functional.
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    The unique_id is a (driver, rbd_name) tuple.

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    # Filled in by Attach() once the volume is mapped.
    self.major = self.minor = None
  # NOTE(review): a @classmethod decorator appears elided here.
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new rbd device.

    Provision a new rbd volume inside a RADOS pool.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      # NOTE(review): the continuation closing this raise and the
      # 'if excl_stor:' guard appear elided from the transcription.
      raise errors.ProgrammerError("Invalid configuration data %s" %
      raise errors.ProgrammerError("RBD device requested with"
                                   " exclusive_storage")
    rbd_pool = params[constants.LDP_POOL]
    rbd_name = unique_id[1]

    # Provision a new rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
           rbd_name, "--size", "%s" % size]
    result = utils.RunCmd(cmd)
    # NOTE(review): the 'if result.failed:' guard appears elided here.
      base.ThrowError("rbd creation failed (%s): %s",
                      result.fail_reason, result.output)

    return RADOSBlockDevice(unique_id, children, size, params)
  # NOTE(review): 'def Remove(self):' appears elided here.
    """Remove the rbd device.

    """
    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]

    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.

    # First shutdown the device (remove mappings).

    # Remove the actual Volume (Image) from the RADOS cluster.
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
    result = utils.RunCmd(cmd)
    # NOTE(review): the 'if result.failed:' guard appears elided here.
      base.ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
                      result.fail_reason, result.output)
  def Rename(self, new_id):
    """Rename this device.

    """
    # NOTE(review): the method body appears elided from this
    # transcription — confirm against the full source.
  # NOTE(review): 'def Attach(self):' appears elided here.
    """Attach to an existing rbd device.

    This method maps the rbd volume that matches our name with
    an rbd device and then attaches to this device.

    """
    self.attached = False

    # Map the rbd volume to a block device under /dev
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)

    # NOTE(review): the 'try:' line and early returns appear elided below.
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True
  def _MapVolumeToBlockdev(self, unique_id):
    """Maps existing rbd volumes to block devices.

    This method should be idempotent if the mapping already exists.

    @return: the block device path that corresponds to the volume

    """
    pool = self.params[constants.LDP_POOL]
    # NOTE(review): the line deriving 'name' from unique_id appears
    # elided here.

    # Check if the mapping already exists.
    rbd_dev = self._VolumeToBlockdev(pool, name)
      # The mapping exists. Return it.

    # The mapping doesn't exist. Create it.
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
    result = utils.RunCmd(map_cmd)
    # NOTE(review): the 'if result.failed:' guard appears elided here.
      base.ThrowError("rbd map failed (%s): %s",
                      result.fail_reason, result.output)

    # Find the corresponding rbd device.
    rbd_dev = self._VolumeToBlockdev(pool, name)
      base.ThrowError("rbd map succeeded, but could not find the rbd block"
                      " device in output of showmapped, for volume: %s", name)

    # The device was successfully mapped. Return it.
  # NOTE(review): a @classmethod decorator appears elided here.
  def _VolumeToBlockdev(cls, pool, volume_name):
    """Do the 'volume name'-to-'rbd block device' resolving.

    @param pool: RADOS pool to use
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    # Newer versions of the rbd tool support json output formatting. Use it
    # NOTE(review): the 'try:' line and the json showmap_cmd construction
    # appear elided below.
      result = utils.RunCmd(showmap_cmd)
        logging.error("rbd JSON output formatting returned error (%s): %s,"
                      "falling back to plain output parsing",
                      result.fail_reason, result.output)
        raise RbdShowmappedJsonError

      return cls._ParseRbdShowmappedJson(result.output, volume_name)
    except RbdShowmappedJsonError:
      # For older versions of rbd, we have to parse the plain / text output
      showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
      result = utils.RunCmd(showmap_cmd)
      # NOTE(review): the 'if result.failed:' guard appears elided here.
        base.ThrowError("rbd showmapped failed (%s): %s",
                        result.fail_reason, result.output)

      return cls._ParseRbdShowmappedPlain(result.output, volume_name)
  # NOTE(review): a @staticmethod decorator appears elided here.
  def _ParseRbdShowmappedJson(output, volume_name):
    """Parse the json output of `rbd showmapped'.

    This method parses the json output of `rbd showmapped' and returns the rbd
    block device path (e.g. /dev/rbd0) that matches the given rbd volume.

    @type output: string
    @param output: the json output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    # NOTE(review): the 'try:' line, the rbd_dev initialization and the
    # final return appear elided from this transcription.
      devices = serializer.LoadJson(output)
    except ValueError, err:
      base.ThrowError("Unable to parse JSON data: %s" % err)

    for d in devices.values(): # pylint: disable=E1103
        base.ThrowError("'name' key missing from json object %s", devices)

      if name == volume_name:
        if rbd_dev is not None:
          base.ThrowError("rbd volume %s is mapped more than once", volume_name)

        rbd_dev = d["device"]
1244 def _ParseRbdShowmappedPlain(output, volume_name):
1245 """Parse the (plain / text) output of `rbd showmapped'.
1247 This method parses the output of `rbd showmapped' and returns
1248 the rbd block device path (e.g. /dev/rbd0) that matches the
1251 @type output: string
1252 @param output: the plain text output of `rbd showmapped'
1253 @type volume_name: string
1254 @param volume_name: the name of the volume whose device we search for
1255 @rtype: string or None
1256 @return: block device path if the volume is mapped, else None
1263 lines = output.splitlines()
1265 # Try parsing the new output format (ceph >= 0.55).
1266 splitted_lines = map(lambda l: l.split(), lines)
1268 # Check for empty output.
1269 if not splitted_lines:
1272 # Check showmapped output, to determine number of fields.
1273 field_cnt = len(splitted_lines[0])
1274 if field_cnt != allfields:
1275 # Parsing the new format failed. Fallback to parsing the old output
1277 splitted_lines = map(lambda l: l.split("\t"), lines)
1278 if field_cnt != allfields:
1279 base.ThrowError("Cannot parse rbd showmapped output expected %s fields,"
1280 " found %s", allfields, field_cnt)
1283 filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
1286 if len(matched_lines) > 1:
1287 base.ThrowError("rbd volume %s mapped more than once", volume_name)
1290 # rbd block device found. Return it.
1291 rbd_dev = matched_lines[0][devicefield]
1294 # The given volume is not mapped.
1298 """Assemble the device.
# NOTE(review): body elided in this excerpt; rbd mapping appears to happen
# in Attach(), so Assemble presumably has nothing to do — confirm.
1304 """Shutdown the device.
# Nothing to tear down when the device was never attached/mapped.
1307 if not self.minor and not self.Attach():
1308 # The rbd device doesn't exist.
1311 # Unmap the block device from the Volume.
1312 self._UnmapVolumeFromBlockdev(self.unique_id)
1315 self.dev_path = None
1317 def _UnmapVolumeFromBlockdev(self, unique_id):
1318 """Unmaps the rbd device from the Volume it is mapped.
1320 Unmaps the rbd device from the Volume it was previously mapped to.
1321 This method should be idempotent if the Volume isn't mapped.
# Resolve the volume to its /dev/rbdN device first; only run `rbd unmap'
# when a mapping actually exists, which keeps the method idempotent.
1324 pool = self.params[constants.LDP_POOL]
1327 # Check if the mapping already exists.
1328 rbd_dev = self._VolumeToBlockdev(pool, name)
1331 # The mapping exists. Unmap the rbd device.
1332 unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
1333 result = utils.RunCmd(unmap_cmd)
# A failed unmap is fatal: surface the tool's reason and output.
1335 base.ThrowError("rbd unmap failed (%s): %s",
1336 result.fail_reason, result.output)
1338 def Open(self, force=False):
1339 """Make the device ready for I/O.
# NOTE(review): body elided in this excerpt — appears to be a no-op for
# rbd devices; confirm against the full source.
1345 """Notifies that the device will no longer be used for I/O.
1350 def Grow(self, amount, dryrun, backingstore):
# Grow acts only on the backing store (the rbd image inside the cluster).
1353 @type amount: integer
1354 @param amount: the amount (in mebibytes) to grow with
1355 @type dryrun: boolean
1356 @param dryrun: whether to execute the operation in simulation mode
1357 only, without actually increasing the size
1360 if not backingstore:
# Growing requires an attached device so size/params are known.
1362 if not self.Attach():
1363 base.ThrowError("Can't attach to rbd device during Grow()")
1366 # the rbd tool does not support dry runs of resize operations.
1367 # Since rbd volumes are thinly provisioned, we assume
1368 # there is always enough free space for the operation.
1371 rbd_pool = self.params[constants.LDP_POOL]
1372 rbd_name = self.unique_id[1]
# New total size in mebibytes, passed to `rbd resize --size'.
1373 new_size = self.size + amount
1375 # Resize the rbd volume (Image) inside the RADOS cluster.
1376 cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
1377 rbd_name, "--size", "%s" % new_size]
1378 result = utils.RunCmd(cmd)
1380 base.ThrowError("rbd resize failed (%s): %s",
1381 result.fail_reason, result.output)
1384 class ExtStorageDevice(base.BlockDev):
1385 """A block device provided by an ExtStorage Provider.
1387 This class implements the External Storage Interface, which means
1388 handling of the externally provided block devices.
# unique_id is a (driver, vol_name) pair; see the unpacking in __init__.
1391 def __init__(self, unique_id, children, size, params):
1392 """Attaches to an extstorage block device.
1395 super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
1396 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1397 raise ValueError("Invalid configuration data %s" % str(unique_id))
1399 self.driver, self.vol_name = unique_id
1400 self.ext_params = params
1402 self.major = self.minor = None
# Create provisions a new volume through the provider's create script;
# extstorage does not support exclusive storage (see the check below).
1406 def Create(cls, unique_id, children, size, spindles, params, excl_stor):
1407 """Create a new extstorage device.
1409 Provision a new volume using an extstorage provider, which will
1410 then be mapped to a block device.
1413 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1414 raise errors.ProgrammerError("Invalid configuration data %s" %
1417 raise errors.ProgrammerError("extstorage device requested with"
1418 " exclusive_storage")
1420 # Call the External Storage's create script,
1421 # to provision a new Volume inside the External Storage
1422 _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
1425 return ExtStorageDevice(unique_id, children, size, params)
# Remove shuts the device down (detach) before deleting the volume.
1428 """Remove the extstorage device.
1431 if not self.minor and not self.Attach():
1432 # The extstorage device doesn't exist.
1435 # First shutdown the device (remove mappings).
1438 # Call the External Storage's remove script,
1439 # to remove the Volume from the External Storage
1440 _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
1443 def Rename(self, new_id):
1444 """Rename this device.
1450 """Attach to an existing extstorage device.
1452 This method maps the extstorage volume that matches our name with
1453 a corresponding block device and then attaches to this device.
1456 self.attached = False
1458 # Call the External Storage's attach script,
1459 # to attach an existing Volume to a block device under /dev
1460 self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
1461 self.unique_id, self.ext_params)
# Sanity-check the path returned by the attach script: it must exist and
# be a block special file before we record major/minor numbers.
1464 st = os.stat(self.dev_path)
1465 except OSError, err:
1466 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1469 if not stat.S_ISBLK(st.st_mode):
1470 logging.error("%s is not a block device", self.dev_path)
1473 self.major = os.major(st.st_rdev)
1474 self.minor = os.minor(st.st_rdev)
1475 self.attached = True
1480 """Assemble the device.
1486 """Shutdown the device.
1489 if not self.minor and not self.Attach():
1490 # The extstorage device doesn't exist.
1493 # Call the External Storage's detach script,
1494 # to detach an existing Volume from it's block device under /dev
1495 _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
1499 self.dev_path = None
1501 def Open(self, force=False):
1502 """Make the device ready for I/O.
1508 """Notifies that the device will no longer be used for I/O.
# Grow acts only on the backing store; current and new sizes are passed
# to the provider script as strings (see the call below).
1513 def Grow(self, amount, dryrun, backingstore):
1516 @type amount: integer
1517 @param amount: the amount (in mebibytes) to grow with
1518 @type dryrun: boolean
1519 @param dryrun: whether to execute the operation in simulation mode
1520 only, without actually increasing the size
1523 if not backingstore:
1525 if not self.Attach():
1526 base.ThrowError("Can't attach to extstorage device during Grow()")
1529 # we do not support dry runs of resize operations for now.
1532 new_size = self.size + amount
1534 # Call the External Storage's grow script,
1535 # to grow an existing Volume inside the External Storage
1536 _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
1537 self.ext_params, str(self.size), grow=str(new_size))
1539 def SetInfo(self, text):
1540 """Update metadata with info text.
1543 # Replace invalid characters
1544 text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
1545 text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
1547 # Only up to 128 characters are allowed
# NOTE(review): the actual truncation statement is elided in this
# excerpt — confirm the 128-character cap against the full source.
1550 # Call the External Storage's setinfo script,
1551 # to set metadata for an existing Volume inside the External Storage
1552 _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
1553 self.ext_params, metadata=text)
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
      create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
      and the name of the Volume
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: string
  @param size: the size of the Volume in mebibytes
  @type grow: string
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    base.ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # Compare by equality, not identity: `is' on strings only works by the
  # accident of interning and breaks for equal-but-distinct objects.
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    base.ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                    action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      # Take the last 20 *lines* of the captured output; slicing the raw
      # string would grab the last 20 characters and the join below would
      # then newline-separate single characters.
      lines = result.output.splitlines()[-20:]

    base.ThrowError("External storage's %s script failed (%s), last"
                    " lines of output:\n%s",
                    action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
1632 def ExtStorageFromDisk(name, base_dir=None):
1633 """Create an ExtStorage instance from disk.
1635 This function will return an ExtStorage instance
1636 if the given name is a valid ExtStorage name.
1638 @type base_dir: string
1639 @keyword base_dir: Base directory containing ExtStorage installations.
1640 Defaults to a search in all the ES_SEARCH_PATH dirs.
1642 @return: True and the ExtStorage instance if we find a valid one, or
1643 False and the diagnose message on error
# Resolve the provider directory, either under the caller-supplied base
# directory or the standard search path.
1646 if base_dir is None:
1647 es_base_dir = pathutils.ES_SEARCH_PATH
1649 es_base_dir = [base_dir]
1651 es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)
1654 return False, ("Directory for External Storage Provider %s not"
1655 " found in search path" % name)
1657 # ES Files dictionary, we will populate it with the absolute path
1658 # names; if the value is True, then it is a required file, otherwise
1660 es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
1662 es_files[constants.ES_PARAMETERS_FILE] = True
# Resolve every expected file to an absolute path and validate it:
# it must exist, be a regular file, and scripts must be executable.
1664 for (filename, _) in es_files.items():
1665 es_files[filename] = utils.PathJoin(es_dir, filename)
1668 st = os.stat(es_files[filename])
1669 except EnvironmentError, err:
1670 return False, ("File '%s' under path '%s' is missing (%s)" %
1671 (filename, es_dir, utils.ErrnoOrStr(err)))
1673 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
1674 return False, ("File '%s' under path '%s' is not a regular file" %
1677 if filename in constants.ES_SCRIPTS:
1678 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
1679 return False, ("File '%s' under path '%s' is not executable" %
# Read the declared parameters; each non-empty line is split once,
# presumably into a (name, description) pair — confirm format.
1683 if constants.ES_PARAMETERS_FILE in es_files:
1684 parameters_file = es_files[constants.ES_PARAMETERS_FILE]
1686 parameters = utils.ReadFile(parameters_file).splitlines()
1687 except EnvironmentError, err:
1688 return False, ("Error while reading the EXT parameters file at %s: %s" %
1689 (parameters_file, utils.ErrnoOrStr(err)))
1690 parameters = [v.split(None, 1) for v in parameters]
# All checks passed: build the ExtStorage object carrying the resolved
# script paths and the supported parameters.
1693 objects.ExtStorage(name=name, path=es_dir,
1694 create_script=es_files[constants.ES_SCRIPT_CREATE],
1695 remove_script=es_files[constants.ES_SCRIPT_REMOVE],
1696 grow_script=es_files[constants.ES_SCRIPT_GROW],
1697 attach_script=es_files[constants.ES_SCRIPT_ATTACH],
1698 detach_script=es_files[constants.ES_SCRIPT_DETACH],
1699 setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
1700 verify_script=es_files[constants.ES_SCRIPT_VERIFY],
1701 supported_parameters=parameters)
1705 def _ExtStorageEnvironment(unique_id, ext_params,
1706 size=None, grow=None, metadata=None):
1707 """Calculate the environment for an External Storage script.
1709 @type unique_id: tuple (driver, vol_name)
1710 @param unique_id: ExtStorage pool and name of the Volume
1711 @type ext_params: dict
1712 @param ext_params: the EXT parameters
1714 @param size: size of the Volume (in mebibytes)
1716 @param grow: new size of Volume after grow (in mebibytes)
1717 @type metadata: string
1718 @param metadata: metadata info of the Volume
1720 @return: dict of environment variables
1723 vol_name = unique_id[1]
1726 result["VOL_NAME"] = vol_name
1729 for pname, pvalue in ext_params.items():
1730 result["EXTP_%s" % pname.upper()] = str(pvalue)
1732 if size is not None:
1733 result["VOL_SIZE"] = size
1735 if grow is not None:
1736 result["VOL_NEW_SIZE"] = grow
1738 if metadata is not None:
1739 result["VOL_METADATA"] = metadata
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # The log directory must already exist; do not try to guess a fallback.
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    base.ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = "%s-%s-%s-%s.log" % (kind, es_name, volume,
                                  utils.TimestampForFilename())
  return utils.PathJoin(pathutils.LOG_ES_DIR, basename)
# Mapping of logical-disk type constants to their implementing classes.
1766 constants.LD_LV: LogicalVolume,
1767 constants.LD_DRBD8: drbd.DRBD8Dev,
1768 constants.LD_BLOCKDEV: PersistentBlockDevice,
1769 constants.LD_RBD: RADOSBlockDevice,
1770 constants.LD_EXT: ExtStorageDevice,
# File-based storage is only registered when enabled at build time.
1773 if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
1774 DEV_MAP[constants.LD_FILE] = FileStorage
def _VerifyDiskType(dev_type):
  """Check that C{dev_type} has a registered block-device class.

  @raise errors.ProgrammerError: if the type is unknown

  """
  if dev_type in DEV_MAP:
    return
  raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  @raise errors.ProgrammerError: if any expected parameter is missing

  """
  expected = set(constants.DISK_LD_DEFAULTS[disk.dev_type])
  missing = expected - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
      represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  # Only report devices that are already attached; never activate here.
  if device.attached:
    return device
  return None
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
      represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  device.Assemble()
  return device
1834 def Create(disk, children, excl_stor):
1837 @type disk: L{objects.Disk}
1838 @param disk: the disk object to create
1839 @type children: list of L{bdev.BlockDev}
1840 @param children: the list of block devices that are children of the device
1841 represented by the disk parameter
1842 @type excl_stor: boolean
1843 @param excl_stor: Whether exclusive_storage is active
1844 @rtype: L{bdev.BlockDev}
1845 @return: the created device, or C{None} in case of an error
1848 _VerifyDiskType(disk.dev_type)
1849 _VerifyDiskParams(disk)
1850 device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
1851 disk.spindles, disk.params, excl_stor)