4 # Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Block device abstraction"""
31 from ganeti import utils
32 from ganeti import errors
33 from ganeti import constants
34 from ganeti import objects
35 from ganeti import compat
36 from ganeti import pathutils
37 from ganeti import serializer
38 from ganeti.storage import drbd
39 from ganeti.storage import base
# Raised by RADOSBlockDevice._VolumeToBlockdev when the JSON-formatted
# `rbd showmapped' output cannot be obtained/used, so the caller falls back
# to parsing the plain-text output.
42 class RbdShowmappedJsonError(Exception):
43 """`rbd showmapped' JSON formatting error Exception class.
49 def _CheckResult(result):
50 """Throws an error if the given result is a failed one.
52 @param result: result from RunCmd
# NOTE(review): the `if result.failed:` guard line is not visible in this
# extract -- presumably ThrowError only fires on failure; confirm upstream.
56 base.ThrowError("Command: %s error: %s - %s",
57 result.cmd, result.fail_reason, result.output)
60 def _GetForbiddenFileStoragePaths():
61 """Builds a list of path prefixes which shouldn't be used for file storage.
# Expands to /bin, /lib, /lib32, /lib64, /sbin plus the same under /usr and
# /usr/local; normalized and returned as an immutable frozenset.
76 for prefix in ["", "/usr", "/usr/local"]:
77 paths.update(map(lambda s: "%s/%s" % (prefix, s),
78 ["bin", "lib", "lib32", "lib64", "sbin"]))
80 return compat.UniqueFrozenset(map(os.path.normpath, paths))
83 def _ComputeWrongFileStoragePaths(paths,
# _forbidden is evaluated once at import time (default-argument idiom used
# deliberately here as a cache of the forbidden-prefix set).
84 _forbidden=_GetForbiddenFileStoragePaths()):
85 """Cross-checks a list of paths for prefixes considered bad.
87 Some paths, e.g. "/bin", should not be used for file storage.
90 @param paths: List of paths to be checked
92 @return: Sorted list of paths for which the user should be warned
# NOTE(review): the `def _Check(path):` header for this inner predicate is
# missing from this extract; a path is "wrong" when it is relative or lies
# below one of the forbidden prefixes.
96 return (not os.path.isabs(path) or
98 filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
100 return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
103 def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
104 """Returns a list of file storage paths whose prefix is considered bad.
106 See L{_ComputeWrongFileStoragePaths}.
# Public wrapper: loads the allowed-paths file and delegates the actual check.
109 return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
112 def _CheckFileStoragePath(path, allowed):
113 """Checks if a path is in a list of allowed paths for file storage.
116 @param path: Path to check
118 @param allowed: List of allowed paths
119 @raise errors.FileStoragePathError: If the path is not allowed
122 if not os.path.isabs(path):
123 raise errors.FileStoragePathError("File storage path must be absolute,"
# NOTE(review): the loop header over `allowed` is missing from this extract;
# each relative entry is skipped with a log message, and a match via
# IsBelowDir accepts the path (presumably `break`/`return` on the line
# missing after L131's check -- confirm against the full source).
127 if not os.path.isabs(i):
128 logging.info("Ignoring relative path '%s' for file storage", i)
131 if utils.IsBelowDir(i, path):
# Reached only when no allowed prefix matched.
134 raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
138 def _LoadAllowedFileStoragePaths(filename):
139 """Loads file containing allowed file storage paths.
142 @return: List of allowed paths (can be an empty list)
# Best-effort read: an unreadable/missing file is treated as "no allowed
# paths" (the early return inside the except branch is outside this extract).
146 contents = utils.ReadFile(filename)
147 except EnvironmentError:
150 return utils.FilterEmptyLinesAndComments(contents)
153 def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
154 """Checks if a path is allowed for file storage.
157 @param path: Path to check
158 @raise errors.FileStoragePathError: If the path is not allowed
161 allowed = _LoadAllowedFileStoragePaths(_filename)
# Reject forbidden system prefixes first, then check the whitelist.
163 if _ComputeWrongFileStoragePaths([path]):
164 raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
167 _CheckFileStoragePath(path, allowed)
# NOTE(review): this listing is an extract -- decorator lines (@staticmethod/
# @classmethod), blank lines, `try:`/`if` guards and some `continue`/`break`/
# `return` statements are not visible. Comments below only state what the
# visible code establishes; anything else is marked as an assumption.
170 class LogicalVolume(base.BlockDev):
171 """Logical Volume block device.
174 _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
# NOTE(review): pattern contains backslash escapes in a non-raw string;
# should be a raw string (r"...") to avoid invalid-escape warnings.
175 _PARSE_PV_DEV_RE = re.compile("^([^ ()]+)\([0-9]+\)$")
176 _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
177 _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
179 def __init__(self, unique_id, children, size, params):
180 """Attaches to a LV device.
182 The unique_id is a tuple (vg_name, lv_name)
185 super(LogicalVolume, self).__init__(unique_id, children, size, params)
186 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
187 raise ValueError("Invalid configuration data %s" % str(unique_id))
188 self._vg_name, self._lv_name = unique_id
189 self._ValidateName(self._vg_name)
190 self._ValidateName(self._lv_name)
191 self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
# Assume degraded until Attach() reads the real LV status.
192 self._degraded = True
193 self.major = self.minor = self.pe_size = self.stripe_count = None
198 def _GetStdPvSize(pvs_info):
199 """Return the standard PV size (used with exclusive storage).
201 @param pvs_info: list of objects.LvmPvInfo, cannot be empty
# Smallest PV, shrunk by the partition margin/reserve constants.
206 assert len(pvs_info) > 0
207 smallest = min([pv.size for pv in pvs_info])
208 return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
211 def _ComputeNumPvs(size, pvs_info):
212 """Compute the number of PVs needed for an LV (with exclusive storage).
215 @param size: LV size in MiB
216 @param pvs_info: list of objects.LvmPvInfo, cannot be empty
218 @return: number of PVs needed
220 assert len(pvs_info) > 0
221 pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
222 return int(math.ceil(float(size) / pv_size))
225 def _GetEmptyPvNames(pvs_info, max_pvs=None):
226 """Return a list of empty PVs, by name.
229 empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
230 if max_pvs is not None:
231 empty_pvs = empty_pvs[:max_pvs]
232 return map((lambda pv: pv.name), empty_pvs)
235 def Create(cls, unique_id, children, size, spindles, params, excl_stor):
236 """Create a new logical volume.
239 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
240 raise errors.ProgrammerError("Invalid configuration data %s" %
242 vg_name, lv_name = unique_id
243 cls._ValidateName(vg_name)
244 cls._ValidateName(lv_name)
245 pvs_info = cls.GetPVInfo([vg_name])
248 msg = "No (empty) PVs found"
250 msg = "Can't compute PV info for vg %s" % vg_name
# Prefer PVs with the most free space.
252 pvs_info.sort(key=(lambda pv: pv.free), reverse=True)
254 pvlist = [pv.name for pv in pvs_info]
255 if compat.any(":" in v for v in pvlist):
256 base.ThrowError("Some of your PVs have the invalid character ':' in their"
257 " name, this is not supported - please filter them out"
258 " in lvm.conf using either 'filter' or 'preferred_names'")
260 current_pvs = len(pvlist)
261 desired_stripes = params[constants.LDP_STRIPES]
262 stripes = min(current_pvs, desired_stripes)
# NOTE(review): adjacent string literals below concatenate without a space
# ("required" + "when") -- message reads "requiredwhen"; fix in a code pass.
266 base.ThrowError("Unspecified number of spindles: this is required"
267 "when exclusive storage is enabled, try running"
268 " gnt-cluster repair-disk-sizes")
269 (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
273 req_pvs = cls._ComputeNumPvs(size, pvs_info)
274 if spindles < req_pvs:
275 base.ThrowError("Requested number of spindles (%s) is not enough for"
276 " a disk of %d MB (at least %d spindles needed)",
277 spindles, size, req_pvs)
# Exclusive storage: allocate whole, empty PVs only.
280 pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
281 current_pvs = len(pvlist)
282 if current_pvs < req_pvs:
283 base.ThrowError("Not enough empty PVs (spindles) to create a disk of %d"
284 " MB: %d available, %d needed",
285 size, current_pvs, req_pvs)
286 assert current_pvs == len(pvlist)
287 # We must update stripes to be sure to use all the desired spindles
288 stripes = current_pvs
289 if stripes > desired_stripes:
290 # Don't warn when lowering stripes, as it's no surprise
291 logging.warning("Using %s stripes instead of %s, to be able to use"
292 " %s spindles", stripes, desired_stripes, current_pvs)
295 if stripes < desired_stripes:
296 logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
297 " available.", desired_stripes, vg_name, current_pvs)
298 free_size = sum([pv.free for pv in pvs_info])
299 # The size constraint should have been checked from the master before
300 # calling the create function.
302 base.ThrowError("Not enough free space: required %s,"
303 " available %s", size, free_size)
305 # If the free space is not well distributed, we won't be able to
306 # create an optimally-striped volume; in that case, we want to try
307 # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
309 cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
310 for stripes_arg in range(stripes, 0, -1):
311 result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
312 if not result.failed:
# Reached only when all stripe counts failed.
315 base.ThrowError("LV create failed (%s): %s",
316 result.fail_reason, result.output)
317 return LogicalVolume(unique_id, children, size, params)
320 def _GetVolumeInfo(lvm_cmd, fields):
321 """Returns LVM Volume infos using lvm_cmd
323 @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
324 @param fields: Fields to return
325 @return: A list of dicts each with the parsed fields
329 raise errors.ProgrammerError("No fields specified")
# `sep` is defined on a line missing from this extract (a field separator
# unlikely to appear in LVM output); used for both the command and parsing.
332 cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
333 "--separator=%s" % sep, "-o%s" % ",".join(fields)]
335 result = utils.RunCmd(cmd)
337 raise errors.CommandError("Can't get the volume information: %s - %s" %
338 (result.fail_reason, result.output))
341 for line in result.stdout.splitlines():
342 splitted_fields = line.strip().split(sep)
344 if len(fields) != len(splitted_fields):
345 raise errors.CommandError("Can't parse %s output: line '%s'" %
348 data.append(splitted_fields)
353 def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
354 """Get the free space info for PVs in a volume group.
356 @param vg_names: list of volume group names, if empty all will be returned
357 @param filter_allocatable: whether to skip over unallocatable PVs
358 @param include_lvs: whether to include a list of LVs hosted on each PV
361 @return: list of objects.LvmPvInfo objects
364 # We request "lv_name" field only if we care about LVs, so we don't get
365 # a long list of entries with many duplicates unless we really have to.
366 # The duplicate "pv_name" field will be ignored.
372 info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
373 "pv_attr", "pv_size", lvfield])
374 except errors.GenericError, err:
375 logging.error("Can't get PV information: %s", err)
378 # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
379 # pair. We sort entries by PV name and then LV name, so it's easy to weed
382 info.sort(key=(lambda i: (i[0], i[5])))
385 for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
386 # (possibly) skip over pvs which are not allocatable
387 if filter_allocatable and pv_attr[0] != "a":
389 # (possibly) skip over pvs which are not in the right volume group(s)
390 if vg_names and vg_name not in vg_names:
392 # Beware of duplicates (check before inserting)
393 if lastpvi and lastpvi.name == pv_name:
394 if include_lvs and lv_name:
395 if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
396 lastpvi.lv_list.append(lv_name)
398 if include_lvs and lv_name:
402 lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
403 size=float(pv_size), free=float(pv_free),
404 attributes=pv_attr, lv_list=lvl)
410 def _GetRawFreePvInfo(cls, vg_name):
411 """Return info (size/free) about PVs.
413 @type vg_name: string
414 @param vg_name: VG name
416 @return: (standard_pv_size_in_MiB, number_of_free_pvs, total_number_of_pvs)
419 pvs_info = cls.GetPVInfo([vg_name])
425 pv_size = cls._GetStdPvSize(pvs_info)
426 free_pvs = len(cls._GetEmptyPvNames(pvs_info))
427 num_pvs = len(pvs_info)
428 return (pv_size, free_pvs, num_pvs)
431 def _GetExclusiveStorageVgFree(cls, vg_name):
432 """Return the free disk space in the given VG, in exclusive storage mode.
434 @type vg_name: string
435 @param vg_name: VG name
437 @return: free space in MiB
439 (pv_size, free_pvs, _) = cls._GetRawFreePvInfo(vg_name)
440 return pv_size * free_pvs
443 def GetVgSpindlesInfo(cls, vg_name):
444 """Get the free space info for specific VGs.
446 @param vg_name: volume group name
448 @return: (free_spindles, total_spindles)
451 (_, free_pvs, num_pvs) = cls._GetRawFreePvInfo(vg_name)
452 return (free_pvs, num_pvs)
455 def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
456 """Get the free space info for specific VGs.
458 @param vg_names: list of volume group names, if empty all will be returned
459 @param excl_stor: whether exclusive_storage is enabled
460 @param filter_readonly: whether to skip over readonly VGs
463 @return: list of tuples (free_space, total_size, name) with free_space in
468 info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
470 except errors.GenericError, err:
471 logging.error("Can't get VG information: %s", err)
475 for vg_name, vg_free, vg_attr, vg_size in info:
476 # (possibly) skip over vgs which are not writable
477 if filter_readonly and vg_attr[0] == "r":
479 # (possibly) skip over vgs which are not in the right volume group(s)
480 if vg_names and vg_name not in vg_names:
482 # Exclusive storage needs a different concept of free space
484 es_free = cls._GetExclusiveStorageVgFree(vg_name)
485 assert es_free <= vg_free
487 data.append((float(vg_free), float(vg_size), vg_name))
492 def _ValidateName(cls, name):
493 """Validates that a given name is valid as VG or LV name.
495 The list of valid characters and restricted names is taken out of
496 the lvm(8) manpage, with the simplification that we enforce both
497 VG and LV restrictions on the names.
500 if (not cls._VALID_NAME_RE.match(name) or
501 name in cls._INVALID_NAMES or
502 compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
503 base.ThrowError("Invalid LVM name '%s'", name)
# NOTE(review): the `def Remove(self):` header is missing from this extract.
506 """Remove this logical volume.
509 if not self.minor and not self.Attach():
510 # the LV does not exist
512 result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
513 (self._vg_name, self._lv_name)])
515 base.ThrowError("Can't lvremove: %s - %s",
516 result.fail_reason, result.output)
518 def Rename(self, new_id):
519 """Rename this logical volume.
522 if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
523 raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
524 new_vg, new_name = new_id
525 if new_vg != self._vg_name:
526 raise errors.ProgrammerError("Can't move a logical volume across"
527 " volume groups (from %s to to %s)" %
528 (self._vg_name, new_vg))
529 result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
531 base.ThrowError("Failed to rename the logical volume: %s", result.output)
532 self._lv_name = new_name
533 self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
536 def _ParseLvInfoLine(cls, line, sep):
537 """Parse one line of the lvs output used in L{_GetLvInfo}.
540 elems = line.strip().rstrip(sep).split(sep)
542 base.ThrowError("Can't parse LVS output, len(%s) != 6", str(elems))
544 (status, major, minor, pe_size, stripes, pvs) = elems
546 base.ThrowError("lvs lv_attr is not at least 6 characters (%s)", status)
# major/minor conversion lines are missing from this extract.
551 except (TypeError, ValueError), err:
552 base.ThrowError("lvs major/minor cannot be parsed: %s", str(err))
555 pe_size = int(float(pe_size))
556 except (TypeError, ValueError), err:
557 base.ThrowError("Can't parse vg extent size: %s", err)
560 stripes = int(stripes)
561 except (TypeError, ValueError), err:
562 base.ThrowError("Can't parse the number of stripes: %s", err)
565 for pv in pvs.split(","):
566 m = re.match(cls._PARSE_PV_DEV_RE, pv)
568 base.ThrowError("Can't parse this device list: %s", pvs)
569 pv_names.append(m.group(1))
570 assert len(pv_names) > 0
572 return (status, major, minor, pe_size, stripes, pv_names)
575 def _GetLvInfo(cls, dev_path, _run_cmd=utils.RunCmd):
576 """Get info about the given existing LV to be used.
580 result = _run_cmd(["lvs", "--noheadings", "--separator=%s" % sep,
581 "--units=k", "--nosuffix",
582 "-olv_attr,lv_kernel_major,lv_kernel_minor,"
583 "vg_extent_size,stripes,devices", dev_path])
585 base.ThrowError("Can't find LV %s: %s, %s",
586 dev_path, result.fail_reason, result.output)
587 # the output can (and will) have multiple lines for multi-segment
588 # LVs, as the 'stripes' parameter is a segment one, so we take
589 # only the last entry, which is the one we're interested in; note
590 # that with LVM2 anyway the 'stripes' value must be constant
591 # across segments, so this is a no-op actually
592 out = result.stdout.splitlines()
593 if not out: # totally empty result? splitlines() returns at least
594 # one line for any non-empty string
595 base.ThrowError("Can't parse LVS output, no lines? Got '%s'", str(out))
598 (status, major, minor, pe_size, stripes, more_pvs) = \
599 cls._ParseLvInfoLine(line, sep)
600 pv_names.update(more_pvs)
601 return (status, major, minor, pe_size, stripes, pv_names)
# NOTE(review): the `def Attach(self):` header is missing from this extract.
604 """Attach to an existing LV.
606 This method will try to see if an existing and active LV exists
607 which matches our name. If so, its major/minor will be
611 self.attached = False
613 (status, major, minor, pe_size, stripes, pv_names) = \
614 self._GetLvInfo(self.dev_path)
615 except errors.BlockDeviceError:
620 self.pe_size = pe_size
621 self.stripe_count = stripes
622 self._degraded = status[0] == "v" # virtual volume, i.e. no backing storage
624 self.pv_names = pv_names
# NOTE(review): the `def Assemble(self):` header is missing from this extract.
629 """Assemble the device.
631 We always run `lvchange -ay` on the LV to ensure it's active before
632 use, as there were cases when xenvg was not active after boot
633 (also possibly after disk issues).
636 result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
638 base.ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
641 """Shutdown the device.
643 This is a no-op for the LV device type, as we don't deactivate the
649 def GetSyncStatus(self):
650 """Returns the sync status of the device.
652 If this device is a mirroring device, this function returns the
653 status of the mirror.
655 For logical volumes, sync_percent and estimated_time are always
656 None (no recovery in progress, as we don't handle the mirrored LV
657 case). The is_degraded parameter is the inverse of the ldisk
660 For the ldisk parameter, we check if the logical volume has the
661 'virtual' type, which means it's not backed by existing storage
662 anymore (read from it return I/O error). This happens after a
663 physical disk failure and subsequent 'vgreduce --removemissing' on
666 The status was already read in Attach, so we just return it.
668 @rtype: objects.BlockDevStatus
672 ldisk_status = constants.LDS_FAULTY
674 ldisk_status = constants.LDS_OKAY
676 return objects.BlockDevStatus(dev_path=self.dev_path,
681 is_degraded=self._degraded,
682 ldisk_status=ldisk_status)
684 def Open(self, force=False):
685 """Make the device ready for I/O.
687 This is a no-op for the LV device type.
# NOTE(review): the `def Close(self):` header is missing from this extract.
693 """Notifies that the device will no longer be used for I/O.
695 This is a no-op for the LV device type.
700 def Snapshot(self, size):
701 """Create a snapshot copy of an lvm block device.
703 @returns: tuple (vg, lv)
706 snap_name = self._lv_name + ".snap"
708 # remove existing snapshot if found
709 snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
710 base.IgnoreError(snap.Remove)
712 vg_info = self.GetVGInfo([self._vg_name], False)
714 base.ThrowError("Can't compute VG info for vg %s", self._vg_name)
715 free_size, _, _ = vg_info[0]
717 base.ThrowError("Not enough free space: required %s,"
718 " available %s", size, free_size)
720 _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
721 "-n%s" % snap_name, self.dev_path]))
723 return (self._vg_name, snap_name)
725 def _RemoveOldInfo(self):
726 """Try to remove old tags from the lv.
729 result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
733 raw_tags = result.stdout.strip()
735 for tag in raw_tags.split(","):
736 _CheckResult(utils.RunCmd(["lvchange", "--deltag",
737 tag.strip(), self.dev_path]))
739 def SetInfo(self, text):
740 """Update metadata with info text.
743 base.BlockDev.SetInfo(self, text)
745 self._RemoveOldInfo()
747 # Replace invalid characters
748 text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
749 text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
751 # Only up to 128 characters are allowed
754 _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
756 def Grow(self, amount, dryrun, backingstore):
757 """Grow the logical volume.
762 if self.pe_size is None or self.stripe_count is None:
763 if not self.Attach():
764 base.ThrowError("Can't attach to LV during Grow()")
765 full_stripe_size = self.pe_size * self.stripe_count
# Round the grow amount up to a multiple of the full stripe size.
768 rest = amount % full_stripe_size
770 amount += full_stripe_size - rest
771 cmd = ["lvextend", "-L", "+%dk" % amount]
774 # we try multiple algorithms since the 'best' ones might not have
775 # space available in the right place, but later ones might (since
776 # they have less constraints); also note that only recent LVM
778 for alloc_policy in "contiguous", "cling", "normal":
779 result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
780 if not result.failed:
782 base.ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
784 def GetActualSpindles(self):
785 """Return the number of spindles used.
788 assert self.attached, "BlockDevice not attached in GetActualSpindles()"
789 return len(self.pv_names)
# NOTE(review): extract -- blank lines, `try:` guards and some headers are
# not visible; comments only state what the visible code establishes.
792 class FileStorage(base.BlockDev):
795 This class represents a file storage backend device.
797 The unique_id for the file device is a (file_driver, file_path) tuple.
800 def __init__(self, unique_id, children, size, params):
801 """Initializes a file device backend.
805 raise errors.BlockDeviceError("Invalid setup for file device")
806 super(FileStorage, self).__init__(unique_id, children, size, params)
807 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
808 raise ValueError("Invalid configuration data %s" % str(unique_id))
809 self.driver = unique_id[0]
810 self.dev_path = unique_id[1]
# Enforce the cluster's file-storage path whitelist on every instantiation.
812 CheckFileStoragePath(self.dev_path)
817 """Assemble the device.
819 Checks whether the file device exists, raises BlockDeviceError otherwise.
822 if not os.path.exists(self.dev_path):
823 base.ThrowError("File device '%s' does not exist" % self.dev_path)
826 """Shutdown the device.
828 This is a no-op for the file type, as we don't deactivate
829 the file on shutdown.
834 def Open(self, force=False):
835 """Make the device ready for I/O.
837 This is a no-op for the file type.
843 """Notifies that the device will no longer be used for I/O.
845 This is a no-op for the file type.
851 """Remove the file backing the block device.
854 @return: True if the removal was successful
# A missing file (ENOENT) is not an error: removal is idempotent.
858 os.remove(self.dev_path)
860 if err.errno != errno.ENOENT:
861 base.ThrowError("Can't remove file '%s': %s", self.dev_path, err)
863 def Rename(self, new_id):
867 # TODO: implement rename for file-based storage
868 base.ThrowError("Rename is not supported for file-based storage")
870 def Grow(self, amount, dryrun, backingstore):
873 @param amount: the amount (in mebibytes) to grow with
878 # Check that the file exists
880 current_size = self.GetActualSize()
881 new_size = current_size + amount * 1024 * 1024
882 assert new_size > current_size, "Cannot Grow with a negative amount"
883 # We can't really simulate the growth
887 f = open(self.dev_path, "a+")
890 except EnvironmentError, err:
# NOTE(review): "%" should be "%s" -- as written the error text is dropped
# from the message; fix in a code pass.
891 base.ThrowError("Error in file growth: %", str(err))
# NOTE(review): the `def Attach(self):` header is missing from this extract.
894 """Attach to an existing file.
896 Check if this file already exists.
899 @return: True if file exists
902 self.attached = os.path.exists(self.dev_path)
905 def GetActualSize(self):
906 """Return the actual disk size.
908 @note: the device needs to be active when this is called
911 assert self.attached, "BlockDevice not attached in GetActualSize()"
913 st = os.stat(self.dev_path)
916 base.ThrowError("Can't stat %s: %s", self.dev_path, err)
919 def Create(cls, unique_id, children, size, spindles, params, excl_stor):
920 """Create a new file.
922 @param size: the size of file in MiB
924 @rtype: L{bdev.FileStorage}
925 @return: an instance of FileStorage
929 raise errors.ProgrammerError("FileStorage device requested with"
930 " exclusive_storage")
931 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
932 raise ValueError("Invalid configuration data %s" % str(unique_id))
934 dev_path = unique_id[1]
936 CheckFileStoragePath(dev_path)
# O_EXCL guarantees we never clobber an existing file.
939 fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
940 f = os.fdopen(fd, "w")
941 f.truncate(size * 1024 * 1024)
943 except EnvironmentError, err:
944 if err.errno == errno.EEXIST:
945 base.ThrowError("File already existing: %s", dev_path)
# NOTE(review): "%" should be "%s" here as well; fix in a code pass.
946 base.ThrowError("Error in file creation: %", str(err))
948 return FileStorage(unique_id, children, size, params)
# NOTE(review): extract -- blank lines, decorators and some method headers
# are not visible; comments only state what the visible code establishes.
951 class PersistentBlockDevice(base.BlockDev):
952 """A block device with persistent node
954 May be either directly attached, or exposed through DM (e.g. dm-multipath).
955 udev helpers are probably required to give persistent, human-friendly
958 For the time being, pathnames are required to lie under /dev.
961 def __init__(self, unique_id, children, size, params):
962 """Attaches to a static block device.
964 The unique_id is a path under /dev.
967 super(PersistentBlockDevice, self).__init__(unique_id, children, size,
969 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
970 raise ValueError("Invalid configuration data %s" % str(unique_id))
971 self.dev_path = unique_id[1]
# Resolve symlinks so the /dev containment check cannot be bypassed.
972 if not os.path.realpath(self.dev_path).startswith("/dev/"):
973 raise ValueError("Full path '%s' lies outside /dev" %
974 os.path.realpath(self.dev_path))
975 # TODO: this is just a safety guard checking that we only deal with devices
976 # we know how to handle. In the future this will be integrated with
977 # external storage backends and possible values will probably be collected
978 # from the cluster configuration.
979 if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
980 raise ValueError("Got persistent block device of invalid type: %s" %
983 self.major = self.minor = None
987 def Create(cls, unique_id, children, size, spindles, params, excl_stor):
988 """Create a new device
990 This is a noop, we only return a PersistentBlockDevice instance
994 raise errors.ProgrammerError("Persistent block device requested with"
995 " exclusive_storage")
996 return PersistentBlockDevice(unique_id, children, 0, params)
1006 def Rename(self, new_id):
1007 """Rename this device.
1010 base.ThrowError("Rename is not supported for PersistentBlockDev storage")
# NOTE(review): the `def Attach(self):` header is missing from this extract.
1013 """Attach to an existing block device.
1017 self.attached = False
1019 st = os.stat(self.dev_path)
1020 except OSError, err:
1021 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1024 if not stat.S_ISBLK(st.st_mode):
1025 logging.error("%s is not a block device", self.dev_path)
1028 self.major = os.major(st.st_rdev)
1029 self.minor = os.minor(st.st_rdev)
1030 self.attached = True
1035 """Assemble the device.
1041 """Shutdown the device.
1046 def Open(self, force=False):
1047 """Make the device ready for I/O.
1053 """Notifies that the device will no longer be used for I/O.
1058 def Grow(self, amount, dryrun, backingstore):
1059 """Grow this device -- not supported for persistent block devices.
1062 base.ThrowError("Grow is not supported for PersistentBlockDev storage")
# NOTE(review): extract -- blank lines, decorators, `try:`/`if` guards and
# some statements are not visible; comments only state what the visible code
# establishes.
1065 class RADOSBlockDevice(base.BlockDev):
1066 """A RADOS Block Device (rbd).
1068 This class implements the RADOS Block Device for the backend. You need
1069 the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
1070 this to be functional.
1073 def __init__(self, unique_id, children, size, params):
1074 """Attaches to an rbd device.
1077 super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
1078 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1079 raise ValueError("Invalid configuration data %s" % str(unique_id))
1081 self.driver, self.rbd_name = unique_id
1083 self.major = self.minor = None
1087 def Create(cls, unique_id, children, size, spindles, params, excl_stor):
1088 """Create a new rbd device.
1090 Provision a new rbd volume inside a RADOS pool.
1093 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1094 raise errors.ProgrammerError("Invalid configuration data %s" %
1097 raise errors.ProgrammerError("RBD device requested with"
1098 " exclusive_storage")
1099 rbd_pool = params[constants.LDP_POOL]
1100 rbd_name = unique_id[1]
1102 # Provision a new rbd volume (Image) inside the RADOS cluster.
1103 cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
1104 rbd_name, "--size", "%s" % size]
1105 result = utils.RunCmd(cmd)
1107 base.ThrowError("rbd creation failed (%s): %s",
1108 result.fail_reason, result.output)
1110 return RADOSBlockDevice(unique_id, children, size, params)
# NOTE(review): the `def Remove(self):` header is missing from this extract.
1113 """Remove the rbd device.
1116 rbd_pool = self.params[constants.LDP_POOL]
1117 rbd_name = self.unique_id[1]
1119 if not self.minor and not self.Attach():
1120 # The rbd device doesn't exist.
1123 # First shutdown the device (remove mappings).
1126 # Remove the actual Volume (Image) from the RADOS cluster.
1127 cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
1128 result = utils.RunCmd(cmd)
1130 base.ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
1131 result.fail_reason, result.output)
1133 def Rename(self, new_id):
1134 """Rename this device.
# NOTE(review): the `def Attach(self):` header is missing from this extract.
1140 """Attach to an existing rbd device.
1142 This method maps the rbd volume that matches our name with
1143 an rbd device and then attaches to this device.
1146 self.attached = False
1148 # Map the rbd volume to a block device under /dev
1149 self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
1152 st = os.stat(self.dev_path)
1153 except OSError, err:
1154 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1157 if not stat.S_ISBLK(st.st_mode):
1158 logging.error("%s is not a block device", self.dev_path)
1161 self.major = os.major(st.st_rdev)
1162 self.minor = os.minor(st.st_rdev)
1163 self.attached = True
1167 def _MapVolumeToBlockdev(self, unique_id):
1168 """Maps existing rbd volumes to block devices.
1170 This method should be idempotent if the mapping already exists.
1173 @return: the block device path that corresponds to the volume
1176 pool = self.params[constants.LDP_POOL]
1179 # Check if the mapping already exists.
1180 rbd_dev = self._VolumeToBlockdev(pool, name)
1182 # The mapping exists. Return it.
1185 # The mapping doesn't exist. Create it.
1186 map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
1187 result = utils.RunCmd(map_cmd)
1189 base.ThrowError("rbd map failed (%s): %s",
1190 result.fail_reason, result.output)
1192 # Find the corresponding rbd device.
1193 rbd_dev = self._VolumeToBlockdev(pool, name)
1195 base.ThrowError("rbd map succeeded, but could not find the rbd block"
1196 " device in output of showmapped, for volume: %s", name)
1198 # The device was successfully mapped. Return it.
1202 def _VolumeToBlockdev(cls, pool, volume_name):
1203 """Do the 'volume name'-to-'rbd block device' resolving.
1206 @param pool: RADOS pool to use
1207 @type volume_name: string
1208 @param volume_name: the name of the volume whose device we search for
1209 @rtype: string or None
1210 @return: block device path if the volume is mapped, else None
1214 # Newer versions of the rbd tool support json output formatting. Use it
1224 result = utils.RunCmd(showmap_cmd)
# NOTE(review): adjacent string literals below concatenate without a space
# ("%s," + "falling back") -- log message reads "...,falling back"; fix in a
# code pass.
1226 logging.error("rbd JSON output formatting returned error (%s): %s,"
1227 "falling back to plain output parsing",
1228 result.fail_reason, result.output)
1229 raise RbdShowmappedJsonError
1231 return cls._ParseRbdShowmappedJson(result.output, volume_name)
1232 except RbdShowmappedJsonError:
1233 # For older versions of rbd, we have to parse the plain / text output
1235 showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
1236 result = utils.RunCmd(showmap_cmd)
1238 base.ThrowError("rbd showmapped failed (%s): %s",
1239 result.fail_reason, result.output)
1241 return cls._ParseRbdShowmappedPlain(result.output, volume_name)
1244 def _ParseRbdShowmappedJson(output, volume_name):
1245 """Parse the json output of `rbd showmapped'.
1247 This method parses the json output of `rbd showmapped' and returns the rbd
1248 block device path (e.g. /dev/rbd0) that matches the given rbd volume.
1250 @type output: string
1251 @param output: the json output of `rbd showmapped'
1252 @type volume_name: string
1253 @param volume_name: the name of the volume whose device we search for
1254 @rtype: string or None
1255 @return: block device path if the volume is mapped, else None
1259 devices = serializer.LoadJson(output)
1260 except ValueError, err:
1261 base.ThrowError("Unable to parse JSON data: %s" % err)
1264 for d in devices.values(): # pylint: disable=E1103
1268 base.ThrowError("'name' key missing from json object %s", devices)
1270 if name == volume_name:
# A volume mapped more than once is ambiguous -- refuse to pick one.
1271 if rbd_dev is not None:
1272 base.ThrowError("rbd volume %s is mapped more than once", volume_name)
1274 rbd_dev = d["device"]
1279 def _ParseRbdShowmappedPlain(output, volume_name):
1280 """Parse the (plain / text) output of `rbd showmapped'.
1282 This method parses the output of `rbd showmapped' and returns
1283 the rbd block device path (e.g. /dev/rbd0) that matches the
1286 @type output: string
1287 @param output: the plain text output of `rbd showmapped'
1288 @type volume_name: string
1289 @param volume_name: the name of the volume whose device we search for
1290 @rtype: string or None
1291 @return: block device path if the volume is mapped, else None
1298 lines = output.splitlines()
1300 # Try parsing the new output format (ceph >= 0.55).
1301 splitted_lines = map(lambda l: l.split(), lines)
1303 # Check for empty output.
1304 if not splitted_lines:
1307 # Check showmapped output, to determine number of fields.
1308 field_cnt = len(splitted_lines[0])
1309 if field_cnt != allfields:
1310 # Parsing the new format failed. Fallback to parsing the old output
1312 splitted_lines = map(lambda l: l.split("\t"), lines)
1313 if field_cnt != allfields:
1314 base.ThrowError("Cannot parse rbd showmapped output expected %s fields,"
1315 " found %s", allfields, field_cnt)
1318 filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
1321 if len(matched_lines) > 1:
1322 base.ThrowError("rbd volume %s mapped more than once", volume_name)
1325 # rbd block device found. Return it.
1326 rbd_dev = matched_lines[0][devicefield]
1329 # The given volume is not mapped.
1333 """Assemble the device.
1339 """Shutdown the device.
1342 if not self.minor and not self.Attach():
1343 # The rbd device doesn't exist.
1346 # Unmap the block device from the Volume.
1347 self._UnmapVolumeFromBlockdev(self.unique_id)
1350 self.dev_path = None
1352 def _UnmapVolumeFromBlockdev(self, unique_id):
1353 """Unmaps the rbd device from the Volume it is mapped.
1355 Unmaps the rbd device from the Volume it was previously mapped to.
1356 This method should be idempotent if the Volume isn't mapped.
1359 pool = self.params[constants.LDP_POOL]
1362 # Check if the mapping already exists.
1363 rbd_dev = self._VolumeToBlockdev(pool, name)
1366 # The mapping exists. Unmap the rbd device.
1367 unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
1368 result = utils.RunCmd(unmap_cmd)
1370 base.ThrowError("rbd unmap failed (%s): %s",
1371 result.fail_reason, result.output)
1373 def Open(self, force=False):
1374 """Make the device ready for I/O.
1380 """Notifies that the device will no longer be used for I/O.
1385 def Grow(self, amount, dryrun, backingstore):
1388 @type amount: integer
1389 @param amount: the amount (in mebibytes) to grow with
1390 @type dryrun: boolean
1391 @param dryrun: whether to execute the operation in simulation mode
1392 only, without actually increasing the size
1395 if not backingstore:
1397 if not self.Attach():
1398 base.ThrowError("Can't attach to rbd device during Grow()")
1401 # the rbd tool does not support dry runs of resize operations.
1402 # Since rbd volumes are thinly provisioned, we assume
1403 # there is always enough free space for the operation.
1406 rbd_pool = self.params[constants.LDP_POOL]
1407 rbd_name = self.unique_id[1]
1408 new_size = self.size + amount
1410 # Resize the rbd volume (Image) inside the RADOS cluster.
1411 cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
1412 rbd_name, "--size", "%s" % new_size]
1413 result = utils.RunCmd(cmd)
1415 base.ThrowError("rbd resize failed (%s): %s",
1416 result.fail_reason, result.output)
1419 class ExtStorageDevice(base.BlockDev):
1420 """A block device provided by an ExtStorage Provider.
1422 This class implements the External Storage Interface, which means
1423 handling of the externally provided block devices.
1426 def __init__(self, unique_id, children, size, params):
1427 """Attaches to an extstorage block device.
1430 super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
1431 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1432 raise ValueError("Invalid configuration data %s" % str(unique_id))
1434 self.driver, self.vol_name = unique_id
1435 self.ext_params = params
1437 self.major = self.minor = None
1441 def Create(cls, unique_id, children, size, spindles, params, excl_stor):
1442 """Create a new extstorage device.
1444 Provision a new volume using an extstorage provider, which will
1445 then be mapped to a block device.
1448 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1449 raise errors.ProgrammerError("Invalid configuration data %s" %
1452 raise errors.ProgrammerError("extstorage device requested with"
1453 " exclusive_storage")
1455 # Call the External Storage's create script,
1456 # to provision a new Volume inside the External Storage
1457 _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
1460 return ExtStorageDevice(unique_id, children, size, params)
1463 """Remove the extstorage device.
1466 if not self.minor and not self.Attach():
1467 # The extstorage device doesn't exist.
1470 # First shutdown the device (remove mappings).
1473 # Call the External Storage's remove script,
1474 # to remove the Volume from the External Storage
1475 _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
1478 def Rename(self, new_id):
1479 """Rename this device.
1485 """Attach to an existing extstorage device.
1487 This method maps the extstorage volume that matches our name with
1488 a corresponding block device and then attaches to this device.
1491 self.attached = False
1493 # Call the External Storage's attach script,
1494 # to attach an existing Volume to a block device under /dev
1495 self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
1496 self.unique_id, self.ext_params)
1499 st = os.stat(self.dev_path)
1500 except OSError, err:
1501 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1504 if not stat.S_ISBLK(st.st_mode):
1505 logging.error("%s is not a block device", self.dev_path)
1508 self.major = os.major(st.st_rdev)
1509 self.minor = os.minor(st.st_rdev)
1510 self.attached = True
1515 """Assemble the device.
1521 """Shutdown the device.
1524 if not self.minor and not self.Attach():
1525 # The extstorage device doesn't exist.
1528 # Call the External Storage's detach script,
1529 # to detach an existing Volume from it's block device under /dev
1530 _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
1534 self.dev_path = None
1536 def Open(self, force=False):
1537 """Make the device ready for I/O.
1543 """Notifies that the device will no longer be used for I/O.
1548 def Grow(self, amount, dryrun, backingstore):
1551 @type amount: integer
1552 @param amount: the amount (in mebibytes) to grow with
1553 @type dryrun: boolean
1554 @param dryrun: whether to execute the operation in simulation mode
1555 only, without actually increasing the size
1558 if not backingstore:
1560 if not self.Attach():
1561 base.ThrowError("Can't attach to extstorage device during Grow()")
1564 # we do not support dry runs of resize operations for now.
1567 new_size = self.size + amount
1569 # Call the External Storage's grow script,
1570 # to grow an existing Volume inside the External Storage
1571 _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
1572 self.ext_params, str(self.size), grow=str(new_size))
1574 def SetInfo(self, text):
1575 """Update metadata with info text.
1578 # Replace invalid characters
1579 text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
1580 text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
1582 # Only up to 128 characters are allowed
1585 # Call the External Storage's setinfo script,
1586 # to set metadata for an existing Volume inside the External Storage
1587 _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
1588 self.ext_params, metadata=text)
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the name of the Volume
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    base.ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # NOTE: compare action strings with equality, not identity; `is' on
  # strings only works by accident of interning.
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    base.ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                    action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    base.ThrowError("External storage's %s script failed (%s), last"
                    " lines of output:\n%s",
                    action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
1667 def ExtStorageFromDisk(name, base_dir=None):
1668 """Create an ExtStorage instance from disk.
1670 This function will return an ExtStorage instance
1671 if the given name is a valid ExtStorage name.
1673 @type base_dir: string
1674 @keyword base_dir: Base directory containing ExtStorage installations.
1675 Defaults to a search in all the ES_SEARCH_PATH dirs.
1677 @return: True and the ExtStorage instance if we find a valid one, or
1678 False and the diagnose message on error
1681 if base_dir is None:
1682 es_base_dir = pathutils.ES_SEARCH_PATH
1684 es_base_dir = [base_dir]
1686 es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)
1689 return False, ("Directory for External Storage Provider %s not"
1690 " found in search path" % name)
1692 # ES Files dictionary, we will populate it with the absolute path
1693 # names; if the value is True, then it is a required file, otherwise
1695 es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
1697 es_files[constants.ES_PARAMETERS_FILE] = True
1699 for (filename, _) in es_files.items():
1700 es_files[filename] = utils.PathJoin(es_dir, filename)
1703 st = os.stat(es_files[filename])
1704 except EnvironmentError, err:
1705 return False, ("File '%s' under path '%s' is missing (%s)" %
1706 (filename, es_dir, utils.ErrnoOrStr(err)))
1708 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
1709 return False, ("File '%s' under path '%s' is not a regular file" %
1712 if filename in constants.ES_SCRIPTS:
1713 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
1714 return False, ("File '%s' under path '%s' is not executable" %
1718 if constants.ES_PARAMETERS_FILE in es_files:
1719 parameters_file = es_files[constants.ES_PARAMETERS_FILE]
1721 parameters = utils.ReadFile(parameters_file).splitlines()
1722 except EnvironmentError, err:
1723 return False, ("Error while reading the EXT parameters file at %s: %s" %
1724 (parameters_file, utils.ErrnoOrStr(err)))
1725 parameters = [v.split(None, 1) for v in parameters]
1728 objects.ExtStorage(name=name, path=es_dir,
1729 create_script=es_files[constants.ES_SCRIPT_CREATE],
1730 remove_script=es_files[constants.ES_SCRIPT_REMOVE],
1731 grow_script=es_files[constants.ES_SCRIPT_GROW],
1732 attach_script=es_files[constants.ES_SCRIPT_ATTACH],
1733 detach_script=es_files[constants.ES_SCRIPT_DETACH],
1734 setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
1735 verify_script=es_files[constants.ES_SCRIPT_VERIFY],
1736 supported_parameters=parameters)
1740 def _ExtStorageEnvironment(unique_id, ext_params,
1741 size=None, grow=None, metadata=None):
1742 """Calculate the environment for an External Storage script.
1744 @type unique_id: tuple (driver, vol_name)
1745 @param unique_id: ExtStorage pool and name of the Volume
1746 @type ext_params: dict
1747 @param ext_params: the EXT parameters
1749 @param size: size of the Volume (in mebibytes)
1751 @param grow: new size of Volume after grow (in mebibytes)
1752 @type metadata: string
1753 @param metadata: metadata info of the Volume
1755 @return: dict of environment variables
1758 vol_name = unique_id[1]
1761 result["VOL_NAME"] = vol_name
1764 for pname, pvalue in ext_params.items():
1765 result["EXTP_%s" % pname.upper()] = str(pvalue)
1767 if size is not None:
1768 result["VOL_SIZE"] = size
1770 if grow is not None:
1771 result["VOL_NEW_SIZE"] = grow
1773 if metadata is not None:
1774 result["VOL_METADATA"] = metadata
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage
  @rtype: string
  @return: the full path of the log file

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    base.ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  logfile = "%s-%s-%s-%s.log" % (kind, es_name, volume,
                                 utils.TimestampForFilename())

  return utils.PathJoin(pathutils.LOG_ES_DIR, logfile)
# Map of logical-disk types to their implementing classes; FileStorage is
# added below only when file-based storage is enabled at build time.
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: drbd.DRBD8Dev,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
def _VerifyDiskType(dev_type):
  """Make sure the requested device type is supported.

  @param dev_type: a LD_* disk type constant
  @raise errors.ProgrammerError: if I{dev_type} has no entry in L{DEV_MAP}

  """
  if dev_type in DEV_MAP:
    return
  raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  @type disk: L{objects.Disk}
  @param disk: the disk whose parameters are checked
  @raise errors.ProgrammerError: if any expected parameter is missing

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter
  @rtype: L{bdev.BlockDev} or None
  @return: the device if it attached successfully, else None

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter
  @rtype: L{bdev.BlockDev}
  @return: the assembled device

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device
1869 def Create(disk, children, excl_stor):
1872 @type disk: L{objects.Disk}
1873 @param disk: the disk object to create
1874 @type children: list of L{bdev.BlockDev}
1875 @param children: the list of block devices that are children of the device
1876 represented by the disk parameter
1877 @type excl_stor: boolean
1878 @param excl_stor: Whether exclusive_storage is active
1879 @rtype: L{bdev.BlockDev}
1880 @return: the created device, or C{None} in case of an error
1883 _VerifyDiskType(disk.dev_type)
1884 _VerifyDiskParams(disk)
1885 device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
1886 disk.spindles, disk.params, excl_stor)