4 # Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Block device abstraction"""
29 import pyparsing as pyp
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import constants
37 from ganeti import objects
38 from ganeti import compat
39 from ganeti import netutils
40 from ganeti import pathutils
41 from ganeti import serializer
44 # Size of reads in _CanReadDevice
45 _DEVICE_READ_SIZE = 128 * 1024
class RbdShowmappedJsonError(Exception):
  """`rbd showmapped' JSON formatting error Exception class.
def _IgnoreError(fn, *args, **kwargs):
  """Executes the given function, ignoring BlockDeviceErrors.

  This is used in order to simplify the execution of cleanup or
  rollback functions.

  @return: True when fn didn't raise an exception, False otherwise

  """
  # NOTE(review): the `try:` line and the success path are elided in this
  # fragment; only the handler is visible.  Errors are deliberately logged
  # and swallowed (best-effort cleanup semantics).
  except errors.BlockDeviceError, err:
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
def _ThrowError(msg, *args):
  """Log an error to the node daemon and then raise an exception.

  @param msg: the text of the exception
  @raise errors.BlockDeviceError

  """
  # NOTE(review): lines elided here presumably interpolate *args into msg
  # and log it before raising — confirm against the full source.  This
  # helper never returns normally.
  raise errors.BlockDeviceError(msg)
def _CheckResult(result):
  """Throws an error if the given result is a failed one.

  @param result: result from RunCmd

  """
  # NOTE(review): the `if result.failed:` guard and the tail of this call
  # (the result.output argument and closing paren) are elided here.
  # Delegates to _ThrowError, which raises errors.BlockDeviceError.
  _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
def _CanReadDevice(path):
  """Check if we can read from the given device.

  This tries to read the first 128k of the device.

  """
  # NOTE(review): the `try:` line and the success `return True` path are
  # elided; _DEVICE_READ_SIZE is the 128 KiB module constant defined above.
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
  except EnvironmentError:
    # Any OS-level read error simply means "not readable"; log with the
    # full traceback for diagnosis.
    logging.warning("Can't read from device %s", path, exc_info=True)
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  """
  # NOTE(review): initialisation of `paths` (presumably a set of bare system
  # directories) is elided in this fragment.
  # Expand e.g. "/usr" + "lib64" into "/usr/lib64" for every combination of
  # prefix and system directory name.
  for prefix in ["", "/usr", "/usr/local"]:
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
                     ["bin", "lib", "lib32", "lib64", "sbin"]))

  # Normalize and deduplicate; the result is an immutable frozenset.
  return compat.UniqueFrozenset(map(os.path.normpath, paths))
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @param paths: List of paths to be checked
  @return: Sorted list of paths for which the user should be warned

  """
  # _forbidden is computed once at function-definition time (intentional
  # default-argument caching; also overridable for tests).
  # NOTE(review): the `def _Check(path):` header is elided here; the
  # predicate below flags relative paths, exact forbidden paths, and paths
  # lying below a forbidden prefix (Python 2 `filter` returns a truthy
  # list when anything matches).
    return (not os.path.isabs(path) or
            path in _forbidden or
            filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  """
  # _filename defaults to the cluster's allowed-paths file and is only
  # parameterised so tests can substitute their own file.
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @param path: Path to check
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
  # NOTE(review): the message tail and the `for i in allowed:` loop header
  # are elided in this fragment.
    if not os.path.isabs(i):
      # Relative entries in the allowed list are ignored, not fatal.
      logging.info("Ignoring relative path '%s' for file storage", i)
    if utils.IsBelowDir(i, path):
      # NOTE(review): the success path (presumably an early return/break)
      # is elided here — confirm against the full source.
  # Reached only when no allowed prefix matched the path.
  raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @return: List of allowed paths (can be an empty list)

  """
  # NOTE(review): the `try:` line is elided here.
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    # NOTE(review): the handler body (presumably returning an empty list
    # when the file is missing/unreadable) is elided — confirm.
  return utils.FilterEmptyLinesAndComments(contents)
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)

  # First reject anything under a forbidden system prefix...
  if _ComputeWrongFileStoragePaths([path]):
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
  # NOTE(review): the `% path` tail of the raise above is elided.

  # ...then require the path to be below an explicitly allowed one.
  _CheckFileStoragePath(path, allowed)
class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back
  like a linear one.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - its major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachment to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """

  def __init__(self, unique_id, children, size, params):
    # NOTE(review): initialisation of dev_path, size, params and the
    # major/minor attributes is elided in this fragment.
    self._children = children
    self.unique_id = unique_id
    # Not attached to a real device until Attach()/Assemble() succeeds.
    self.attached = False

    # NOTE(review): `def Assemble(self):` header elided below.
    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """

    # NOTE(review): `def Attach(self):` header elided below.
    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

    # NOTE(review): `def Close(self):` header elided below.
    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for other,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    # NOTE(review): a @classmethod decorator is presumably elided above
    # (the first parameter is `cls`) — confirm against the full source.
    raise NotImplementedError

    # NOTE(review): `def Remove(self):` header elided below.
    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    cannot proceed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need it.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

    # NOTE(review): `def Shutdown(self):` header elided below.
    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError
  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param params: dictionary of LD level disk parameters related to the
        synchronization
    @return: a list of error messages, emitted both by the current node and by
        children. An empty list means no errors.

    """
    # NOTE(review): the `result = []` initialisation is elided here.
    for child in self._children:
      # Collect error messages from the whole child tree.
      result.extend(child.SetSyncParams(params))
    # NOTE(review): the `return result` line is elided.

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param pause: Whether to pause or resume

    """
    # NOTE(review): the `result = True` initialisation is elided here.
    for child in self._children:
      # All children must succeed for the combined result to stay True.
      result = result and child.PauseResumeSync(pause)
    # NOTE(review): the `return result` line is elided.
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    # Base implementation: report a healthy, non-syncing device.
    # NOTE(review): the intermediate keyword arguments (sync_percent,
    # estimated_time, is_degraded, ...) are elided in this fragment.
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

    # NOTE(review): an `if self._children:` guard is presumably elided
    # above this loop — confirm against the full source.
    for child in self._children:
      child_status = child.GetSyncStatus()

      if min_percent is None:
        min_percent = child_status.sync_percent
      elif child_status.sync_percent is not None:
        min_percent = min(min_percent, child_status.sync_percent)

      # NOTE(review): the symmetric `if max_time is None:` header is
      # elided here.
        max_time = child_status.estimated_time
      elif child_status.estimated_time is not None:
        max_time = max(max_time, child_status.estimated_time)

      # A single degraded child degrades the whole device.
      is_degraded = is_degraded or child_status.is_degraded

      if ldisk_status is None:
        ldisk_status = child_status.ldisk_status
      elif child_status.ldisk_status is not None:
        # Presumably LDS_* values are ordered by severity so max() keeps
        # the worst state — confirm against constants.
        ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)
  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    # Propagate to children; the loop body is elided in this fragment.
    for child in self._children:

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
    # NOTE(review): the `if result.failed:` guard is elided here.
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
    # blockdev --getsize64 reports bytes; the `try:` line and any unit
    # conversion after parsing are elided in this fragment.
      sz = int(result.output.strip())
    except (ValueError, TypeError), err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))

    # NOTE(review): `def __repr__(self):` header elided below.
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
class LogicalVolume(BlockDev):
  """Logical Volume block device.

  """
  # Valid characters and reserved names for VG/LV names; enforced by
  # _ValidateName below.
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])

  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self._vg_name, self._lv_name = unique_id
    self._ValidateName(self._vg_name)
    self._ValidateName(self._lv_name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    # Assume degraded until Attach() reads the real LV status.
    self._degraded = True
    self.major = self.minor = self.pe_size = self.stripe_count = None
  # NOTE(review): @staticmethod decorators for the three helpers below are
  # elided in this fragment (none takes self/cls).
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty

    """
    assert len(pvs_info) > 0
    smallest = min([pv.size for pv in pvs_info])
    # Leave margin/reserved headroom below the smallest PV's size.
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)

  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @param size: LV size in MiB
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @return: number of PVs needed

    """
    assert len(pvs_info) > 0
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
    # Round up: a partially-used PV still counts as a whole one.
    return int(math.ceil(float(size) / pv_size))

  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    """
    empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
    if max_pvs is not None:
      empty_pvs = empty_pvs[:max_pvs]
    return map((lambda pv: pv.name), empty_pvs)
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new logical volume.

    """
    # NOTE(review): a @classmethod decorator is presumably elided above.
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    # NOTE(review): the emptiness checks selecting between these two error
    # messages are elided in this fragment.
      msg = "No (empty) PVs found"
      msg = "Can't compute PV info for vg %s" % vg_name
    # Prefer the PVs with the most free space.
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    # Cannot stripe over more PVs than exist.
    stripes = min(current_pvs, desired_stripes)

    # NOTE(review): the exclusive-storage branch header (`if excl_stor:`)
    # is elided above these lines.
    (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
    req_pvs = cls._ComputeNumPvs(size, pvs_info)
    pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
    current_pvs = len(pvlist)
    if current_pvs < req_pvs:
      _ThrowError("Not enough empty PVs to create a disk of %d MB:"
                  " %d available, %d needed", size, current_pvs, req_pvs)
    assert current_pvs == len(pvlist)
    if stripes > current_pvs:
      # No warning issued for this, as it's no surprise
      stripes = current_pvs

    if stripes < desired_stripes:
      logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                      " available.", desired_stripes, vg_name, current_pvs)
    free_size = sum([pv.free for pv in pvs_info])
    # The size constraint should have been checked from the master before
    # calling the create function.
    # NOTE(review): the `if free_size < size:` guard is elided here.
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        # NOTE(review): a `break` on success is elided here; the error
        # below fires only when every stripe count failed.
    _ThrowError("LV create failed (%s): %s",
                result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM Volume info using lvm_cmd.

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of dicts each with the parsed fields

    """
    # NOTE(review): despite the docstring, `data` below collects lists of
    # field values, not dicts — consider fixing the docstring upstream.
    # NOTE(review): the `if not fields:` guard and the `sep` definition are
    # elided in this fragment.
      raise errors.ProgrammerError("No fields specified")

    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    # NOTE(review): the `if result.failed:` guard is elided here.
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    for line in result.stdout.splitlines():
      splitted_fields = line.strip().split(sep)

      # Sanity-check the field count on every line.
      if len(fields) != len(splitted_fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
      data.append(splitted_fields)
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs
    @param include_lvs: whether to include a list of LVs hosted on each PV

    @return: list of objects.LvmPvInfo objects

    """
    # We request "lv_name" field only if we care about LVs, so we don't get
    # a long list of entries with many duplicates unless we really have to.
    # The duplicate "pv_name" field will be ignored.
    # NOTE(review): the `lvfield` selection and the `try:` line are elided
    # in this fragment.
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size", lvfield])
    except errors.GenericError, err:
      logging.error("Can't get PV information: %s", err)

    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
    # out duplicates.
    # NOTE(review): an `if include_lvs:` guard around this sort may be
    # elided — confirm against the full source.
    info.sort(key=(lambda i: (i[0], i[5])))

    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
      # Beware of duplicates (check before inserting)
      if lastpvi and lastpvi.name == pv_name:
        if include_lvs and lv_name:
          # Only append when different from the last recorded LV.
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
            lastpvi.lv_list.append(lv_name)
      # NOTE(review): the else-branch header and `lvl` initialisation are
      # elided below.
        if include_lvs and lv_name:
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                                    size=float(pv_size), free=float(pv_free),
                                    attributes=pv_attr, lv_list=lvl)
  def _GetExclusiveStorageVgFree(cls, vg_name):
    """Return the free disk space in the given VG, in exclusive storage mode.

    @type vg_name: string
    @param vg_name: VG name
    @return: free space in MiB

    """
    pvs_info = cls.GetPVInfo([vg_name])
    # With exclusive storage, free space is the standard PV size times the
    # number of still-empty PVs.
    pv_size = cls._GetStdPvSize(pvs_info)
    num_pvs = len(cls._GetEmptyPvNames(pvs_info))
    return pv_size * num_pvs

  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
    """Get the free space info for specific VGs.

    @param vg_names: list of volume group names, if empty all will be returned
    @param excl_stor: whether exclusive_storage is enabled
    @param filter_readonly: whether to skip over readonly VGs

    @return: list of tuples (free_space, total_size, name) with free_space in
        MiB

    """
    # NOTE(review): the `try:` line and the tail of the fields list are
    # elided in this fragment.
    info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
    except errors.GenericError, err:
      logging.error("Can't get VG information: %s", err)

    for vg_name, vg_free, vg_attr, vg_size in info:
      # (possibly) skip over vgs which are not writable
      if filter_readonly and vg_attr[0] == "r":
      # (possibly) skip over vgs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
      # Exclusive storage needs a different concept of free space
      # NOTE(review): the `if excl_stor:` header is elided here.
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
        assert es_free <= vg_free
      data.append((float(vg_free), float(vg_size), vg_name))
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    # NOTE(review): a @classmethod decorator is presumably elided above.
    if (not cls._VALID_NAME_RE.match(name) or
        name in cls._INVALID_NAMES or
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
      _ThrowError("Invalid LVM name '%s'", name)

    # NOTE(review): `def Remove(self):` header elided below.
    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
                           (self._vg_name, self._lv_name)])
    # NOTE(review): the `if result.failed:` guard is elided here.
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
812 def Rename(self, new_id):
813 """Rename this logical volume.
816 if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
817 raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
818 new_vg, new_name = new_id
819 if new_vg != self._vg_name:
820 raise errors.ProgrammerError("Can't move a logical volume across"
821 " volume groups (from %s to to %s)" %
822 (self._vg_name, new_vg))
823 result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
825 _ThrowError("Failed to rename the logical volume: %s", result.output)
826 self._lv_name = new_name
827 self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    # NOTE(review): `def Attach(self):` header elided above this fragment.
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=k", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
    # NOTE(review): the `if result.failed:` guard is elided here.
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
    # NOTE(review): the `out = out.split(",")` step implied by the 5-way
    # unpack below is elided in this fragment.
    out = out[-1].strip().rstrip(",")
    # NOTE(review): the `len(out) != 5` check header is elided here.
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
    status, major, minor, pe_size, stripes = out
    # NOTE(review): the guard checking len(status) is elided here.
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
    # NOTE(review): the `try:` with int() conversions of major/minor is
    # elided here.
    except (TypeError, ValueError), err:
      logging.error("lvs major/minor cannot be parsed: %s", str(err))
    # NOTE(review): `try:` line elided.
      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      logging.error("Can't parse vg extent size: %s", err)

    # NOTE(review): `try:` line elided.
      stripes = int(stripes)
    except (TypeError, ValueError), err:
      logging.error("Can't parse the number of stripes: %s", err)

    self.pe_size = pe_size
    self.stripe_count = stripes
    # "v" attribute marks a virtual volume, i.e. an LV with no backing
    # storage anymore (e.g. after `vgreduce --removemissing').
    self._degraded = status[0] == "v"

    # NOTE(review): `def Assemble(self):` header elided below.
    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
    # NOTE(review): the `if result.failed:` guard is elided here.
      _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)

    # NOTE(review): `def Shutdown(self):` header elided below.
    """Shutdown the device.

    This is a no-op for the LV device type, as we don't deactivate the
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    status.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (read from it return I/O error). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    # NOTE(review): the `if self._degraded:` / `else:` headers are elided
    # in this fragment.
      ldisk_status = constants.LDS_FAULTY
      ldisk_status = constants.LDS_OKAY

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the LV device type.

    # NOTE(review): `def Close(self):` header elided below.
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the LV device type.
  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # remove existing snapshot if found
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
    _IgnoreError(snap.Remove)

    vg_info = self.GetVGInfo([self._vg_name], False)
    # NOTE(review): the `if not vg_info:` guard is elided here.
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
    # NOTE(review): the `if free_size < size:` guard is elided here.
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)

    # -s makes this a snapshot of the original LV.
    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)

  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    """
    # NOTE(review): the tail of this command list and the surrounding
    # `if not result.failed:` / `if raw_tags:` structure are elided here.
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
      raw_tags = result.stdout.strip()
        for tag in raw_tags.split(","):
          # Each existing tag is dropped individually.
          _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                     tag.strip(), self.dev_path]))
  def SetInfo(self, text):
    """Update metadata with info text.

    """
    BlockDev.SetInfo(self, text)

    self._RemoveOldInfo()

    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    # NOTE(review): the truncation statement is elided in this fragment.

    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    if not backingstore:
      # NOTE(review): the early-return body for logical-only grows is
      # elided here.
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")
    # NOTE(review): conversion of pe_size/amount units is elided around
    # these lines — confirm against the full source.
    full_stripe_size = self.pe_size * self.stripe_count
    rest = amount % full_stripe_size
    # NOTE(review): the `if rest != 0:` guard is elided; round the growth
    # up to a multiple of the full stripe size.
      amount += full_stripe_size - rest
    cmd = ["lvextend", "-L", "+%dk" % amount]
    # NOTE(review): the `if dryrun:` guard is elided here.
      cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports the --alloc option
    for alloc_policy in "contiguous", "cling", "normal":
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
      if not result.failed:
        # NOTE(review): a `return` on success is elided here; the error
        # below fires only when all policies failed.
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
class DRBD8Status(object):
  """A DRBD status representation class.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  # Patterns for the per-minor lines of /proc/drbd.
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  # Connection states as reported in the "cs:" field.
  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  # NOTE(review): the members of CSET_SYNC (and its closing bracket) are
  # elided in this fragment.
  CSET_SYNC = compat.UniqueFrozenset([

  # Disk states as reported in the "ds:" field.
  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  # Roles as reported in the "ro:" (or old "st:") field.
  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"
  def __init__(self, procline):
    # Try the "Unconfigured" line format first; it carries no role or
    # disk-state information.
    u = self.UNCONF_RE.match(procline)
    # NOTE(review): the `if u:` / `else:` headers are elided around the two
    # branches below.
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
      m = self.LINE_RE.match(procline)
      # NOTE(review): the `if not m:` header is elided here.
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      self.cstatus = m.group(1)
      self.lrole = m.group(2)
      self.rrole = m.group(3)
      self.ldisk = m.group(4)
      self.rdisk = m.group(5)

    # end reading of data from the LINE_RE or UNCONF_RE

    # Convenience boolean views of the parsed fields.
    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    m = self.SYNC_RE.match(procline)
    # NOTE(review): the `if m:` / `else:` headers are elided around the
    # branches below.
      self.sync_percent = float(m.group(1))
      hours = int(m.group(2))
      minutes = int(m.group(3))
      seconds = int(m.group(4))
      self.est_time = hours * 3600 + minutes * 60 + seconds
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      if self.is_in_resync:
        self.sync_percent = 0
      # NOTE(review): the `else:` header is elided here.
        self.sync_percent = None
      self.est_time = None
class BaseDRBD(BlockDev): # pylint: disable=W0223
  """Base DRBD class.

  This class contains a few bits of common functionality between the
  0.7 and 8.x versions of DRBD.

  """
  # Matches the "version:" header line of /proc/drbd.
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
  # Per-minor status lines; the second pattern matches unused minors.
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")

  # Connection-state strings used by this class.
  _ST_UNCONFIGURED = "Unconfigured"
  _ST_WFCONNECTION = "WFConnection"
  _ST_CONNECTED = "Connected"

  _STATUS_FILE = constants.DRBD_STATUS_FILE
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
1180 def _GetProcData(filename=_STATUS_FILE):
1181 """Return data from /proc/drbd.
1185 data = utils.ReadFile(filename).splitlines()
1186 except EnvironmentError, err:
1187 if err.errno == errno.ENOENT:
1188 _ThrowError("The file %s cannot be opened, check if the module"
1189 " is loaded (%s)", filename, str(err))
1191 _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
1193 _ThrowError("Can't read any data from %s", filename)
1197 def _MassageProcData(cls, data):
1198 """Transform the output of _GetProdData into a nicer form.
1200 @return: a dictionary of minor: joined lines from /proc/drbd
1205 old_minor = old_line = None
1207 if not line: # completely empty lines, as can be returned by drbd8.0+
1209 lresult = cls._VALID_LINE_RE.match(line)
1210 if lresult is not None:
1211 if old_minor is not None:
1212 results[old_minor] = old_line
1213 old_minor = int(lresult.group(1))
1216 if old_minor is not None:
1217 old_line += " " + line.strip()
1219 if old_minor is not None:
1220 results[old_minor] = old_line
1224 def _GetVersion(cls, proc_data):
1225 """Return the DRBD version.
1227 This will return a dict with keys:
1233 - proto2 (only on drbd > 8.2.X)
1236 first_line = proc_data[0].strip()
1237 version = cls._VERSION_RE.match(first_line)
1239 raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
1242 values = version.groups()
1244 "k_major": int(values[0]),
1245 "k_minor": int(values[1]),
1246 "k_point": int(values[2]),
1247 "api": int(values[3]),
1248 "proto": int(values[4]),
1250 if values[5] is not None:
1251 retval["proto2"] = values[5]
1256 def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
1257 """Returns DRBD usermode_helper currently set.
1261 helper = utils.ReadFile(filename).splitlines()[0]
1262 except EnvironmentError, err:
1263 if err.errno == errno.ENOENT:
1264 _ThrowError("The file %s cannot be opened, check if the module"
1265 " is loaded (%s)", filename, str(err))
1267 _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
1269 _ThrowError("Can't read any data from %s", filename)
1273 def _DevPath(minor):
1274 """Return the path to a drbd device for a given minor.
1277 return "/dev/drbd%d" % minor
1280 def GetUsedDevs(cls):
1281 """Compute the list of used DRBD devices.
1284 data = cls._GetProcData()
1288 match = cls._VALID_LINE_RE.match(line)
1291 minor = int(match.group(1))
1292 state = match.group(2)
1293 if state == cls._ST_UNCONFIGURED:
1295 used_devs[minor] = state, line
1299 def _SetFromMinor(self, minor):
1300 """Set our parameters based on the given minor.
1302 This sets our minor variable and our dev_path.
1306 self.minor = self.dev_path = None
1307 self.attached = False
1310 self.dev_path = self._DevPath(minor)
1311 self.attached = True
1314 def _CheckMetaSize(meta_device):
1315 """Check if the given meta device looks like a valid one.
1317 This currently only checks the size, which must be around
1321 result = utils.RunCmd(["blockdev", "--getsize", meta_device])
1323 _ThrowError("Failed to get device size: %s - %s",
1324 result.fail_reason, result.output)
1326 sectors = int(result.stdout)
1327 except (TypeError, ValueError):
1328 _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
1329 num_bytes = sectors * 512
1330 if num_bytes < 128 * 1024 * 1024: # less than 128MiB
1331 _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
1332 # the maximum *valid* size of the meta device when living on top
1333 # of LVM is hard to compute: it depends on the number of stripes
1334 # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
1335 # (normal size), but an eight-stripe 128MB PE will result in a 1GB
1336 # size meta device; as such, we restrict it to 1GB (a little bit
1337 # too generous, but making assumptions about PE size is hard)
1338 if num_bytes > 1024 * 1024 * 1024:
1339 _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))
1341 def Rename(self, new_id):
1344 This is not supported for drbd devices.
1347 raise errors.ProgrammerError("Can't rename a drbd device")
1350 class DRBD8(BaseDRBD):
1351 """DRBD v8.x block device.
1353 This implements the local host part of the DRBD device, i.e. it
1354 doesn't do anything to the supposed peer. If you need a fully
1355 connected DRBD pair, you need to use this class on both hosts.
1357 The unique_id for the drbd device is a (local_ip, local_port,
1358 remote_ip, remote_port, local_minor, secret) tuple, and it must have
1359 two children: the data device and the meta_device. The meta device
1360 is checked for valid size and is zeroed on create.
1367 _NET_RECONFIG_TIMEOUT = 60
1369 # command line options for barriers
1370 _DISABLE_DISK_OPTION = "--no-disk-barrier" # -a
1371 _DISABLE_DRAIN_OPTION = "--no-disk-drain" # -D
1372 _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
1373 _DISABLE_META_FLUSH_OPTION = "--no-md-flushes" # -m
# Construct a DRBD8 device from its 6-tuple unique_id
# (lhost, lport, rhost, rport, aminor, secret) and 0 or 2 children
# (data device, meta device). Verifies the running kernel module is 8.x.
# NOTE(review): sampled extract with fused line numbers; interior lines
# missing. Code left byte-identical; comments only.
1375 def __init__(self, unique_id, children, size, params):
1376 if children and children.count(None) > 0:
1378 if len(children) not in (0, 2):
1379 raise ValueError("Invalid configuration data %s" % str(children))
1380 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
1381 raise ValueError("Invalid configuration data %s" % str(unique_id))
1382 (self._lhost, self._lport,
1383 self._rhost, self._rport,
1384 self._aminor, self._secret) = unique_id
# An unreadable meta device is tolerated here (logged, not fatal);
# presumably the children list is dropped in the elided branch — confirm.
1386 if not _CanReadDevice(children[1].dev_path):
1387 logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
1389 super(DRBD8, self).__init__(unique_id, children, size, params)
1390 self.major = self._DRBD_MAJOR
# Refuse to run against a non-8.x kernel module.
1391 version = self._GetVersion(self._GetProcData())
1392 if version["k_major"] != 8:
1393 _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
1394 " usage: kernel is %s.%s, ganeti wants 8.x",
1395 version["k_major"], version["k_minor"])
# Local and remote endpoints must differ.
1397 if (self._lhost is not None and self._lhost == self._rhost and
1398 self._lport == self._rport):
1399 raise ValueError("Invalid configuration data, same local/remote %s" %
# Initialize DRBD metadata on dev_path: wipe the first 128MiB with dd
# (so drbdmeta does not auto-detect stale filesystems, see issue #182),
# then run `drbdmeta ... v08 ... create-md`.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1404 def _InitMeta(cls, minor, dev_path):
1405 """Initialize a meta device.
1407 This will not work if the given minor is in use.
1410 # Zero the metadata first, in order to make sure drbdmeta doesn't
1411 # try to auto-detect existing filesystems or similar (see
1412 # http://code.google.com/p/ganeti/issues/detail?id=182); we only
1413 # care about the first 128MB of data in the device, even though it
1415 result = utils.RunCmd([constants.DD_CMD,
1416 "if=/dev/zero", "of=%s" % dev_path,
1417 "bs=1048576", "count=128", "oflag=direct"])
1419 _ThrowError("Can't wipe the meta device: %s", result.output)
1421 result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
1422 "v08", dev_path, "0", "create-md"])
1424 _ThrowError("Can't initialize meta device: %s", result.output)
# Find a free DRBD minor: either an explicitly Unconfigured line in
# /proc/drbd, or (presumably, in elided code) highest-in-use + 1; raises
# when the minor space is exhausted.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1427 def _FindUnusedMinor(cls):
1428 """Find an unused DRBD device.
1430 This is specific to 8.x as the minors are allocated dynamically,
1431 so non-existing numbers up to a max minor count are actually free.
1434 data = cls._GetProcData()
1438 match = cls._UNUSED_LINE_RE.match(line)
# An "Unconfigured" minor can be reused directly.
1440 return int(match.group(1))
1441 match = cls._VALID_LINE_RE.match(line)
1443 minor = int(match.group(1))
1444 highest = max(highest, minor)
1445 if highest is None: # there are no minors in use at all
1447 if highest >= cls._MAX_MINORS:
1448 logging.error("Error: no free drbd minors!")
1449 raise errors.BlockDeviceError("Can't find a free DRBD minor")
# Build (once, then cache in cls._PARSE_SHOW) a pyparsing grammar for the
# output of `drbdsetup ... show`: sections of "keyword value;" statements,
# with special forms for ipv4/ipv6 addresses, quoted strings, meta-device
# "path [index]" and "minor N" device syntax.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1453 def _GetShowParser(cls):
1454 """Return a parser for `drbd show` output.
1456 This will either create or return an already-created parser for the
1457 output of the command `drbd show`.
# Memoized: build the grammar only on first use.
1460 if cls._PARSE_SHOW is not None:
1461 return cls._PARSE_SHOW
1464 lbrace = pyp.Literal("{").suppress()
1465 rbrace = pyp.Literal("}").suppress()
1466 lbracket = pyp.Literal("[").suppress()
1467 rbracket = pyp.Literal("]").suppress()
1468 semi = pyp.Literal(";").suppress()
1469 colon = pyp.Literal(":").suppress()
1470 # this also converts the value to an int
1471 number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))
1473 comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
1474 defa = pyp.Literal("_is_default").suppress()
1475 dbl_quote = pyp.Literal('"').suppress()
1477 keyword = pyp.Word(pyp.alphanums + "-")
1480 value = pyp.Word(pyp.alphanums + "_-/.:")
1481 quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
# "ipv4 a.b.c.d:port" — the family token is optional and dropped.
1482 ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
1483 pyp.Word(pyp.nums + ".") + colon + number)
# "ipv6 [addr]:port" — brackets optional, also dropped.
1484 ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
1485 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
1486 pyp.Optional(rbracket) + colon + number)
1487 # meta device, extended syntax
1488 meta_value = ((value ^ quoted) + lbracket + number + rbracket)
1489 # device name, extended syntax
1490 device_value = pyp.Literal("minor").suppress() + number
# One "keyword [value] [_is_default];" statement; trailing text ignored.
1493 stmt = (~rbrace + keyword + ~lbrace +
1494 pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
1496 pyp.Optional(defa) + semi +
1497 pyp.Optional(pyp.restOfLine).suppress())
1500 section_name = pyp.Word(pyp.alphas + "_")
1501 section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace
1503 bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
1506 cls._PARSE_SHOW = bnf
# Run `drbdsetup /dev/drbdN show` and return its stdout; on failure the
# error is only logged (the elided branch presumably returns None — confirm).
# NOTE(review): sampled extract; code left byte-identical, comments only.
1511 def _GetShowData(cls, minor):
1512 """Return the `drbdsetup show` data for a minor.
1515 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
1517 logging.error("Can't display the drbd config: %s - %s",
1518 result.fail_reason, result.output)
1520 return result.stdout
# Parse `drbdsetup show` output (via _GetShowParser) into a dict with
# keys local_dev, meta_dev, meta_index, local_addr, remote_addr — each
# present only if the corresponding section/statement was found.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1523 def _GetDevInfo(cls, out):
1524 """Parse details about a given DRBD minor.
1526 This return, if available, the local backing device (as a path)
1527 and the local and remote (ip, port) information from a string
1528 containing the output of the `drbdsetup show` command as returned
1536 bnf = cls._GetShowParser()
1540 results = bnf.parseString(out)
1541 except pyp.ParseException, err:
1542 _ThrowError("Can't parse drbdsetup show output: %s", str(err))
1544 # and massage the results into our desired format
1545 for section in results:
1547 if sname == "_this_host":
# Local side: backing disk, meta disk + index, and our address.
1548 for lst in section[1:]:
1549 if lst[0] == "disk":
1550 data["local_dev"] = lst[1]
1551 elif lst[0] == "meta-disk":
1552 data["meta_dev"] = lst[1]
1553 data["meta_index"] = lst[2]
1554 elif lst[0] == "address":
1555 data["local_addr"] = tuple(lst[1:])
1556 elif sname == "_remote_host":
# Remote side: only the peer address is of interest.
1557 for lst in section[1:]:
1558 if lst[0] == "address":
1559 data["remote_addr"] = tuple(lst[1:])
# Check whether our children (backend + meta) match the local disk
# configuration reported in an _GetDevInfo() dict: both device paths must
# agree and the meta index must be 0; with no children, the info must
# show no local disk either.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1562 def _MatchesLocal(self, info):
1563 """Test if our local config matches with an existing device.
1565 The parameter should be as returned from `_GetDevInfo()`. This
1566 method tests if our local backing device is the same as the one in
1567 the info parameter, in effect testing if we look like the given
1572 backend, meta = self._children
1574 backend = meta = None
1576 if backend is not None:
1577 retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
1579 retval = ("local_dev" not in info)
1581 if meta is not None:
1582 retval = retval and ("meta_dev" in info and
1583 info["meta_dev"] == meta.dev_path)
# Ganeti always uses meta index 0.
1584 retval = retval and ("meta_index" in info and
1585 info["meta_index"] == 0)
1587 retval = retval and ("meta_dev" not in info and
1588 "meta_index" not in info)
# Check whether our (lhost, lport)/(rhost, rport) endpoints match the
# local_addr/remote_addr in an _GetDevInfo() dict. Both-unset on both
# sides counts as a match; partial configuration is handled by the
# elided branches.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1591 def _MatchesNet(self, info):
1592 """Test if our network config matches with an existing device.
1594 The parameter should be as returned from `_GetDevInfo()`. This
1595 method tests if our network configuration is the same as the one
1596 in the info parameter, in effect testing if we look like the given
# Neither side configured anywhere: trivially matching.
1600 if (((self._lhost is None and not ("local_addr" in info)) and
1601 (self._rhost is None and not ("remote_addr" in info)))):
1604 if self._lhost is None:
1607 if not ("local_addr" in info and
1608 "remote_addr" in info):
1611 retval = (info["local_addr"] == (self._lhost, self._lport))
1612 retval = (retval and
1613 info["remote_addr"] == (self._rhost, self._rport))
# Attach the local disk side of a minor via `drbdsetup ... disk`, adding
# the size ("-d <size>m"), version-dependent barrier flags, and any
# custom LDP_DISK_CUSTOM options.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1616 def _AssembleLocal(self, minor, backend, meta, size):
1617 """Configure the local part of a DRBD device.
1620 args = ["drbdsetup", self._DevPath(minor), "disk",
1625 args.extend(["-d", "%sm" % size])
# Barrier flags depend on the exact 8.x.y kernel version.
1627 version = self._GetVersion(self._GetProcData())
1628 vmaj = version["k_major"]
1629 vmin = version["k_minor"]
1630 vrel = version["k_point"]
1633 self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
1634 self.params[constants.LDP_BARRIERS],
1635 self.params[constants.LDP_NO_META_FLUSH])
1636 args.extend(barrier_args)
1638 if self.params[constants.LDP_DISK_CUSTOM]:
1639 args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))
1641 result = utils.RunCmd(args)
1643 _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
# Translate the requested barrier/meta-flush disabling options into the
# drbdsetup flags supported by the running DRBD 8.x.y version, raising
# BlockDeviceError for unsupported combinations.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1646 def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
1647 disable_meta_flush):
1648 """Compute the DRBD command line parameters for disk barriers
1650 Returns a list of the disk barrier parameters as requested via the
1651 disabled_barriers and disable_meta_flush arguments, and according to the
1652 supported ones in the DRBD version vmaj.vmin.vrel
1654 If the desired option is unsupported, raises errors.BlockDeviceError.
1657 disabled_barriers_set = frozenset(disabled_barriers)
1658 if not disabled_barriers_set in constants.DRBD_VALID_BARRIER_OPT:
1659 raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
1660 " barriers" % disabled_barriers)
1664 # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
# NOTE(review): "not vmaj == 8 and vmin in (0, 2, 3)" parses as
# "(not vmaj == 8) and ..." due to precedence — looks like it was meant
# to reject anything that is NOT (8.0/8.2/8.3); flag for upstream review
# (cannot fix here, surrounding lines are elided).
1666 if not vmaj == 8 and vmin in (0, 2, 3):
1667 raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
# Append the option when the point release supports it, else raise.
1670 def _AppendOrRaise(option, min_version):
1671 """Helper for DRBD options"""
1672 if min_version is not None and vrel >= min_version:
1675 raise errors.BlockDeviceError("Could not use the option %s as the"
1676 " DRBD version %d.%d.%d does not support"
1677 " it." % (option, vmaj, vmin, vrel))
1679 # the minimum version for each feature is encoded via pairs of (minor
1680 # version -> x) where x is version in which support for the option was
# Per-feature tables: minor version -> minimum point release.
1682 meta_flush_supported = disk_flush_supported = {
1688 disk_drain_supported = {
1693 disk_barriers_supported = {
1698 if disable_meta_flush:
1699 _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
1700 meta_flush_supported.get(vmin, None))
1703 if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
1704 _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
1705 disk_flush_supported.get(vmin, None))
1708 if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
1709 _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
1710 disk_drain_supported.get(vmin, None))
1713 if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
1714 _AppendOrRaise(cls._DISABLE_DISK_OPTION,
1715 disk_barriers_supported.get(vmin, None))
# Configure the network side of a minor via `drbdsetup ... net`, then
# poll (utils.Retry) until `drbdsetup show` confirms both endpoints.
# A None anywhere in net_info means "disconnect instead".
# NOTE(review): sampled extract; code left byte-identical, comments only.
1719 def _AssembleNet(self, minor, net_info, protocol,
1720 dual_pri=False, hmac=None, secret=None):
1721 """Configure the network part of the device.
1724 lhost, lport, rhost, rport = net_info
1725 if None in net_info:
1726 # we don't want network connection and actually want to make
1728 self._ShutdownNet(minor)
1731 # Workaround for a race condition. When DRBD is doing its dance to
1732 # establish a connection with its peer, it also sends the
1733 # synchronization speed over the wire. In some cases setting the
1734 # sync speed only after setting up both sides can race with DRBD
1735 # connecting, hence we set it here before telling DRBD anything
1737 sync_errors = self._SetMinorSyncParams(minor, self.params)
1739 _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
1740 (minor, utils.CommaJoin(sync_errors)))
# Determine the address family; both endpoints must be the same family.
1742 if netutils.IP6Address.IsValid(lhost):
1743 if not netutils.IP6Address.IsValid(rhost):
1744 _ThrowError("drbd%d: can't connect ip %s to ip %s" %
1745 (minor, lhost, rhost))
1747 elif netutils.IP4Address.IsValid(lhost):
1748 if not netutils.IP4Address.IsValid(rhost):
1749 _ThrowError("drbd%d: can't connect ip %s to ip %s" %
1750 (minor, lhost, rhost))
1753 _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))
1755 args = ["drbdsetup", self._DevPath(minor), "net",
1756 "%s:%s:%s" % (family, lhost, lport),
1757 "%s:%s:%s" % (family, rhost, rport), protocol,
1758 "-A", "discard-zero-changes",
# Shared-secret authentication when an HMAC algorithm is given.
1765 args.extend(["-a", hmac, "-x", secret])
1767 if self.params[constants.LDP_NET_CUSTOM]:
1768 args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))
1770 result = utils.RunCmd(args)
1772 _ThrowError("drbd%d: can't setup network: %s - %s",
1773 minor, result.fail_reason, result.output)
# Retry callback: RetryAgain until `show` reports exactly our endpoints.
1775 def _CheckNetworkConfig():
1776 info = self._GetDevInfo(self._GetShowData(minor))
1777 if not "local_addr" in info or not "remote_addr" in info:
1778 raise utils.RetryAgain()
1780 if (info["local_addr"] != (lhost, lport) or
1781 info["remote_addr"] != (rhost, rport)):
1782 raise utils.RetryAgain()
1785 utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
1786 except utils.RetryTimeout:
1787 _ThrowError("drbd%d: timeout while configuring network", minor)
# Attach (backend, meta) devices to an already-assembled, currently
# diskless DRBD minor: check meta size, init metadata on a spare minor,
# then attach locally.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1789 def AddChildren(self, devices):
1790 """Add a disk to the DRBD device.
1793 if self.minor is None:
1794 _ThrowError("drbd%d: can't attach to dbrd8 during AddChildren",
1796 if len(devices) != 2:
1797 _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
1798 info = self._GetDevInfo(self._GetShowData(self.minor))
1799 if "local_dev" in info:
1800 _ThrowError("drbd%d: already attached to a local disk", self.minor)
1801 backend, meta = devices
1802 if backend.dev_path is None or meta.dev_path is None:
1803 _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
1806 self._CheckMetaSize(meta.dev_path)
# Metadata is (re)initialized using an unused minor, not our own.
1807 self._InitMeta(self._FindUnusedMinor(), meta.dev_path)
1809 self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
1810 self._children = devices
# Detach the backing storage from the DRBD minor after verifying the
# passed device paths match our recorded children; early-outs when there
# is no local disk or we are already detached.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1812 def RemoveChildren(self, devices):
1813 """Detach the drbd device from local storage.
1816 if self.minor is None:
1817 _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
1819 # early return if we don't actually have backing storage
1820 info = self._GetDevInfo(self._GetShowData(self.minor))
1821 if "local_dev" not in info:
1823 if len(self._children) != 2:
1824 _ThrowError("drbd%d: we don't have two children: %s", self.minor,
1826 if self._children.count(None) == 2: # we don't actually have children :)
1827 logging.warning("drbd%d: requested detach while detached", self.minor)
1829 if len(devices) != 2:
1830 _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
# Sanity check: the caller's paths must match our recorded children.
1831 for child, dev in zip(self._children, devices):
1832 if dev != child.dev_path:
1833 _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
1834 " RemoveChildren", self.minor, dev, child.dev_path)
1836 self._ShutdownLocal(self.minor)
# Low-level `drbdsetup ... syncer` invocation. With dynamic resync
# enabled (needs DRBD >= 8.3.9) the c-* controller parameters are passed;
# otherwise the fixed resync rate ("-r"). Errors are collected into a
# message list rather than raised.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1840 def _SetMinorSyncParams(cls, minor, params):
1841 """Set the parameters of the DRBD syncer.
1843 This is the low-level implementation.
1846 @param minor: the drbd minor whose settings we change
1848 @param params: LD level disk parameters related to the synchronization
1850 @return: a list of error messages
1854 args = ["drbdsetup", cls._DevPath(minor), "syncer"]
1855 if params[constants.LDP_DYNAMIC_RESYNC]:
1856 version = cls._GetVersion(cls._GetProcData())
1857 vmin = version["k_minor"]
1858 vrel = version["k_point"]
1860 # By definition we are using 8.x, so just check the rest of the version
1862 if vmin != 3 or vrel < 9:
1863 msg = ("The current DRBD version (8.%d.%d) does not support the "
1864 "dynamic resync speed controller" % (vmin, vrel))
1868 if params[constants.LDP_PLAN_AHEAD] == 0:
1869 msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
1870 " controller at DRBD level. If you want to disable it, please"
1871 " set the dynamic-resync disk parameter to False.")
1875 # add the c-* parameters to args
1876 args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
1877 "--c-fill-target", params[constants.LDP_FILL_TARGET],
1878 "--c-delay-target", params[constants.LDP_DELAY_TARGET],
1879 "--c-max-rate", params[constants.LDP_MAX_RATE],
1880 "--c-min-rate", params[constants.LDP_MIN_RATE],
# Static resync rate path (dynamic resync disabled).
1884 args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
1886 args.append("--create-device")
1887 result = utils.RunCmd(args)
1889 msg = ("Can't change syncer rate: %s - %s" %
1890 (result.fail_reason, result.output))
# Public wrapper: apply sync parameters to children (via super) and then
# to our own minor; returns the concatenated error-message list.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1896 def SetSyncParams(self, params):
1897 """Set the synchronization parameters of the DRBD syncer.
1900 @param params: LD level disk parameters related to the synchronization
1902 @return: a list of error messages, emitted both by the current node and by
1903 children. An empty list means no errors
1906 if self.minor is None:
1907 err = "Not attached during SetSyncParams"
1911 children_result = super(DRBD8, self).SetSyncParams(params)
1912 children_result.extend(self._SetMinorSyncParams(self.minor, params))
1913 return children_result
# Pause or resume resynchronization via `drbdsetup <dev> pause-sync/
# resume-sync` (the cmd choice is in elided lines); returns success of
# both this device and its children.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1915 def PauseResumeSync(self, pause):
1916 """Pauses or resumes the sync of a DRBD device.
1918 @param pause: Wether to pause or resume
1919 @return: the success of the operation
1922 if self.minor is None:
1923 logging.info("Not attached during PauseSync")
1926 children_result = super(DRBD8, self).PauseResumeSync(pause)
1933 result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
1935 logging.error("Can't %s: %s - %s", cmd,
1936 result.fail_reason, result.output)
1937 return not result.failed and children_result
# Return a DRBD8Status built from our minor's joined /proc/drbd entry;
# requires the device to be attached and present in /proc.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1939 def GetProcStatus(self):
1940 """Return device data from /proc.
1943 if self.minor is None:
1944 _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
1945 proc_info = self._MassageProcData(self._GetProcData())
1946 if self.minor not in proc_info:
1947 _ThrowError("drbd%d: can't find myself in /proc", self.minor)
1948 return DRBD8Status(proc_info[self.minor])
# Build an objects.BlockDevStatus from the /proc status: degraded when
# disconnected or local disk not up-to-date; ldisk OKAY/FAULTY/UNKNOWN
# depending on up-to-date/diskless/other.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1950 def GetSyncStatus(self):
1951 """Returns the sync status of the device.
1954 If sync_percent is None, it means all is ok
1955 If estimated_time is None, it means we can't estimate
1956 the time needed, otherwise it's the time left in seconds.
1959 We set the is_degraded parameter to True on two conditions:
1960 network not connected or local disk missing.
1962 We compute the ldisk parameter based on whether we have a local
1965 @rtype: objects.BlockDevStatus
1968 if self.minor is None and not self.Attach():
1969 _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)
1971 stats = self.GetProcStatus()
1972 is_degraded = not stats.is_connected or not stats.is_disk_uptodate
1974 if stats.is_disk_uptodate:
1975 ldisk_status = constants.LDS_OKAY
1976 elif stats.is_diskless:
1977 ldisk_status = constants.LDS_FAULTY
1979 ldisk_status = constants.LDS_UNKNOWN
1981 return objects.BlockDevStatus(dev_path=self.dev_path,
1984 sync_percent=stats.sync_percent,
1985 estimated_time=stats.est_time,
1986 is_degraded=is_degraded,
1987 ldisk_status=ldisk_status)
# Open: switch the device to primary via `drbdsetup <dev> primary`;
# "-o" (force) is presumably appended in an elided line when force=True.
# NOTE(review): sampled extract; code left byte-identical, comments only.
1989 def Open(self, force=False):
1990 """Make the local state primary.
1992 If the 'force' parameter is given, the '-o' option is passed to
1993 drbdsetup. Since this is a potentially dangerous operation, the
1994 force flag should be only given after creation, when it actually
1998 if self.minor is None and not self.Attach():
1999 logging.error("DRBD cannot attach to a device during open")
2001 cmd = ["drbdsetup", self.dev_path, "primary"]
2004 result = utils.RunCmd(cmd)
2006 _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
# --- Close() starts here; its `def Close(self):` line was lost by the
# sampler. It switches the device to secondary, failing if it is in use.
2010 """Make the local state secondary.
2012 This will, of course, fail if the device is in use.
2015 if self.minor is None and not self.Attach():
2016 _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
2017 result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
2019 _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
2020 self.minor, result.output)
# Tear down the network side and wait (with retries and a timeout) for
# the device to actually reach Standalone, re-issuing the disconnect as
# needed; warns if the detach took a large fraction of the timeout.
# NOTE(review): sampled extract; code left byte-identical, comments only.
2022 def DisconnectNet(self):
2023 """Removes network configuration.
2025 This method shutdowns the network side of the device.
2027 The method will wait up to a hardcoded timeout for the device to
2028 go into standalone after the 'disconnect' command before
2029 re-configuring it, as sometimes it takes a while for the
2030 disconnect to actually propagate and thus we might issue a 'net'
2031 command while the device is still connected. If the device will
2032 still be attached to the network and we time out, we raise an
2036 if self.minor is None:
2037 _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)
2039 if None in (self._lhost, self._lport, self._rhost, self._rport):
2040 _ThrowError("drbd%d: DRBD disk missing network info in"
2041 " DisconnectNet()", self.minor)
# Mutable holder so the retry closure can update the flag.
2043 class _DisconnectStatus:
2044 def __init__(self, ever_disconnected):
2045 self.ever_disconnected = ever_disconnected
2047 dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))
2049 def _WaitForDisconnect():
2050 if self.GetProcStatus().is_standalone:
2053 # retry the disconnect, it seems possible that due to a well-time
2054 # disconnect on the peer, my disconnect command might be ignored and
2056 dstatus.ever_disconnected = \
2057 _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected
2059 raise utils.RetryAgain()
2062 start_time = time.time()
2065 # Start delay at 100 milliseconds and grow up to 2 seconds
2066 utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
2067 self._NET_RECONFIG_TIMEOUT)
2068 except utils.RetryTimeout:
2069 if dstatus.ever_disconnected:
2070 msg = ("drbd%d: device did not react to the"
2071 " 'disconnect' command in a timely manner")
2073 msg = "drbd%d: can't shutdown network, even after multiple retries"
2075 _ThrowError(msg, self.minor)
# Log slow detaches (more than 25% of the reconfig timeout).
2077 reconfig_time = time.time() - start_time
2078 if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
2079 logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
2080 self.minor, reconfig_time)
# Reconnect the network side of a Standalone device, optionally in
# dual-primary mode (multimaster), using the stored endpoints and secret.
# NOTE(review): sampled extract; code left byte-identical, comments only.
2082 def AttachNet(self, multimaster):
2083 """Reconnects the network.
2085 This method connects the network side of the device with a
2086 specified multi-master flag. The device needs to be 'Standalone'
2087 but have valid network configuration data.
2090 - multimaster: init the network in dual-primary mode
2093 if self.minor is None:
2094 _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)
2096 if None in (self._lhost, self._lport, self._rhost, self._rport):
2097 _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)
2099 status = self.GetProcStatus()
2101 if not status.is_standalone:
2102 _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)
2104 self._AssembleNet(self.minor,
2105 (self._lhost, self._lport, self._rhost, self._rport),
2106 constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
2107 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
# --- Attach() starts here; its `def Attach(self):` line was lost by the
# sampler. It only checks whether our minor is configured (read-only).
2110 """Check if our minor is configured.
2112 This doesn't do any device configurations - it only checks if the
2113 minor is in a state different from Unconfigured.
2115 Note that this function will not change the state of the system in
2116 any way (except in case of side-effects caused by reading from
2120 used_devs = self.GetUsedDevs()
2121 if self._aminor in used_devs:
2122 minor = self._aminor
2126 self._SetFromMinor(minor)
2127 return minor is not None
2130 """Assemble the drbd.
2133 - if we have a configured device, we try to ensure that it matches
2135 - if not, we create it from zero
2136 - anyway, set the device parameters
2139 super(DRBD8, self).Assemble()
2142 if self.minor is None:
2143 # local device completely unconfigured
2144 self._FastAssemble()
2146 # we have to recheck the local and network status and try to fix
2148 self._SlowAssemble()
2150 sync_errors = self.SetSyncParams(self.params)
2152 _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
2153 (self.minor, utils.CommaJoin(sync_errors)))
# Reconcile a partially configured minor with our desired config:
# attach the missing network or local side (re-verifying with `show`
# after each step), and handle the wrong-peer case by disconnecting and
# reconnecting. Raises when nothing matched.
# NOTE(review): sampled extract; code left byte-identical, comments only.
2155 def _SlowAssemble(self):
2156 """Assembles the DRBD device from a (partially) configured device.
2158 In case of partially attached (local device matches but no network
2159 setup), we perform the network attach. If successful, we re-test
2160 the attach if can return success.
2163 # TODO: Rewrite to not use a for loop just because there is 'break'
2164 # pylint: disable=W0631
2165 net_data = (self._lhost, self._lport, self._rhost, self._rport)
2166 for minor in (self._aminor,):
2167 info = self._GetDevInfo(self._GetShowData(minor))
2168 match_l = self._MatchesLocal(info)
2169 match_r = self._MatchesNet(info)
2171 if match_l and match_r:
2172 # everything matches
# Disk matches but no network configured: attach net and re-verify.
2175 if match_l and not match_r and "local_addr" not in info:
2176 # disk matches, but not attached to network, attach and recheck
2177 self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
2178 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2179 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2182 _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
2183 " show' disagrees", minor)
# Network matches but no local disk: attach the children and re-verify.
2185 if match_r and "local_dev" not in info:
2186 # no local disk, but network attached and it matches
2187 self._AssembleLocal(minor, self._children[0].dev_path,
2188 self._children[1].dev_path, self.size)
2189 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2192 _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
2193 " show' disagrees", minor)
2195 # this case must be considered only if we actually have local
2196 # storage, i.e. not in diskless mode, because all diskless
2197 # devices are equal from the point of view of local
2199 if (match_l and "local_dev" in info and
2200 not match_r and "local_addr" in info):
2201 # strange case - the device network part points to somewhere
2202 # else, even though its local storage is ours; as we own the
2203 # drbd space, we try to disconnect from the remote peer and
2204 # reconnect to our correct one
2206 self._ShutdownNet(minor)
2207 except errors.BlockDeviceError, err:
2208 _ThrowError("drbd%d: device has correct local storage, wrong"
2209 " remote peer and is unable to disconnect in order"
2210 " to attach to the correct peer: %s", minor, str(err))
2211 # note: _AssembleNet also handles the case when we don't want
2212 # local storage (i.e. one or more of the _[lr](host|port) is
2214 self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
2215 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2216 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2219 _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
2220 " show' disagrees", minor)
2225 self._SetFromMinor(minor)
2227 _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
# Assemble from scratch on an unused minor: attach local disk if we have
# both children, attach net if all four endpoints are set, then record
# the minor as ours.
# NOTE(review): sampled extract; code left byte-identical, comments only.
2230 def _FastAssemble(self):
2231 """Assemble the drbd device from zero.
2233 This is run when in Assemble we detect our minor is unused.
2236 minor = self._aminor
2237 if self._children and self._children[0] and self._children[1]:
2238 self._AssembleLocal(minor, self._children[0].dev_path,
2239 self._children[1].dev_path, self.size)
2240 if self._lhost and self._lport and self._rhost and self._rport:
2241 self._AssembleNet(minor,
2242 (self._lhost, self._lport, self._rhost, self._rport),
2243 constants.DRBD_NET_PROTOCOL,
2244 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2245 self._SetFromMinor(minor)
# `drbdsetup <dev> detach`: drop the local disk; I/O continues from the
# peer (fails if there is no remote device).
# NOTE(review): sampled extract; code left byte-identical, comments only.
2248 def _ShutdownLocal(cls, minor):
2249 """Detach from the local device.
2251 I/Os will continue to be served from the remote device. If we
2252 don't have a remote device, this operation will fail.
2255 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
2257 _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)
# `drbdsetup <dev> disconnect`: drop the peer connection (fails without
# a local device).
# NOTE(review): sampled extract; code left byte-identical, comments only.
2260 def _ShutdownNet(cls, minor):
2261 """Disconnect from the remote peer.
2263 This fails if we don't have a local device.
2266 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
2268 _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)
2271 def _ShutdownAll(cls, minor):
2272 """Deactivate the device.
2274 This will, of course, fail if the device is in use.
2277 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
2279 _ThrowError("drbd%d: can't shutdown drbd device: %s",
2280 minor, result.output)
2283 """Shutdown the DRBD device.
2286 if self.minor is None and not self.Attach():
2287 logging.info("drbd%d: not attached during Shutdown()", self._aminor)
2291 self.dev_path = None
2292 self._ShutdownAll(minor)
2295 """Stub remove for DRBD devices.
2301 def Create(cls, unique_id, children, size, params, excl_stor):
2302 """Create a new DRBD8 device.
2304 Since DRBD devices are not created per se, just assembled, this
2305 function only initializes the metadata.
2308 if len(children) != 2:
2309 raise errors.ProgrammerError("Invalid setup for the drbd device")
2311 raise errors.ProgrammerError("DRBD device requested with"
2312 " exclusive_storage")
2313 # check that the minor is unused
2314 aminor = unique_id[4]
2315 proc_info = cls._MassageProcData(cls._GetProcData())
2316 if aminor in proc_info:
2317 status = DRBD8Status(proc_info[aminor])
2318 in_use = status.is_in_use
2322 _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
2325 if not meta.Attach():
2326 _ThrowError("drbd%d: can't attach to meta device '%s'",
2328 cls._CheckMetaSize(meta.dev_path)
2329 cls._InitMeta(aminor, meta.dev_path)
2330 return cls(unique_id, children, size, params)
2332 def Grow(self, amount, dryrun, backingstore):
2333 """Resize the DRBD device and its backing storage.
2336 if self.minor is None:
2337 _ThrowError("drbd%d: Grow called while not attached", self._aminor)
2338 if len(self._children) != 2 or None in self._children:
2339 _ThrowError("drbd%d: cannot grow diskless device", self.minor)
2340 self._children[0].Grow(amount, dryrun, backingstore)
2341 if dryrun or backingstore:
2342 # DRBD does not support dry-run mode and is not backing storage,
2343 # so we'll return here
2345 result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
2346 "%dm" % (self.size + amount)])
2348 _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
2351 class FileStorage(BlockDev):
2354 This class represents the a file storage backend device.
2356 The unique_id for the file device is a (file_driver, file_path) tuple.
2359 def __init__(self, unique_id, children, size, params):
2360 """Initalizes a file device backend.
2364 raise errors.BlockDeviceError("Invalid setup for file device")
2365 super(FileStorage, self).__init__(unique_id, children, size, params)
2366 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2367 raise ValueError("Invalid configuration data %s" % str(unique_id))
2368 self.driver = unique_id[0]
2369 self.dev_path = unique_id[1]
2371 CheckFileStoragePath(self.dev_path)
2376 """Assemble the device.
2378 Checks whether the file device exists, raises BlockDeviceError otherwise.
2381 if not os.path.exists(self.dev_path):
2382 _ThrowError("File device '%s' does not exist" % self.dev_path)
2385 """Shutdown the device.
2387 This is a no-op for the file type, as we don't deactivate
2388 the file on shutdown.
2393 def Open(self, force=False):
2394 """Make the device ready for I/O.
2396 This is a no-op for the file type.
2402 """Notifies that the device will no longer be used for I/O.
2404 This is a no-op for the file type.
2410 """Remove the file backing the block device.
2413 @return: True if the removal was successful
2417 os.remove(self.dev_path)
2418 except OSError, err:
2419 if err.errno != errno.ENOENT:
2420 _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2422 def Rename(self, new_id):
2423 """Renames the file.
2426 # TODO: implement rename for file-based storage
2427 _ThrowError("Rename is not supported for file-based storage")
2429 def Grow(self, amount, dryrun, backingstore):
2432 @param amount: the amount (in mebibytes) to grow with
2435 if not backingstore:
2437 # Check that the file exists
2439 current_size = self.GetActualSize()
2440 new_size = current_size + amount * 1024 * 1024
2441 assert new_size > current_size, "Cannot Grow with a negative amount"
2442 # We can't really simulate the growth
2446 f = open(self.dev_path, "a+")
2447 f.truncate(new_size)
2449 except EnvironmentError, err:
2450 _ThrowError("Error in file growth: %", str(err))
2453 """Attach to an existing file.
2455 Check if this file already exists.
2458 @return: True if file exists
2461 self.attached = os.path.exists(self.dev_path)
2462 return self.attached
2464 def GetActualSize(self):
2465 """Return the actual disk size.
2467 @note: the device needs to be active when this is called
2470 assert self.attached, "BlockDevice not attached in GetActualSize()"
2472 st = os.stat(self.dev_path)
2474 except OSError, err:
2475 _ThrowError("Can't stat %s: %s", self.dev_path, err)
2478 def Create(cls, unique_id, children, size, params, excl_stor):
2479 """Create a new file.
2481 @param size: the size of file in MiB
2483 @rtype: L{bdev.FileStorage}
2484 @return: an instance of FileStorage
2488 raise errors.ProgrammerError("FileStorage device requested with"
2489 " exclusive_storage")
2490 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2491 raise ValueError("Invalid configuration data %s" % str(unique_id))
2493 dev_path = unique_id[1]
2495 CheckFileStoragePath(dev_path)
2498 fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2499 f = os.fdopen(fd, "w")
2500 f.truncate(size * 1024 * 1024)
2502 except EnvironmentError, err:
2503 if err.errno == errno.EEXIST:
2504 _ThrowError("File already existing: %s", dev_path)
2505 _ThrowError("Error in file creation: %", str(err))
2507 return FileStorage(unique_id, children, size, params)
2510 class PersistentBlockDevice(BlockDev):
2511 """A block device with persistent node
2513 May be either directly attached, or exposed through DM (e.g. dm-multipath).
2514 udev helpers are probably required to give persistent, human-friendly
2517 For the time being, pathnames are required to lie under /dev.
2520 def __init__(self, unique_id, children, size, params):
2521 """Attaches to a static block device.
2523 The unique_id is a path under /dev.
2526 super(PersistentBlockDevice, self).__init__(unique_id, children, size,
2528 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2529 raise ValueError("Invalid configuration data %s" % str(unique_id))
2530 self.dev_path = unique_id[1]
2531 if not os.path.realpath(self.dev_path).startswith("/dev/"):
2532 raise ValueError("Full path '%s' lies outside /dev" %
2533 os.path.realpath(self.dev_path))
2534 # TODO: this is just a safety guard checking that we only deal with devices
2535 # we know how to handle. In the future this will be integrated with
2536 # external storage backends and possible values will probably be collected
2537 # from the cluster configuration.
2538 if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
2539 raise ValueError("Got persistent block device of invalid type: %s" %
2542 self.major = self.minor = None
2546 def Create(cls, unique_id, children, size, params, excl_stor):
2547 """Create a new device
2549 This is a noop, we only return a PersistentBlockDevice instance
2553 raise errors.ProgrammerError("Persistent block device requested with"
2554 " exclusive_storage")
2555 return PersistentBlockDevice(unique_id, children, 0, params)
2565 def Rename(self, new_id):
2566 """Rename this device.
2569 _ThrowError("Rename is not supported for PersistentBlockDev storage")
2572 """Attach to an existing block device.
2576 self.attached = False
2578 st = os.stat(self.dev_path)
2579 except OSError, err:
2580 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2583 if not stat.S_ISBLK(st.st_mode):
2584 logging.error("%s is not a block device", self.dev_path)
2587 self.major = os.major(st.st_rdev)
2588 self.minor = os.minor(st.st_rdev)
2589 self.attached = True
2594 """Assemble the device.
2600 """Shutdown the device.
2605 def Open(self, force=False):
2606 """Make the device ready for I/O.
2612 """Notifies that the device will no longer be used for I/O.
2617 def Grow(self, amount, dryrun, backingstore):
2618 """Grow the logical volume.
2621 _ThrowError("Grow is not supported for PersistentBlockDev storage")
2624 class RADOSBlockDevice(BlockDev):
2625 """A RADOS Block Device (rbd).
2627 This class implements the RADOS Block Device for the backend. You need
2628 the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
2629 this to be functional.
2632 def __init__(self, unique_id, children, size, params):
2633 """Attaches to an rbd device.
2636 super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
2637 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2638 raise ValueError("Invalid configuration data %s" % str(unique_id))
2640 self.driver, self.rbd_name = unique_id
2642 self.major = self.minor = None
2646 def Create(cls, unique_id, children, size, params, excl_stor):
2647 """Create a new rbd device.
2649 Provision a new rbd volume inside a RADOS pool.
2652 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2653 raise errors.ProgrammerError("Invalid configuration data %s" %
2656 raise errors.ProgrammerError("RBD device requested with"
2657 " exclusive_storage")
2658 rbd_pool = params[constants.LDP_POOL]
2659 rbd_name = unique_id[1]
2661 # Provision a new rbd volume (Image) inside the RADOS cluster.
2662 cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
2663 rbd_name, "--size", "%s" % size]
2664 result = utils.RunCmd(cmd)
2666 _ThrowError("rbd creation failed (%s): %s",
2667 result.fail_reason, result.output)
2669 return RADOSBlockDevice(unique_id, children, size, params)
2672 """Remove the rbd device.
2675 rbd_pool = self.params[constants.LDP_POOL]
2676 rbd_name = self.unique_id[1]
2678 if not self.minor and not self.Attach():
2679 # The rbd device doesn't exist.
2682 # First shutdown the device (remove mappings).
2685 # Remove the actual Volume (Image) from the RADOS cluster.
2686 cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
2687 result = utils.RunCmd(cmd)
2689 _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
2690 result.fail_reason, result.output)
2692 def Rename(self, new_id):
2693 """Rename this device.
2699 """Attach to an existing rbd device.
2701 This method maps the rbd volume that matches our name with
2702 an rbd device and then attaches to this device.
2705 self.attached = False
2707 # Map the rbd volume to a block device under /dev
2708 self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
2711 st = os.stat(self.dev_path)
2712 except OSError, err:
2713 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2716 if not stat.S_ISBLK(st.st_mode):
2717 logging.error("%s is not a block device", self.dev_path)
2720 self.major = os.major(st.st_rdev)
2721 self.minor = os.minor(st.st_rdev)
2722 self.attached = True
2726 def _MapVolumeToBlockdev(self, unique_id):
2727 """Maps existing rbd volumes to block devices.
2729 This method should be idempotent if the mapping already exists.
2732 @return: the block device path that corresponds to the volume
2735 pool = self.params[constants.LDP_POOL]
2738 # Check if the mapping already exists.
2739 rbd_dev = self._VolumeToBlockdev(pool, name)
2741 # The mapping exists. Return it.
2744 # The mapping doesn't exist. Create it.
2745 map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
2746 result = utils.RunCmd(map_cmd)
2748 _ThrowError("rbd map failed (%s): %s",
2749 result.fail_reason, result.output)
2751 # Find the corresponding rbd device.
2752 rbd_dev = self._VolumeToBlockdev(pool, name)
2754 _ThrowError("rbd map succeeded, but could not find the rbd block"
2755 " device in output of showmapped, for volume: %s", name)
2757 # The device was successfully mapped. Return it.
2761 def _VolumeToBlockdev(cls, pool, volume_name):
2762 """Do the 'volume name'-to-'rbd block device' resolving.
2765 @param pool: RADOS pool to use
2766 @type volume_name: string
2767 @param volume_name: the name of the volume whose device we search for
2768 @rtype: string or None
2769 @return: block device path if the volume is mapped, else None
2773 # Newer versions of the rbd tool support json output formatting. Use it
2783 result = utils.RunCmd(showmap_cmd)
2785 logging.error("rbd JSON output formatting returned error (%s): %s,"
2786 "falling back to plain output parsing",
2787 result.fail_reason, result.output)
2788 raise RbdShowmappedJsonError
2790 return cls._ParseRbdShowmappedJson(result.output, volume_name)
2791 except RbdShowmappedJsonError:
2792 # For older versions of rbd, we have to parse the plain / text output
2794 showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
2795 result = utils.RunCmd(showmap_cmd)
2797 _ThrowError("rbd showmapped failed (%s): %s",
2798 result.fail_reason, result.output)
2800 return cls._ParseRbdShowmappedPlain(result.output, volume_name)
2803 def _ParseRbdShowmappedJson(output, volume_name):
2804 """Parse the json output of `rbd showmapped'.
2806 This method parses the json output of `rbd showmapped' and returns the rbd
2807 block device path (e.g. /dev/rbd0) that matches the given rbd volume.
2809 @type output: string
2810 @param output: the json output of `rbd showmapped'
2811 @type volume_name: string
2812 @param volume_name: the name of the volume whose device we search for
2813 @rtype: string or None
2814 @return: block device path if the volume is mapped, else None
2818 devices = serializer.LoadJson(output)
2819 except ValueError, err:
2820 _ThrowError("Unable to parse JSON data: %s" % err)
2823 for d in devices.values(): # pylint: disable=E1103
2827 _ThrowError("'name' key missing from json object %s", devices)
2829 if name == volume_name:
2830 if rbd_dev is not None:
2831 _ThrowError("rbd volume %s is mapped more than once", volume_name)
2833 rbd_dev = d["device"]
2838 def _ParseRbdShowmappedPlain(output, volume_name):
2839 """Parse the (plain / text) output of `rbd showmapped'.
2841 This method parses the output of `rbd showmapped' and returns
2842 the rbd block device path (e.g. /dev/rbd0) that matches the
2845 @type output: string
2846 @param output: the plain text output of `rbd showmapped'
2847 @type volume_name: string
2848 @param volume_name: the name of the volume whose device we search for
2849 @rtype: string or None
2850 @return: block device path if the volume is mapped, else None
2857 lines = output.splitlines()
2859 # Try parsing the new output format (ceph >= 0.55).
2860 splitted_lines = map(lambda l: l.split(), lines)
2862 # Check for empty output.
2863 if not splitted_lines:
2866 # Check showmapped output, to determine number of fields.
2867 field_cnt = len(splitted_lines[0])
2868 if field_cnt != allfields:
2869 # Parsing the new format failed. Fallback to parsing the old output
2871 splitted_lines = map(lambda l: l.split("\t"), lines)
2872 if field_cnt != allfields:
2873 _ThrowError("Cannot parse rbd showmapped output expected %s fields,"
2874 " found %s", allfields, field_cnt)
2877 filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
2880 if len(matched_lines) > 1:
2881 _ThrowError("rbd volume %s mapped more than once", volume_name)
2884 # rbd block device found. Return it.
2885 rbd_dev = matched_lines[0][devicefield]
2888 # The given volume is not mapped.
2892 """Assemble the device.
2898 """Shutdown the device.
2901 if not self.minor and not self.Attach():
2902 # The rbd device doesn't exist.
2905 # Unmap the block device from the Volume.
2906 self._UnmapVolumeFromBlockdev(self.unique_id)
2909 self.dev_path = None
2911 def _UnmapVolumeFromBlockdev(self, unique_id):
2912 """Unmaps the rbd device from the Volume it is mapped.
2914 Unmaps the rbd device from the Volume it was previously mapped to.
2915 This method should be idempotent if the Volume isn't mapped.
2918 pool = self.params[constants.LDP_POOL]
2921 # Check if the mapping already exists.
2922 rbd_dev = self._VolumeToBlockdev(pool, name)
2925 # The mapping exists. Unmap the rbd device.
2926 unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
2927 result = utils.RunCmd(unmap_cmd)
2929 _ThrowError("rbd unmap failed (%s): %s",
2930 result.fail_reason, result.output)
2932 def Open(self, force=False):
2933 """Make the device ready for I/O.
2939 """Notifies that the device will no longer be used for I/O.
2944 def Grow(self, amount, dryrun, backingstore):
2947 @type amount: integer
2948 @param amount: the amount (in mebibytes) to grow with
2949 @type dryrun: boolean
2950 @param dryrun: whether to execute the operation in simulation mode
2951 only, without actually increasing the size
2954 if not backingstore:
2956 if not self.Attach():
2957 _ThrowError("Can't attach to rbd device during Grow()")
2960 # the rbd tool does not support dry runs of resize operations.
2961 # Since rbd volumes are thinly provisioned, we assume
2962 # there is always enough free space for the operation.
2965 rbd_pool = self.params[constants.LDP_POOL]
2966 rbd_name = self.unique_id[1]
2967 new_size = self.size + amount
2969 # Resize the rbd volume (Image) inside the RADOS cluster.
2970 cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
2971 rbd_name, "--size", "%s" % new_size]
2972 result = utils.RunCmd(cmd)
2974 _ThrowError("rbd resize failed (%s): %s",
2975 result.fail_reason, result.output)
2978 class ExtStorageDevice(BlockDev):
2979 """A block device provided by an ExtStorage Provider.
2981 This class implements the External Storage Interface, which means
2982 handling of the externally provided block devices.
2985 def __init__(self, unique_id, children, size, params):
2986 """Attaches to an extstorage block device.
2989 super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
2990 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2991 raise ValueError("Invalid configuration data %s" % str(unique_id))
2993 self.driver, self.vol_name = unique_id
2994 self.ext_params = params
2996 self.major = self.minor = None
3000 def Create(cls, unique_id, children, size, params, excl_stor):
3001 """Create a new extstorage device.
3003 Provision a new volume using an extstorage provider, which will
3004 then be mapped to a block device.
3007 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
3008 raise errors.ProgrammerError("Invalid configuration data %s" %
3011 raise errors.ProgrammerError("extstorage device requested with"
3012 " exclusive_storage")
3014 # Call the External Storage's create script,
3015 # to provision a new Volume inside the External Storage
3016 _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
3019 return ExtStorageDevice(unique_id, children, size, params)
3022 """Remove the extstorage device.
3025 if not self.minor and not self.Attach():
3026 # The extstorage device doesn't exist.
3029 # First shutdown the device (remove mappings).
3032 # Call the External Storage's remove script,
3033 # to remove the Volume from the External Storage
3034 _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
3037 def Rename(self, new_id):
3038 """Rename this device.
3044 """Attach to an existing extstorage device.
3046 This method maps the extstorage volume that matches our name with
3047 a corresponding block device and then attaches to this device.
3050 self.attached = False
3052 # Call the External Storage's attach script,
3053 # to attach an existing Volume to a block device under /dev
3054 self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
3055 self.unique_id, self.ext_params)
3058 st = os.stat(self.dev_path)
3059 except OSError, err:
3060 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
3063 if not stat.S_ISBLK(st.st_mode):
3064 logging.error("%s is not a block device", self.dev_path)
3067 self.major = os.major(st.st_rdev)
3068 self.minor = os.minor(st.st_rdev)
3069 self.attached = True
3074 """Assemble the device.
3080 """Shutdown the device.
3083 if not self.minor and not self.Attach():
3084 # The extstorage device doesn't exist.
3087 # Call the External Storage's detach script,
3088 # to detach an existing Volume from it's block device under /dev
3089 _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
3093 self.dev_path = None
3095 def Open(self, force=False):
3096 """Make the device ready for I/O.
3102 """Notifies that the device will no longer be used for I/O.
3107 def Grow(self, amount, dryrun, backingstore):
3110 @type amount: integer
3111 @param amount: the amount (in mebibytes) to grow with
3112 @type dryrun: boolean
3113 @param dryrun: whether to execute the operation in simulation mode
3114 only, without actually increasing the size
3117 if not backingstore:
3119 if not self.Attach():
3120 _ThrowError("Can't attach to extstorage device during Grow()")
3123 # we do not support dry runs of resize operations for now.
3126 new_size = self.size + amount
3128 # Call the External Storage's grow script,
3129 # to grow an existing Volume inside the External Storage
3130 _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
3131 self.ext_params, str(self.size), grow=str(new_size))
3133 def SetInfo(self, text):
3134 """Update metadata with info text.
3137 # Replace invalid characters
3138 text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
3139 text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
3141 # Only up to 128 characters are allowed
3144 # Call the External Storage's setinfo script,
3145 # to set metadata for an existing Volume inside the External Storage
3146 _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
3147 self.ext_params, metadata=text)
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # NOTE: use equality, not identity, when comparing action strings; `is'
  # only works by accident of string interning
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
3226 def ExtStorageFromDisk(name, base_dir=None):
3227 """Create an ExtStorage instance from disk.
3229 This function will return an ExtStorage instance
3230 if the given name is a valid ExtStorage name.
3232 @type base_dir: string
3233 @keyword base_dir: Base directory containing ExtStorage installations.
3234 Defaults to a search in all the ES_SEARCH_PATH dirs.
3236 @return: True and the ExtStorage instance if we find a valid one, or
3237 False and the diagnose message on error
3240 if base_dir is None:
3241 es_base_dir = pathutils.ES_SEARCH_PATH
3243 es_base_dir = [base_dir]
3245 es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)
3248 return False, ("Directory for External Storage Provider %s not"
3249 " found in search path" % name)
3251 # ES Files dictionary, we will populate it with the absolute path
3252 # names; if the value is True, then it is a required file, otherwise
3254 es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
3256 es_files[constants.ES_PARAMETERS_FILE] = True
3258 for (filename, _) in es_files.items():
3259 es_files[filename] = utils.PathJoin(es_dir, filename)
3262 st = os.stat(es_files[filename])
3263 except EnvironmentError, err:
3264 return False, ("File '%s' under path '%s' is missing (%s)" %
3265 (filename, es_dir, utils.ErrnoOrStr(err)))
3267 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
3268 return False, ("File '%s' under path '%s' is not a regular file" %
3271 if filename in constants.ES_SCRIPTS:
3272 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
3273 return False, ("File '%s' under path '%s' is not executable" %
3277 if constants.ES_PARAMETERS_FILE in es_files:
3278 parameters_file = es_files[constants.ES_PARAMETERS_FILE]
3280 parameters = utils.ReadFile(parameters_file).splitlines()
3281 except EnvironmentError, err:
3282 return False, ("Error while reading the EXT parameters file at %s: %s" %
3283 (parameters_file, utils.ErrnoOrStr(err)))
3284 parameters = [v.split(None, 1) for v in parameters]
3287 objects.ExtStorage(name=name, path=es_dir,
3288 create_script=es_files[constants.ES_SCRIPT_CREATE],
3289 remove_script=es_files[constants.ES_SCRIPT_REMOVE],
3290 grow_script=es_files[constants.ES_SCRIPT_GROW],
3291 attach_script=es_files[constants.ES_SCRIPT_ATTACH],
3292 detach_script=es_files[constants.ES_SCRIPT_DETACH],
3293 setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
3294 verify_script=es_files[constants.ES_SCRIPT_VERIFY],
3295 supported_parameters=parameters)
3299 def _ExtStorageEnvironment(unique_id, ext_params,
3300 size=None, grow=None, metadata=None):
3301 """Calculate the environment for an External Storage script.
3303 @type unique_id: tuple (driver, vol_name)
3304 @param unique_id: ExtStorage pool and name of the Volume
3305 @type ext_params: dict
3306 @param ext_params: the EXT parameters
3308 @param size: size of the Volume (in mebibytes)
3310 @param grow: new size of Volume after grow (in mebibytes)
3311 @type metadata: string
3312 @param metadata: metadata info of the Volume
3314 @return: dict of environment variables
3317 vol_name = unique_id[1]
3320 result["VOL_NAME"] = vol_name
3323 for pname, pvalue in ext_params.items():
3324 result["EXTP_%s" % pname.upper()] = str(pvalue)
3326 if size is not None:
3327 result["VOL_SIZE"] = size
3329 if grow is not None:
3330 result["VOL_NEW_SIZE"] = grow
3332 if metadata is not None:
3333 result["VOL_METADATA"] = metadata
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  base = ("%s-%s-%s-%s.log" %
          (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, base)
# Mapping from logical-disk type constants to their implementing classes
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-based storage is only usable when enabled at configure time
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
def _VerifyDiskType(dev_type):
  """Check that a device type is one we know how to instantiate.

  @raise errors.ProgrammerError: if the type is not in L{DEV_MAP}

  """
  if dev_type in DEV_MAP:
    return
  raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  @type disk: L{objects.Disk}
  @param disk: the disk whose parameters are checked
  @raise errors.ProgrammerError: if any expected parameter is missing

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  # only raise when something is actually missing
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @rtype: L{bdev.BlockDev} or None
  @return: the device if found and attached, else None

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @rtype: L{bdev.BlockDev}
  @return: the assembled device

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device
3428 def Create(disk, children, excl_stor):
3431 @type disk: L{objects.Disk}
3432 @param disk: the disk object to create
3433 @type children: list of L{bdev.BlockDev}
3434 @param children: the list of block devices that are children of the device
3435 represented by the disk parameter
3436 @type excl_stor: boolean
3437 @param excl_stor: Whether exclusive_storage is active
3440 _VerifyDiskType(disk.dev_type)
3441 _VerifyDiskParams(disk)
3442 device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
3443 disk.params, excl_stor)