# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Block device abstraction"""
import errno
import logging
import math
import os
import re

import pyparsing as pyp

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import objects
from ganeti import compat
from ganeti import netutils
from ganeti import pathutils
from ganeti import serializer
44 # Size of reads in _CanReadDevice
45 _DEVICE_READ_SIZE = 128 * 1024
class RbdShowmappedJsonError(Exception):
  """`rbd showmapped' JSON formatting error Exception class.

  """
  pass
55 def _IgnoreError(fn, *args, **kwargs):
56 """Executes the given function, ignoring BlockDeviceErrors.
58 This is used in order to simplify the execution of cleanup or
62 @return: True when fn didn't raise an exception, False otherwise
68 except errors.BlockDeviceError, err:
69 logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
def _ThrowError(msg, *args):
  """Log an error to the node daemon and then raise an exception.

  If args have been passed, msg is interpolated ("%"-style) with them.

  @type msg: string
  @param msg: the text of the exception
  @raise errors.BlockDeviceError

  """
  if args:
    msg = msg % args
  logging.error(msg)
  raise errors.BlockDeviceError(msg)
87 def _CheckResult(result):
88 """Throws an error if the given result is a failed one.
90 @param result: result from RunCmd
94 _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
def _CanReadDevice(path):
  """Check if we can read from the given device.

  This tries to read the first 128k of the device.

  @rtype: boolean
  @return: True if the read succeeded, False on any I/O error

  """
  try:
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
    return True
  except EnvironmentError:
    logging.warning("Can't read from device %s", path, exc_info=True)
    return False
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  @rtype: frozenset

  """
  paths = set([
    "/boot",
    "/dev",
    "/etc",
    "/home",
    "/proc",
    "/root",
    "/sys",
    ])

  # system binary/library directories under /, /usr and /usr/local
  for prefix in ["", "/usr", "/usr/local"]:
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
                     ["bin", "lib", "lib32", "lib64", "sbin"]))

  return compat.UniqueFrozenset(map(os.path.normpath, paths))
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
  def _Check(path):
    # a path is wrong if it is relative, an exact forbidden prefix, or
    # located below one of the forbidden prefixes
    return (not os.path.isabs(path) or
            path in _forbidden or
            compat.any(utils.IsBelowDir(prefix, path)
                       for prefix in _forbidden))

  return utils.NiceSort([path for path in map(os.path.normpath, paths)
                         if _Check(path)])
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  """
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
                                      " got '%s'" % path)

  for i in allowed:
    if not os.path.isabs(i):
      logging.info("Ignoring relative path '%s' for file storage", i)
      continue

    if utils.IsBelowDir(i, path):
      break
  else:
    # for/else: no allowed prefix matched the path
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
                                      " storage" % path)
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  A missing or unreadable file is treated as "no allowed paths".

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
  try:
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    return []
  else:
    return utils.FilterEmptyLinesAndComments(contents)
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)

  if _ComputeWrongFileStoragePaths([path]):
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
                                      path)

  _CheckFileStoragePath(path, allowed)
class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back
  like a linear one.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - it's major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachement to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """
  def __init__(self, unique_id, children, size, params):
    self._children = children
    self.dev_path = None
    self.unique_id = unique_id
    self.major = None
    self.minor = None
    self.attached = False
    self.size = size
    self.params = params

  def Assemble(self):
    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """
    pass

  def Attach(self):
    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for other,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    raise NotImplementedError

  def Remove(self):
    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    can't be completed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need this.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

  def Shutdown(self):
    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError

  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param params: dictionary of LD level disk parameters related to the
    synchronization
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors.

    """
    result = []
    for child in self._children:
      result.extend(child.SetSyncParams(params))
    return result

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param pause: Whether to pause or resume

    """
    result = True
    for child in self._children:
      result = result and child.PauseResumeSync(pause)
    return result

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=False,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

    if self._children:
      for child in self._children:
        child_status = child.GetSyncStatus()

        if min_percent is None:
          min_percent = child_status.sync_percent
        elif child_status.sync_percent is not None:
          min_percent = min(min_percent, child_status.sync_percent)

        if max_time is None:
          max_time = child_status.estimated_time
        elif child_status.estimated_time is not None:
          max_time = max(max_time, child_status.estimated_time)

        is_degraded = is_degraded or child_status.is_degraded

        if ldisk_status is None:
          ldisk_status = child_status.ldisk_status
        elif child_status.ldisk_status is not None:
          ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    for child in self._children:
      child.SetInfo(text)

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
    if result.failed:
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
    try:
      sz = int(result.output.strip())
    except (ValueError, TypeError) as err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))
    return sz

  def __repr__(self):
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
class LogicalVolume(BlockDev):
  """Logical Volume block device.

  """
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])

  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self._vg_name, self._lv_name = unique_id
    self._ValidateName(self._vg_name)
    self._ValidateName(self._lv_name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    self._degraded = True
    self.major = self.minor = self.pe_size = self.stripe_count = None
    self.Attach()

  @staticmethod
  def _GetStdPvSize(pvs_info):
    """Return the the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: float
    @return: size in MiB

    """
    assert len(pvs_info) > 0
    smallest = min([pv.size for pv in pvs_info])
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)

  @staticmethod
  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @type size: float
    @param size: LV size in MiB
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: integer
    @return: number of PVs needed

    """
    assert len(pvs_info) > 0
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
    return int(math.ceil(float(size) / pv_size))

  @staticmethod
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    """
    empty_pvs = [pv for pv in pvs_info if objects.LvmPvInfo.IsEmpty(pv)]
    if max_pvs is not None:
      empty_pvs = empty_pvs[:max_pvs]
    return [pv.name for pv in empty_pvs]

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new logical volume.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      _ThrowError(msg)
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
                    " %d available, %d needed", size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      if stripes > current_pvs:
        # No warning issued for this, as it's no surprise
        stripes = current_pvs
    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        _ThrowError("Not enough free space: required %s,"
                    " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    if result.failed:
      _ThrowError("LV create failed (%s): %s",
                  result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)

  @staticmethod
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM Volumen infos using lvm_cmd

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of dicts each with the parsed fields

    """
    if not fields:
      raise errors.ProgrammerError("No fields specified")

    sep = "|"
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    if result.failed:
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    data = []
    for line in result.stdout.splitlines():
      splitted_fields = line.strip().split(sep)

      if len(fields) != len(splitted_fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
                                  (lvm_cmd, line))

      data.append(splitted_fields)

    return data

  @classmethod
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs
    @param include_lvs: whether to include a list of LVs hosted on each PV

    @rtype: list
    @return: list of objects.LvmPvInfo objects

    """
    # We request "lv_name" field only if we care about LVs, so we don't get
    # a long list of entries with many duplicates unless we really have to.
    # The duplicate "pv_name" field will be ignored.
    if include_lvs:
      lvfield = "lv_name"
    else:
      lvfield = "pv_name"
    try:
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size", lvfield])
    except errors.GenericError as err:
      logging.error("Can't get PV information: %s", err)
      return None

    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
    # out duplicates.
    if include_lvs:
      info.sort(key=(lambda i: (i[0], i[5])))
    data = []
    lastpvi = None
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
        continue
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      # Beware of duplicates (check before inserting)
      if lastpvi and lastpvi.name == pv_name:
        if include_lvs and lv_name:
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
            lastpvi.lv_list.append(lv_name)
      else:
        if include_lvs and lv_name:
          lvl = [lv_name]
        else:
          lvl = []
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                                    size=float(pv_size), free=float(pv_free),
                                    attributes=pv_attr, lv_list=lvl)
        data.append(lastpvi)

    return data

  @classmethod
  def _GetExclusiveStorageVgFree(cls, vg_name):
    """Return the free disk space in the given VG, in exclusive storage mode.

    @type vg_name: string
    @param vg_name: VG name
    @rtype: float
    @return: free space in MiB

    """
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      return 0.0
    pv_size = cls._GetStdPvSize(pvs_info)
    num_pvs = len(cls._GetEmptyPvNames(pvs_info))
    return pv_size * num_pvs

  @classmethod
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
    """Get the free space info for specific VGs.

    @param vg_names: list of volume group names, if empty all will be returned
    @param excl_stor: whether exclusive_storage is enabled
    @param filter_readonly: whether to skip over readonly VGs

    @rtype: list
    @return: list of tuples (free_space, total_size, name) with free_space in
             MiB

    """
    try:
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
                                        "vg_size"])
    except errors.GenericError as err:
      logging.error("Can't get VG information: %s", err)
      return None

    data = []
    for vg_name, vg_free, vg_attr, vg_size in info:
      # (possibly) skip over vgs which are not writable
      if filter_readonly and vg_attr[0] == "r":
        continue
      # (possibly) skip over vgs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      # Exclusive storage needs a different concept of free space
      if excl_stor:
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
        assert es_free <= vg_free
        vg_free = es_free
      data.append((float(vg_free), float(vg_size), vg_name))

    return data

  @classmethod
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    if (not cls._VALID_NAME_RE.match(name) or
        name in cls._INVALID_NAMES or
        compat.any(substring in name
                   for substring in cls._INVALID_SUBSTRINGS)):
      _ThrowError("Invalid LVM name '%s'", name)

  def Remove(self):
    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
      return
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
                           (self._vg_name, self._lv_name)])
    if result.failed:
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this logical volume.

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
    if result.failed:
      _ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)

  def Attach(self):
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=m", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
    if result.failed:
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)
      return False
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
      return False
    out = out[-1].strip().rstrip(",")
    out = out.split(",")
    if len(out) != 5:
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
      return False

    status, major, minor, pe_size, stripes = out
    if len(status) < 6:
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
      return False

    try:
      major = int(major)
      minor = int(minor)
    except (TypeError, ValueError) as err:
      logging.error("lvs major/minor cannot be parsed: %s", str(err))

    try:
      pe_size = int(float(pe_size))
    except (TypeError, ValueError) as err:
      logging.error("Can't parse vg extent size: %s", err)
      return False

    try:
      stripes = int(stripes)
    except (TypeError, ValueError) as err:
      logging.error("Can't parse the number of stripes: %s", err)
      return False

    self.major = major
    self.minor = minor
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. doesn't backing
                                      # storage
    self.attached = True
    return True

  def Assemble(self):
    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
    if result.failed:
      _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)

  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for the LV device type, as we don't deactivate the
    volumes on shutdown.

    """
    pass

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    parameter.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (read from it return I/O error). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    if self._degraded:
      ldisk_status = constants.LDS_FAULTY
    else:
      ldisk_status = constants.LDS_OKAY

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the LV device type.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the LV device type.

    """
    pass

  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # remove existing snapshot if found
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
    _IgnoreError(snap.Remove)

    vg_info = self.GetVGInfo([self._vg_name], False)
    if not vg_info:
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
    if free_size < size:
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)

  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    """
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
                           self.dev_path])
    _CheckResult(result)

    raw_tags = result.stdout.strip()
    if raw_tags:
      for tag in raw_tags.split(","):
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                   tag.strip(), self.dev_path]))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    BlockDev.SetInfo(self, text)

    self._RemoveOldInfo()

    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    if not backingstore:
      return
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")
    full_stripe_size = self.pe_size * self.stripe_count
    # round up the amount so that all stripes grow by a whole number of
    # extents
    rest = amount % full_stripe_size
    if rest != 0:
      amount += full_stripe_size - rest
    cmd = ["lvextend", "-L", "+%dm" % amount]
    if dryrun:
      cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports 'cling'
    for alloc_policy in "contiguous", "cling", "normal":
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
      if not result.failed:
        return
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
class DRBD8Status(object):
  """A DRBD status representation class.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       r"\s+ds:([^/]+)/(\S+)\s+.*$")
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       # the hours field can have any number of digits
                       r"finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  # connection states in which the device is considered to be resyncing
  CSET_SYNC = compat.UniqueFrozenset([
    CS_WFREPORTPARAMS,
    CS_STARTINGSYNCS,
    CS_STARTINGSYNCT,
    CS_WFBITMAPS,
    CS_WFBITMAPT,
    CS_WFSYNCUUID,
    CS_SYNCSOURCE,
    CS_SYNCTARGET,
    CS_PAUSEDSYNCS,
    CS_PAUSEDSYNCT,
    ])

  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"

  def __init__(self, procline):
    u = self.UNCONF_RE.match(procline)
    if u:
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
    else:
      m = self.LINE_RE.match(procline)
      if not m:
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      self.cstatus = m.group(1)
      self.lrole = m.group(2)
      self.rrole = m.group(3)
      self.ldisk = m.group(4)
      self.rdisk = m.group(5)

    # end reading of data from the LINE_RE or UNCONF_RE

    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    m = self.SYNC_RE.match(procline)
    if m:
      self.sync_percent = float(m.group(1))
      hours = int(m.group(2))
      minutes = int(m.group(3))
      seconds = int(m.group(4))
      self.est_time = hours * 3600 + minutes * 60 + seconds
    else:
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      if self.is_in_resync:
        self.sync_percent = 0
      else:
        self.sync_percent = None
      self.est_time = None
class BaseDRBD(BlockDev): # pylint: disable=W0223
  """Base DRBD class.

  This class contains a few bits of common functionality between the
  0.7 and 8.x versions of DRBD.

  NOTE(review): this excerpt appears truncated (several statements are
  missing inside the methods below); verify against version control.

  """
  # Matches the first line of /proc/drbd, e.g.
  # "version: 8.3.11 (api:88/proto:86-96)"
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
  # per-minor status line: captures minor number and connection state
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")

  # connection state names as they appear in /proc/drbd
  _ST_UNCONFIGURED = "Unconfigured"
  _ST_WFCONNECTION = "WFConnection"
  _ST_CONNECTED = "Connected"

  _STATUS_FILE = constants.DRBD_STATUS_FILE
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"

  def _GetProcData(filename=_STATUS_FILE):
    """Return data from /proc/drbd, split into lines.

    @raise errors.BlockDeviceError: if the file cannot be read or is empty

    """
      data = utils.ReadFile(filename).splitlines()
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        # ENOENT most likely means the drbd kernel module is not loaded
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
      _ThrowError("Can't read any data from %s", filename)

  def _MassageProcData(cls, data):
    """Transform the output of _GetProdData into a nicer form.

    @return: a dictionary of minor: joined lines from /proc/drbd
        for that minor

    """
    old_minor = old_line = None
      if not line: # completely empty lines, as can be returned by drbd8.0+
      lresult = cls._VALID_LINE_RE.match(line)
      if lresult is not None:
        # a new minor starts; flush the one accumulated so far
        if old_minor is not None:
          results[old_minor] = old_line
        old_minor = int(lresult.group(1))
        # non-matching lines are continuations of the current minor
        if old_minor is not None:
          old_line += " " + line.strip()
    # add last line
    if old_minor is not None:
      results[old_minor] = old_line

  def _GetVersion(cls, proc_data):
    """Return the DRBD version.

    This will return a dict with keys:
      - k_major
      - k_minor
      - k_point
      - api
      - proto
      - proto2 (only on drbd > 8.2.X)

    """
    first_line = proc_data[0].strip()
    version = cls._VERSION_RE.match(first_line)
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
    values = version.groups()
      "k_major": int(values[0]),
      "k_minor": int(values[1]),
      "k_point": int(values[2]),
      "api": int(values[3]),
      "proto": int(values[4]),
    # the second protocol number is only reported by newer DRBD versions
    if values[5] is not None:
      retval["proto2"] = values[5]

  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
    """Returns DRBD usermode_helper currently set.

    """
      helper = utils.ReadFile(filename).splitlines()[0]
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
      _ThrowError("Can't read any data from %s", filename)

  def _DevPath(minor):
    """Return the path to a drbd device for a given minor.

    """
    return "/dev/drbd%d" % minor

  def GetUsedDevs(cls):
    """Compute the list of used DRBD devices.

    """
    data = cls._GetProcData()
      match = cls._VALID_LINE_RE.match(line)
      minor = int(match.group(1))
      state = match.group(2)
      # unconfigured minors do not count as used
      if state == cls._ST_UNCONFIGURED:
      used_devs[minor] = state, line

  def _SetFromMinor(self, minor):
    """Set our parameters based on the given minor.

    This sets our minor variable and our dev_path.

    """
    self.minor = self.dev_path = None
    self.attached = False
      self.dev_path = self._DevPath(minor)
      self.attached = True

  def _CheckMetaSize(meta_device):
    """Check if the given meta device looks like a valid one.

    This currently only checks the size, which must be around
    128MiB.

    """
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
      _ThrowError("Failed to get device size: %s - %s",
                  result.fail_reason, result.output)
      sectors = int(result.stdout)
    except (TypeError, ValueError):
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
    # blockdev --getsize reports 512-byte sectors
    num_bytes = sectors * 512
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
    # the maximum *valid* size of the meta device when living on top
    # of LVM is hard to compute: it depends on the number of stripes
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
    # size meta device; as such, we restrict it to 1GB (a little bit
    # too generous, but making assumptions about PE size is hard)
    if num_bytes > 1024 * 1024 * 1024:
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))

  def Rename(self, new_id):
    """Rename a device.

    This is not supported for drbd devices.

    """
    raise errors.ProgrammerError("Can't rename a drbd device")
class DRBD8(BaseDRBD):
  """DRBD v8.x block device.

  This implements the local host part of the DRBD device, i.e. it
  doesn't do anything to the supposed peer. If you need a fully
  connected DRBD pair, you need to use this class on both hosts.

  The unique_id for the drbd device is a (local_ip, local_port,
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
  two children: the data device and the meta_device. The meta device
  is checked for valid size and is zeroed on create.

  """
  # how long (seconds) to wait in DisconnectNet for the device to reach
  # standalone state (used as the utils.Retry timeout below)
  _NET_RECONFIG_TIMEOUT = 60

  # command line options for barriers
  _DISABLE_DISK_OPTION = "--no-disk-barrier" # -a
  _DISABLE_DRAIN_OPTION = "--no-disk-drain" # -D
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes" # -m
  def __init__(self, unique_id, children, size, params):
    """Initialize a DRBD8 device.

    @param unique_id: (local_ip, local_port, remote_ip, remote_port,
        local_minor, secret) tuple
    @param children: either empty or [data_device, meta_device]
    @raise ValueError: on invalid configuration data

    """
    if children and children.count(None) > 0:
    if len(children) not in (0, 2):
      raise ValueError("Invalid configuration data %s" % str(children))
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._lhost, self._lport,
     self._rhost, self._rport,
     self._aminor, self._secret) = unique_id
      # children[1] is the meta device; an unreadable one is tolerated here
      if not _CanReadDevice(children[1].dev_path):
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
    super(DRBD8, self).__init__(unique_id, children, size, params)
    self.major = self._DRBD_MAJOR
    # only kernel-side DRBD 8.x is supported by this class
    version = self._GetVersion(self._GetProcData())
    if version["k_major"] != 8:
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                  " usage: kernel is %s.%s, ganeti wants 8.x",
                  version["k_major"], version["k_minor"])
    # local and remote endpoints must differ
    if (self._lhost is not None and self._lhost == self._rhost and
        self._lport == self._rport):
      raise ValueError("Invalid configuration data, same local/remote %s" %
  def _InitMeta(cls, minor, dev_path):
    """Initialize a meta device.

    This will not work if the given minor is in use.

    @param minor: an unused DRBD minor (used only to name the drbdmeta node)
    @param dev_path: path of the block device to initialize as metadata

    """
    # Zero the metadata first, in order to make sure drbdmeta doesn't
    # try to auto-detect existing filesystems or similar (see
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
    # care about the first 128MB of data in the device, even though it
    result = utils.RunCmd([constants.DD_CMD,
                           "if=/dev/zero", "of=%s" % dev_path,
                           "bs=1048576", "count=128", "oflag=direct"])
      _ThrowError("Can't wipe the meta device: %s", result.output)
    result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
                           "v08", dev_path, "0", "create-md"])
      _ThrowError("Can't initialize meta device: %s", result.output)
  def _FindUnusedMinor(cls):
    """Find an unused DRBD device.

    This is specific to 8.x as the minors are allocated dynamically,
    so non-existing numbers up to a max minor count are actually free.

    @return: an unused minor number

    """
    data = cls._GetProcData()
      # an explicitly unconfigured minor is free to reuse
      match = cls._UNUSED_LINE_RE.match(line)
        return int(match.group(1))
      match = cls._VALID_LINE_RE.match(line)
        minor = int(match.group(1))
        highest = max(highest, minor)
    if highest is None: # there are no minors in use at all
    if highest >= cls._MAX_MINORS:
      logging.error("Error: no free drbd minors!")
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
  def _GetShowParser(cls):
    """Return a parser for `drbd show` output.

    This will either create or return an already-created parser for the
    output of the command `drbd show`.

    """
    # the parser is cached on the class after the first call
    if cls._PARSE_SHOW is not None:
      return cls._PARSE_SHOW

    # pyparsing setup
    lbrace = pyp.Literal("{").suppress()
    rbrace = pyp.Literal("}").suppress()
    lbracket = pyp.Literal("[").suppress()
    rbracket = pyp.Literal("]").suppress()
    semi = pyp.Literal(";").suppress()
    colon = pyp.Literal(":").suppress()
    # this also converts the value to an int
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))

    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
    defa = pyp.Literal("_is_default").suppress()
    dbl_quote = pyp.Literal('"').suppress()

    keyword = pyp.Word(pyp.alphanums + "-")

    # value types
    value = pyp.Word(pyp.alphanums + "_-/.:")
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Word(pyp.nums + ".") + colon + number)
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
                 pyp.Optional(rbracket) + colon + number)
    # meta device, extended syntax
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
    # device name, extended syntax
    device_value = pyp.Literal("minor").suppress() + number

    # a statement
    stmt = (~rbrace + keyword + ~lbrace +
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
            pyp.Optional(defa) + semi +
            pyp.Optional(pyp.restOfLine).suppress())

    # an entire section
    section_name = pyp.Word(pyp.alphas + "_")
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace

    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))

    cls._PARSE_SHOW = bnf
  def _GetShowData(cls, minor):
    """Return the `drbdsetup show` data for a minor.

    @return: the raw stdout of the command

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
      # failure is only logged here, not raised
      logging.error("Can't display the drbd config: %s - %s",
                    result.fail_reason, result.output)
    return result.stdout
  def _GetDevInfo(cls, out):
    """Parse details about a given DRBD minor.

    This return, if available, the local backing device (as a path)
    and the local and remote (ip, port) information from a string
    containing the output of the `drbdsetup show` command as returned
    by _GetShowData.

    @return: a dict with (a subset of) the keys local_dev, meta_dev,
        meta_index, local_addr, remote_addr

    """
    bnf = cls._GetShowParser()
      results = bnf.parseString(out)
    except pyp.ParseException, err:
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))

    # and massage the results into our desired format
    for section in results:
      if sname == "_this_host":
        for lst in section[1:]:
          if lst[0] == "disk":
            data["local_dev"] = lst[1]
          elif lst[0] == "meta-disk":
            data["meta_dev"] = lst[1]
            data["meta_index"] = lst[2]
          elif lst[0] == "address":
            data["local_addr"] = tuple(lst[1:])
      elif sname == "_remote_host":
        for lst in section[1:]:
          if lst[0] == "address":
            data["remote_addr"] = tuple(lst[1:])
  def _MatchesLocal(self, info):
    """Test if our local config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our local backing device is the same as the one in
    the info parameter, in effect testing if we look like the given
    device.

    @rtype: boolean

    """
      backend, meta = self._children
      backend = meta = None

    if backend is not None:
      retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
      retval = ("local_dev" not in info)

    if meta is not None:
      retval = retval and ("meta_dev" in info and
                           info["meta_dev"] == meta.dev_path)
      # we only ever create the meta at index 0
      retval = retval and ("meta_index" in info and
                           info["meta_index"] == 0)
      retval = retval and ("meta_dev" not in info and
                           "meta_index" not in info)
  def _MatchesNet(self, info):
    """Test if our network config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our network configuration is the same as the one
    in the info parameter, in effect testing if we look like the given
    device.

    @rtype: boolean

    """
    # fully unconfigured on both sides counts as a match
    if (((self._lhost is None and not ("local_addr" in info)) and
         (self._rhost is None and not ("remote_addr" in info)))):

    if self._lhost is None:

    if not ("local_addr" in info and
            "remote_addr" in info):

    retval = (info["local_addr"] == (self._lhost, self._lport))
    retval = (retval and
              info["remote_addr"] == (self._rhost, self._rport))
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    @param minor: the DRBD minor to attach
    @param backend: path of the data device
    @param meta: path of the meta device
    @param size: device size; when set, passed as "-d <size>m" to drbdsetup

    """
    args = ["drbdsetup", self._DevPath(minor), "disk",
      args.extend(["-d", "%sm" % size])

    version = self._GetVersion(self._GetProcData())
    vmaj = version["k_major"]
    vmin = version["k_minor"]
    vrel = version["k_point"]

      # barrier options depend on the exact kernel DRBD version
      self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
                                   self.params[constants.LDP_BARRIERS],
                                   self.params[constants.LDP_NO_META_FLUSH])
    args.extend(barrier_args)

    if self.params[constants.LDP_DISK_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))

    result = utils.RunCmd(args)
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    if not disabled_barriers_set in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # NOTE(review): due to operator precedence this reads as
    # "(not vmaj == 8) and (vmin in (0, 2, 3))", which does not match the
    # stated intent of rejecting anything but 8.0/8.2/8.3 -- verify.
    if not vmaj == 8 and vmin in (0, 2, 3):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options"""
      if min_version is not None and vrel >= min_version:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    meta_flush_supported = disk_flush_supported = {
    disk_drain_supported = {
    disk_barriers_supported = {
    # meta flush has its own flag, independent of the barrier set
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))
  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    @param net_info: (local_ip, local_port, remote_ip, remote_port) tuple;
        any None element means "shut the network side down" instead
    @param protocol: the DRBD wire protocol to use
    @param dual_pri: whether to allow both peers primary at once
    @param hmac: authentication algorithm (passed as -a)
    @param secret: shared secret (passed as -x)

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # we don't want network connection and actually want to make
      # sure its shutdown
      self._ShutdownNet(minor)

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    sync_errors = self._SetMinorSyncParams(minor, self.params)
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    # both addresses must belong to the same family
    if netutils.IP6Address.IsValid(lhost):
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
    elif netutils.IP4Address.IsValid(lhost):
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
      args.extend(["-a", hmac, "-x", secret])

    if self.params[constants.LDP_NET_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))

    result = utils.RunCmd(args)
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    # wait until `drbdsetup show` actually reflects the new addresses
    def _CheckNetworkConfig():
      info = self._GetDevInfo(self._GetShowData(minor))
      if not "local_addr" in info or not "remote_addr" in info:
        raise utils.RetryAgain()

      if (info["local_addr"] != (lhost, lport) or
          info["remote_addr"] != (rhost, rport)):
        raise utils.RetryAgain()

      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)
  def AddChildren(self, devices):
    """Add a disk to the DRBD device.

    @param devices: [data_device, meta_device] to attach

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to dbrd8 during AddChildren",
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    backend, meta = devices
    if backend.dev_path is None or meta.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
    # re-initialize the metadata before attaching the local disk
    self._CheckMetaSize(meta.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)
    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
    self._children = devices
  def RemoveChildren(self, devices):
    """Detach the drbd device from local storage.

    @param devices: the two child device paths we expect to be attached;
        must match our current children

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
    # early return if we don't actually have backing storage
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in info:
    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)
    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    # sanity-check that the caller's devices are really our children
    for child, dev in zip(self._children, devices):
      if dev != child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, dev, child.dev_path)

    self._ShutdownLocal(self.minor)
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation.

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages

    """
    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if params[constants.LDP_DYNAMIC_RESYNC]:
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the version
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))

      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")

      # add the c-* parameters to args
      args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
                   "--c-fill-target", params[constants.LDP_FILL_TARGET],
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
                   "--c-min-rate", params[constants.LDP_MIN_RATE],

      # static resync rate
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])

    args.append("--create-device")
    result = utils.RunCmd(args)
      msg = ("Can't change syncer rate: %s - %s" %
             (result.fail_reason, result.output))
  def SetSyncParams(self, params):
    """Set the synchronization parameters of the DRBD syncer.

    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
        children. An empty list means no errors

    """
    if self.minor is None:
      err = "Not attached during SetSyncParams"
    # propagate to children first, then apply to our own minor
    children_result = super(DRBD8, self).SetSyncParams(params)
    children_result.extend(self._SetMinorSyncParams(self.minor, params))
    return children_result
  def PauseResumeSync(self, pause):
    """Pauses or resumes the sync of a DRBD device.

    @param pause: Whether to pause or resume
    @return: the success of the operation

    """
    if self.minor is None:
      logging.info("Not attached during PauseSync")

    children_result = super(DRBD8, self).PauseResumeSync(pause)

    # cmd is the drbdsetup subcommand selected from the pause flag
    result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
      logging.error("Can't %s: %s - %s", cmd,
                    result.fail_reason, result.output)
    return not result.failed and children_result
1937 def GetProcStatus(self):
1938 """Return device data from /proc.
1941 if self.minor is None:
1942 _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
1943 proc_info = self._MassageProcData(self._GetProcData())
1944 if self.minor not in proc_info:
1945 _ThrowError("drbd%d: can't find myself in /proc", self.minor)
1946 return DRBD8Status(proc_info[self.minor])
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If sync_percent is None, it means all is ok
    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    We set the is_degraded parameter to True on two conditions:
    network not connected or local disk missing.

    We compute the ldisk parameter based on whether we have a local
    disk or not.

    @rtype: objects.BlockDevStatus

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)

    stats = self.GetProcStatus()
    is_degraded = not stats.is_connected or not stats.is_disk_uptodate

    # map the local-disk state onto the generic ldisk status constants
    if stats.is_disk_uptodate:
      ldisk_status = constants.LDS_OKAY
    elif stats.is_diskless:
      ldisk_status = constants.LDS_FAULTY
      ldisk_status = constants.LDS_UNKNOWN

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  sync_percent=stats.sync_percent,
                                  estimated_time=stats.est_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)
  def Open(self, force=False):
    """Make the local state primary.

    If the 'force' parameter is given, the '-o' option is passed to
    drbdsetup. Since this is a potentially dangerous operation, the
    force flag should be only given after creation, when it actually
    is mandatory.

    """
    if self.minor is None and not self.Attach():
      logging.error("DRBD cannot attach to a device during open")

    cmd = ["drbdsetup", self.dev_path, "primary"]
    result = utils.RunCmd(cmd)
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
2008 """Make the local state secondary.
2010 This will, of course, fail if the device is in use.
2013 if self.minor is None and not self.Attach():
2014 _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
2015 result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
2017 _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
2018 self.minor, result.output)
  def DisconnectNet(self):
    """Removes network configuration.

    This method shutdowns the network side of the device.

    The method will wait up to a hardcoded timeout for the device to
    go into standalone after the 'disconnect' command before
    re-configuring it, as sometimes it takes a while for the
    disconnect to actually propagate and thus we might issue a 'net'
    command while the device is still connected. If the device will
    still be attached to the network and we time out, we raise an
    exception.

    """
    if self.minor is None:
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: DRBD disk missing network info in"
                  " DisconnectNet()", self.minor)

    # mutable holder so the retry closure below can update the flag
    class _DisconnectStatus:
      def __init__(self, ever_disconnected):
        self.ever_disconnected = ever_disconnected

    dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))

    def _WaitForDisconnect():
      if self.GetProcStatus().is_standalone:

      # retry the disconnect, it seems possible that due to a well-time
      # disconnect on the peer, my disconnect command might be ignored and
      # forgotten
      dstatus.ever_disconnected = \
        _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected

      raise utils.RetryAgain()

    # Keep start time
    start_time = time.time()

      # Start delay at 100 milliseconds and grow up to 2 seconds
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
                  self._NET_RECONFIG_TIMEOUT)
    except utils.RetryTimeout:
      if dstatus.ever_disconnected:
        msg = ("drbd%d: device did not react to the"
               " 'disconnect' command in a timely manner")
        msg = "drbd%d: can't shutdown network, even after multiple retries"

      _ThrowError(msg, self.minor)

    # log a warning-worthy note if detaching took a sizable fraction of
    # the allowed timeout
    reconfig_time = time.time() - start_time
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
                   self.minor, reconfig_time)
  def AttachNet(self, multimaster):
    """Reconnects the network.

    This method connects the network side of the device with a
    specified multi-master flag. The device needs to be 'Standalone'
    but have valid network configuration data.

    Args:
      - multimaster: init the network in dual-primary mode

    """
    if self.minor is None:
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)

    status = self.GetProcStatus()

    if not status.is_standalone:
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)

    self._AssembleNet(self.minor,
                      (self._lhost, self._lport, self._rhost, self._rport),
                      constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
                      hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2108 """Check if our minor is configured.
2110 This doesn't do any device configurations - it only checks if the
2111 minor is in a state different from Unconfigured.
2113 Note that this function will not change the state of the system in
2114 any way (except in case of side-effects caused by reading from
2118 used_devs = self.GetUsedDevs()
2119 if self._aminor in used_devs:
2120 minor = self._aminor
2124 self._SetFromMinor(minor)
2125 return minor is not None
2128 """Assemble the drbd.
2131 - if we have a configured device, we try to ensure that it matches
2133 - if not, we create it from zero
2134 - anyway, set the device parameters
2137 super(DRBD8, self).Assemble()
2140 if self.minor is None:
2141 # local device completely unconfigured
2142 self._FastAssemble()
2144 # we have to recheck the local and network status and try to fix
2146 self._SlowAssemble()
2148 sync_errors = self.SetSyncParams(self.params)
2150 _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
2151 (self.minor, utils.CommaJoin(sync_errors)))
  def _SlowAssemble(self):
    """Assembles the DRBD device from a (partially) configured device.

    In case of partially attached (local device matches but no network
    setup), we perform the network attach. If successful, we re-test
    the attach if can return success.

    """
    # TODO: Rewrite to not use a for loop just because there is 'break'
    # pylint: disable=W0631
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    for minor in (self._aminor,):
      info = self._GetDevInfo(self._GetShowData(minor))
      match_l = self._MatchesLocal(info)
      match_r = self._MatchesNet(info)

      if match_l and match_r:
        # everything matches

      if match_l and not match_r and "local_addr" not in info:
        # disk matches, but not attached to network, attach and recheck
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      if match_r and "local_dev" not in info:
        # no local disk, but network attached and it matches
        self._AssembleLocal(minor, self._children[0].dev_path,
                            self._children[1].dev_path, self.size)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      # this case must be considered only if we actually have local
      # storage, i.e. not in diskless mode, because all diskless
      # devices are equal from the point of view of local
      if (match_l and "local_dev" in info and
          not match_r and "local_addr" in info):
        # strange case - the device network part points to somewhere
        # else, even though its local storage is ours; as we own the
        # drbd space, we try to disconnect from the remote peer and
        # reconnect to our correct one
          self._ShutdownNet(minor)
        except errors.BlockDeviceError, err:
          _ThrowError("drbd%d: device has correct local storage, wrong"
                      " remote peer and is unable to disconnect in order"
                      " to attach to the correct peer: %s", minor, str(err))
        # note: _AssembleNet also handles the case when we don't want
        # local storage (i.e. one or more of the _[lr](host|port) is
        # None)
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      self._SetFromMinor(minor)
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
2228 def _FastAssemble(self):
2229 """Assemble the drbd device from zero.
2231 This is run when in Assemble we detect our minor is unused.
2234 minor = self._aminor
2235 if self._children and self._children[0] and self._children[1]:
2236 self._AssembleLocal(minor, self._children[0].dev_path,
2237 self._children[1].dev_path, self.size)
2238 if self._lhost and self._lport and self._rhost and self._rport:
2239 self._AssembleNet(minor,
2240 (self._lhost, self._lport, self._rhost, self._rport),
2241 constants.DRBD_NET_PROTOCOL,
2242 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2243 self._SetFromMinor(minor)
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
      _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
      _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)
2269 def _ShutdownAll(cls, minor):
2270 """Deactivate the device.
2272 This will, of course, fail if the device is in use.
2275 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
2277 _ThrowError("drbd%d: can't shutdown drbd device: %s",
2278 minor, result.output)
2281 """Shutdown the DRBD device.
2284 if self.minor is None and not self.Attach():
2285 logging.info("drbd%d: not attached during Shutdown()", self._aminor)
2289 self.dev_path = None
2290 self._ShutdownAll(minor)
2293 """Stub remove for DRBD devices.
2299 def Create(cls, unique_id, children, size, params, excl_stor):
2300 """Create a new DRBD8 device.
2302 Since DRBD devices are not created per se, just assembled, this
2303 function only initializes the metadata.
2306 if len(children) != 2:
2307 raise errors.ProgrammerError("Invalid setup for the drbd device")
2309 raise errors.ProgrammerError("DRBD device requested with"
2310 " exclusive_storage")
2311 # check that the minor is unused
2312 aminor = unique_id[4]
2313 proc_info = cls._MassageProcData(cls._GetProcData())
2314 if aminor in proc_info:
2315 status = DRBD8Status(proc_info[aminor])
2316 in_use = status.is_in_use
2320 _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
2323 if not meta.Attach():
2324 _ThrowError("drbd%d: can't attach to meta device '%s'",
2326 cls._CheckMetaSize(meta.dev_path)
2327 cls._InitMeta(aminor, meta.dev_path)
2328 return cls(unique_id, children, size, params)
2330 def Grow(self, amount, dryrun, backingstore):
2331 """Resize the DRBD device and its backing storage.
2334 if self.minor is None:
2335 _ThrowError("drbd%d: Grow called while not attached", self._aminor)
2336 if len(self._children) != 2 or None in self._children:
2337 _ThrowError("drbd%d: cannot grow diskless device", self.minor)
2338 self._children[0].Grow(amount, dryrun, backingstore)
2339 if dryrun or backingstore:
2340 # DRBD does not support dry-run mode and is not backing storage,
2341 # so we'll return here
2343 result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
2344 "%dm" % (self.size + amount)])
2346 _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
2349 class FileStorage(BlockDev):
2352 This class represents the a file storage backend device.
2354 The unique_id for the file device is a (file_driver, file_path) tuple.
2357 def __init__(self, unique_id, children, size, params):
2358 """Initalizes a file device backend.
2362 raise errors.BlockDeviceError("Invalid setup for file device")
2363 super(FileStorage, self).__init__(unique_id, children, size, params)
2364 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2365 raise ValueError("Invalid configuration data %s" % str(unique_id))
2366 self.driver = unique_id[0]
2367 self.dev_path = unique_id[1]
2369 CheckFileStoragePath(self.dev_path)
2374 """Assemble the device.
2376 Checks whether the file device exists, raises BlockDeviceError otherwise.
2379 if not os.path.exists(self.dev_path):
2380 _ThrowError("File device '%s' does not exist" % self.dev_path)
2383 """Shutdown the device.
2385 This is a no-op for the file type, as we don't deactivate
2386 the file on shutdown.
2391 def Open(self, force=False):
2392 """Make the device ready for I/O.
2394 This is a no-op for the file type.
2400 """Notifies that the device will no longer be used for I/O.
2402 This is a no-op for the file type.
2408 """Remove the file backing the block device.
2411 @return: True if the removal was successful
2415 os.remove(self.dev_path)
2416 except OSError, err:
2417 if err.errno != errno.ENOENT:
2418 _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2420 def Rename(self, new_id):
2421 """Renames the file.
2424 # TODO: implement rename for file-based storage
2425 _ThrowError("Rename is not supported for file-based storage")
2427 def Grow(self, amount, dryrun, backingstore):
2430 @param amount: the amount (in mebibytes) to grow with
2433 if not backingstore:
2435 # Check that the file exists
2437 current_size = self.GetActualSize()
2438 new_size = current_size + amount * 1024 * 1024
2439 assert new_size > current_size, "Cannot Grow with a negative amount"
2440 # We can't really simulate the growth
2444 f = open(self.dev_path, "a+")
2445 f.truncate(new_size)
2447 except EnvironmentError, err:
2448 _ThrowError("Error in file growth: %", str(err))
2451 """Attach to an existing file.
2453 Check if this file already exists.
2456 @return: True if file exists
2459 self.attached = os.path.exists(self.dev_path)
2460 return self.attached
2462 def GetActualSize(self):
2463 """Return the actual disk size.
2465 @note: the device needs to be active when this is called
2468 assert self.attached, "BlockDevice not attached in GetActualSize()"
2470 st = os.stat(self.dev_path)
2472 except OSError, err:
2473 _ThrowError("Can't stat %s: %s", self.dev_path, err)
2476 def Create(cls, unique_id, children, size, params, excl_stor):
2477 """Create a new file.
2479 @param size: the size of file in MiB
2481 @rtype: L{bdev.FileStorage}
2482 @return: an instance of FileStorage
2486 raise errors.ProgrammerError("FileStorage device requested with"
2487 " exclusive_storage")
2488 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2489 raise ValueError("Invalid configuration data %s" % str(unique_id))
2491 dev_path = unique_id[1]
2493 CheckFileStoragePath(dev_path)
2496 fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2497 f = os.fdopen(fd, "w")
2498 f.truncate(size * 1024 * 1024)
2500 except EnvironmentError, err:
2501 if err.errno == errno.EEXIST:
2502 _ThrowError("File already existing: %s", dev_path)
2503 _ThrowError("Error in file creation: %", str(err))
2505 return FileStorage(unique_id, children, size, params)
2508 class PersistentBlockDevice(BlockDev):
2509 """A block device with persistent node
2511 May be either directly attached, or exposed through DM (e.g. dm-multipath).
2512 udev helpers are probably required to give persistent, human-friendly
2515 For the time being, pathnames are required to lie under /dev.
2518 def __init__(self, unique_id, children, size, params):
2519 """Attaches to a static block device.
2521 The unique_id is a path under /dev.
2524 super(PersistentBlockDevice, self).__init__(unique_id, children, size,
2526 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2527 raise ValueError("Invalid configuration data %s" % str(unique_id))
2528 self.dev_path = unique_id[1]
2529 if not os.path.realpath(self.dev_path).startswith("/dev/"):
2530 raise ValueError("Full path '%s' lies outside /dev" %
2531 os.path.realpath(self.dev_path))
2532 # TODO: this is just a safety guard checking that we only deal with devices
2533 # we know how to handle. In the future this will be integrated with
2534 # external storage backends and possible values will probably be collected
2535 # from the cluster configuration.
2536 if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
2537 raise ValueError("Got persistent block device of invalid type: %s" %
2540 self.major = self.minor = None
2544 def Create(cls, unique_id, children, size, params, excl_stor):
2545 """Create a new device
2547 This is a noop, we only return a PersistentBlockDevice instance
2551 raise errors.ProgrammerError("Persistent block device requested with"
2552 " exclusive_storage")
2553 return PersistentBlockDevice(unique_id, children, 0, params)
2563 def Rename(self, new_id):
2564 """Rename this device.
2567 _ThrowError("Rename is not supported for PersistentBlockDev storage")
2570 """Attach to an existing block device.
2574 self.attached = False
2576 st = os.stat(self.dev_path)
2577 except OSError, err:
2578 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2581 if not stat.S_ISBLK(st.st_mode):
2582 logging.error("%s is not a block device", self.dev_path)
2585 self.major = os.major(st.st_rdev)
2586 self.minor = os.minor(st.st_rdev)
2587 self.attached = True
2592 """Assemble the device.
2598 """Shutdown the device.
2603 def Open(self, force=False):
2604 """Make the device ready for I/O.
2610 """Notifies that the device will no longer be used for I/O.
2615 def Grow(self, amount, dryrun, backingstore):
2616 """Grow the logical volume.
2619 _ThrowError("Grow is not supported for PersistentBlockDev storage")
2622 class RADOSBlockDevice(BlockDev):
2623 """A RADOS Block Device (rbd).
2625 This class implements the RADOS Block Device for the backend. You need
2626 the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
2627 this to be functional.
2630 def __init__(self, unique_id, children, size, params):
2631 """Attaches to an rbd device.
2634 super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
2635 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2636 raise ValueError("Invalid configuration data %s" % str(unique_id))
2638 self.driver, self.rbd_name = unique_id
2640 self.major = self.minor = None
2644 def Create(cls, unique_id, children, size, params, excl_stor):
2645 """Create a new rbd device.
2647 Provision a new rbd volume inside a RADOS pool.
2650 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2651 raise errors.ProgrammerError("Invalid configuration data %s" %
2654 raise errors.ProgrammerError("RBD device requested with"
2655 " exclusive_storage")
2656 rbd_pool = params[constants.LDP_POOL]
2657 rbd_name = unique_id[1]
2659 # Provision a new rbd volume (Image) inside the RADOS cluster.
2660 cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
2661 rbd_name, "--size", "%s" % size]
2662 result = utils.RunCmd(cmd)
2664 _ThrowError("rbd creation failed (%s): %s",
2665 result.fail_reason, result.output)
2667 return RADOSBlockDevice(unique_id, children, size, params)
2670 """Remove the rbd device.
2673 rbd_pool = self.params[constants.LDP_POOL]
2674 rbd_name = self.unique_id[1]
2676 if not self.minor and not self.Attach():
2677 # The rbd device doesn't exist.
2680 # First shutdown the device (remove mappings).
2683 # Remove the actual Volume (Image) from the RADOS cluster.
2684 cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
2685 result = utils.RunCmd(cmd)
2687 _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
2688 result.fail_reason, result.output)
2690 def Rename(self, new_id):
2691 """Rename this device.
2697 """Attach to an existing rbd device.
2699 This method maps the rbd volume that matches our name with
2700 an rbd device and then attaches to this device.
2703 self.attached = False
2705 # Map the rbd volume to a block device under /dev
2706 self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
2709 st = os.stat(self.dev_path)
2710 except OSError, err:
2711 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2714 if not stat.S_ISBLK(st.st_mode):
2715 logging.error("%s is not a block device", self.dev_path)
2718 self.major = os.major(st.st_rdev)
2719 self.minor = os.minor(st.st_rdev)
2720 self.attached = True
2724 def _MapVolumeToBlockdev(self, unique_id):
2725 """Maps existing rbd volumes to block devices.
2727 This method should be idempotent if the mapping already exists.
2730 @return: the block device path that corresponds to the volume
2733 pool = self.params[constants.LDP_POOL]
2736 # Check if the mapping already exists.
2737 rbd_dev = self._VolumeToBlockdev(pool, name)
2739 # The mapping exists. Return it.
2742 # The mapping doesn't exist. Create it.
2743 map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
2744 result = utils.RunCmd(map_cmd)
2746 _ThrowError("rbd map failed (%s): %s",
2747 result.fail_reason, result.output)
2749 # Find the corresponding rbd device.
2750 rbd_dev = self._VolumeToBlockdev(pool, name)
2752 _ThrowError("rbd map succeeded, but could not find the rbd block"
2753 " device in output of showmapped, for volume: %s", name)
2755 # The device was successfully mapped. Return it.
2759 def _VolumeToBlockdev(cls, pool, volume_name):
2760 """Do the 'volume name'-to-'rbd block device' resolving.
2763 @param pool: RADOS pool to use
2764 @type volume_name: string
2765 @param volume_name: the name of the volume whose device we search for
2766 @rtype: string or None
2767 @return: block device path if the volume is mapped, else None
2771 # Newer versions of the rbd tool support json output formatting. Use it
2781 result = utils.RunCmd(showmap_cmd)
2783 logging.error("rbd JSON output formatting returned error (%s): %s,"
2784 "falling back to plain output parsing",
2785 result.fail_reason, result.output)
2786 raise RbdShowmappedJsonError
2788 return cls._ParseRbdShowmappedJson(result.output, volume_name)
2789 except RbdShowmappedJsonError:
2790 # For older versions of rbd, we have to parse the plain / text output
2792 showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
2793 result = utils.RunCmd(showmap_cmd)
2795 _ThrowError("rbd showmapped failed (%s): %s",
2796 result.fail_reason, result.output)
2798 return cls._ParseRbdShowmappedPlain(result.output, volume_name)
2801 def _ParseRbdShowmappedJson(output, volume_name):
2802 """Parse the json output of `rbd showmapped'.
2804 This method parses the json output of `rbd showmapped' and returns the rbd
2805 block device path (e.g. /dev/rbd0) that matches the given rbd volume.
2807 @type output: string
2808 @param output: the json output of `rbd showmapped'
2809 @type volume_name: string
2810 @param volume_name: the name of the volume whose device we search for
2811 @rtype: string or None
2812 @return: block device path if the volume is mapped, else None
2816 devices = serializer.LoadJson(output)
2817 except ValueError, err:
2818 _ThrowError("Unable to parse JSON data: %s" % err)
2821 for d in devices.values(): # pylint: disable=E1103
2825 _ThrowError("'name' key missing from json object %s", devices)
2827 if name == volume_name:
2828 if rbd_dev is not None:
2829 _ThrowError("rbd volume %s is mapped more than once", volume_name)
2831 rbd_dev = d["device"]
2836 def _ParseRbdShowmappedPlain(output, volume_name):
2837 """Parse the (plain / text) output of `rbd showmapped'.
2839 This method parses the output of `rbd showmapped' and returns
2840 the rbd block device path (e.g. /dev/rbd0) that matches the
2843 @type output: string
2844 @param output: the plain text output of `rbd showmapped'
2845 @type volume_name: string
2846 @param volume_name: the name of the volume whose device we search for
2847 @rtype: string or None
2848 @return: block device path if the volume is mapped, else None
2855 lines = output.splitlines()
2857 # Try parsing the new output format (ceph >= 0.55).
2858 splitted_lines = map(lambda l: l.split(), lines)
2860 # Check for empty output.
2861 if not splitted_lines:
2864 # Check showmapped output, to determine number of fields.
2865 field_cnt = len(splitted_lines[0])
2866 if field_cnt != allfields:
2867 # Parsing the new format failed. Fallback to parsing the old output
2869 splitted_lines = map(lambda l: l.split("\t"), lines)
2870 if field_cnt != allfields:
2871 _ThrowError("Cannot parse rbd showmapped output expected %s fields,"
2872 " found %s", allfields, field_cnt)
2875 filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
2878 if len(matched_lines) > 1:
2879 _ThrowError("rbd volume %s mapped more than once", volume_name)
2882 # rbd block device found. Return it.
2883 rbd_dev = matched_lines[0][devicefield]
2886 # The given volume is not mapped.
2890 """Assemble the device.
2896 """Shutdown the device.
2899 if not self.minor and not self.Attach():
2900 # The rbd device doesn't exist.
2903 # Unmap the block device from the Volume.
2904 self._UnmapVolumeFromBlockdev(self.unique_id)
2907 self.dev_path = None
2909 def _UnmapVolumeFromBlockdev(self, unique_id):
2910 """Unmaps the rbd device from the Volume it is mapped.
2912 Unmaps the rbd device from the Volume it was previously mapped to.
2913 This method should be idempotent if the Volume isn't mapped.
2916 pool = self.params[constants.LDP_POOL]
2919 # Check if the mapping already exists.
2920 rbd_dev = self._VolumeToBlockdev(pool, name)
2923 # The mapping exists. Unmap the rbd device.
2924 unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
2925 result = utils.RunCmd(unmap_cmd)
2927 _ThrowError("rbd unmap failed (%s): %s",
2928 result.fail_reason, result.output)
2930 def Open(self, force=False):
2931 """Make the device ready for I/O.
2937 """Notifies that the device will no longer be used for I/O.
2942 def Grow(self, amount, dryrun, backingstore):
2945 @type amount: integer
2946 @param amount: the amount (in mebibytes) to grow with
2947 @type dryrun: boolean
2948 @param dryrun: whether to execute the operation in simulation mode
2949 only, without actually increasing the size
2952 if not backingstore:
2954 if not self.Attach():
2955 _ThrowError("Can't attach to rbd device during Grow()")
2958 # the rbd tool does not support dry runs of resize operations.
2959 # Since rbd volumes are thinly provisioned, we assume
2960 # there is always enough free space for the operation.
2963 rbd_pool = self.params[constants.LDP_POOL]
2964 rbd_name = self.unique_id[1]
2965 new_size = self.size + amount
2967 # Resize the rbd volume (Image) inside the RADOS cluster.
2968 cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
2969 rbd_name, "--size", "%s" % new_size]
2970 result = utils.RunCmd(cmd)
2972 _ThrowError("rbd resize failed (%s): %s",
2973 result.fail_reason, result.output)
2976 class ExtStorageDevice(BlockDev):
2977 """A block device provided by an ExtStorage Provider.
2979 This class implements the External Storage Interface, which means
2980 handling of the externally provided block devices.
2983 def __init__(self, unique_id, children, size, params):
2984 """Attaches to an extstorage block device.
2987 super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
2988 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2989 raise ValueError("Invalid configuration data %s" % str(unique_id))
2991 self.driver, self.vol_name = unique_id
2992 self.ext_params = params
2994 self.major = self.minor = None
2998 def Create(cls, unique_id, children, size, params, excl_stor):
2999 """Create a new extstorage device.
3001 Provision a new volume using an extstorage provider, which will
3002 then be mapped to a block device.
3005 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
3006 raise errors.ProgrammerError("Invalid configuration data %s" %
3009 raise errors.ProgrammerError("extstorage device requested with"
3010 " exclusive_storage")
3012 # Call the External Storage's create script,
3013 # to provision a new Volume inside the External Storage
3014 _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
3017 return ExtStorageDevice(unique_id, children, size, params)
3020 """Remove the extstorage device.
3023 if not self.minor and not self.Attach():
3024 # The extstorage device doesn't exist.
3027 # First shutdown the device (remove mappings).
3030 # Call the External Storage's remove script,
3031 # to remove the Volume from the External Storage
3032 _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
3035 def Rename(self, new_id):
3036 """Rename this device.
3042 """Attach to an existing extstorage device.
3044 This method maps the extstorage volume that matches our name with
3045 a corresponding block device and then attaches to this device.
3048 self.attached = False
3050 # Call the External Storage's attach script,
3051 # to attach an existing Volume to a block device under /dev
3052 self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
3053 self.unique_id, self.ext_params)
3056 st = os.stat(self.dev_path)
3057 except OSError, err:
3058 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
3061 if not stat.S_ISBLK(st.st_mode):
3062 logging.error("%s is not a block device", self.dev_path)
3065 self.major = os.major(st.st_rdev)
3066 self.minor = os.minor(st.st_rdev)
3067 self.attached = True
3072 """Assemble the device.
3078 """Shutdown the device.
3081 if not self.minor and not self.Attach():
3082 # The extstorage device doesn't exist.
3085 # Call the External Storage's detach script,
3086 # to detach an existing Volume from it's block device under /dev
3087 _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
3091 self.dev_path = None
3093 def Open(self, force=False):
3094 """Make the device ready for I/O.
3100 """Notifies that the device will no longer be used for I/O.
3105 def Grow(self, amount, dryrun, backingstore):
3108 @type amount: integer
3109 @param amount: the amount (in mebibytes) to grow with
3110 @type dryrun: boolean
3111 @param dryrun: whether to execute the operation in simulation mode
3112 only, without actually increasing the size
3115 if not backingstore:
3117 if not self.Attach():
3118 _ThrowError("Can't attach to extstorage device during Grow()")
3121 # we do not support dry runs of resize operations for now.
3124 new_size = self.size + amount
3126 # Call the External Storage's grow script,
3127 # to grow an existing Volume inside the External Storage
3128 _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
3129 self.ext_params, str(self.size), grow=str(new_size))
3131 def SetInfo(self, text):
3132 """Update metadata with info text.
3135 # Replace invalid characters
3136 text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
3137 text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
3139 # Only up to 128 characters are allowed
3142 # Call the External Storage's setinfo script,
3143 # to set metadata for an existing Volume inside the External Storage
3144 _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
3145 self.ext_params, metadata=text)
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
      create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
      and the name of the Volume
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: string
  @param size: the size of the Volume in mebibytes
  @type grow: string
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # FIX: compare strings with `!=', not the identity operator `is not'
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    # FIX: string comparison with `!=' instead of `is not'
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
3224 def ExtStorageFromDisk(name, base_dir=None):
3225 """Create an ExtStorage instance from disk.
3227 This function will return an ExtStorage instance
3228 if the given name is a valid ExtStorage name.
3230 @type base_dir: string
3231 @keyword base_dir: Base directory containing ExtStorage installations.
3232 Defaults to a search in all the ES_SEARCH_PATH dirs.
3234 @return: True and the ExtStorage instance if we find a valid one, or
3235 False and the diagnose message on error
3238 if base_dir is None:
3239 es_base_dir = pathutils.ES_SEARCH_PATH
3241 es_base_dir = [base_dir]
3243 es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)
3246 return False, ("Directory for External Storage Provider %s not"
3247 " found in search path" % name)
3249 # ES Files dictionary, we will populate it with the absolute path
3250 # names; if the value is True, then it is a required file, otherwise
3252 es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
3254 es_files[constants.ES_PARAMETERS_FILE] = True
3256 for (filename, _) in es_files.items():
3257 es_files[filename] = utils.PathJoin(es_dir, filename)
3260 st = os.stat(es_files[filename])
3261 except EnvironmentError, err:
3262 return False, ("File '%s' under path '%s' is missing (%s)" %
3263 (filename, es_dir, utils.ErrnoOrStr(err)))
3265 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
3266 return False, ("File '%s' under path '%s' is not a regular file" %
3269 if filename in constants.ES_SCRIPTS:
3270 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
3271 return False, ("File '%s' under path '%s' is not executable" %
3275 if constants.ES_PARAMETERS_FILE in es_files:
3276 parameters_file = es_files[constants.ES_PARAMETERS_FILE]
3278 parameters = utils.ReadFile(parameters_file).splitlines()
3279 except EnvironmentError, err:
3280 return False, ("Error while reading the EXT parameters file at %s: %s" %
3281 (parameters_file, utils.ErrnoOrStr(err)))
3282 parameters = [v.split(None, 1) for v in parameters]
3285 objects.ExtStorage(name=name, path=es_dir,
3286 create_script=es_files[constants.ES_SCRIPT_CREATE],
3287 remove_script=es_files[constants.ES_SCRIPT_REMOVE],
3288 grow_script=es_files[constants.ES_SCRIPT_GROW],
3289 attach_script=es_files[constants.ES_SCRIPT_ATTACH],
3290 detach_script=es_files[constants.ES_SCRIPT_DETACH],
3291 setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
3292 verify_script=es_files[constants.ES_SCRIPT_VERIFY],
3293 supported_parameters=parameters)
3297 def _ExtStorageEnvironment(unique_id, ext_params,
3298 size=None, grow=None, metadata=None):
3299 """Calculate the environment for an External Storage script.
3301 @type unique_id: tuple (driver, vol_name)
3302 @param unique_id: ExtStorage pool and name of the Volume
3303 @type ext_params: dict
3304 @param ext_params: the EXT parameters
3306 @param size: size of the Volume (in mebibytes)
3308 @param grow: new size of Volume after grow (in mebibytes)
3309 @type metadata: string
3310 @param metadata: metadata info of the Volume
3312 @return: dict of environment variables
3315 vol_name = unique_id[1]
3318 result["VOL_NAME"] = vol_name
3321 for pname, pvalue in ext_params.items():
3322 result["EXTP_%s" % pname.upper()] = str(pvalue)
3324 if size is not None:
3325 result["VOL_SIZE"] = size
3327 if grow is not None:
3328 result["VOL_NEW_SIZE"] = grow
3330 if metadata is not None:
3331 result["VOL_METADATA"] = metadata
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  base = ("%s-%s-%s-%s.log" %
          (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, base)
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-backed storage is only registered when enabled at build time.
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
def _VerifyDiskType(dev_type):
  """Check that the given device type is known to this module.

  """
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
      represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
      represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device
3426 def Create(disk, children, excl_stor):
3429 @type disk: L{objects.Disk}
3430 @param disk: the disk object to create
3431 @type children: list of L{bdev.BlockDev}
3432 @param children: the list of block devices that are children of the device
3433 represented by the disk parameter
3434 @type excl_stor: boolean
3435 @param excl_stor: Whether exclusive_storage is active
3438 _VerifyDiskType(disk.dev_type)
3439 _VerifyDiskParams(disk)
3440 device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
3441 disk.params, excl_stor)