4 # Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Block device abstraction"""
29 import pyparsing as pyp
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import constants
37 from ganeti import objects
38 from ganeti import compat
39 from ganeti import netutils
40 from ganeti import pathutils
41 from ganeti import serializer
44 # Size of reads in _CanReadDevice
45 _DEVICE_READ_SIZE = 128 * 1024
# Raised when the JSON output of the mapping command cannot be parsed.
48 class RbdShowmappedJsonError(Exception):
49 """`rbd showmapped' JSON formatting error Exception class.
55 def _IgnoreError(fn, *args, **kwargs):
56 """Executes the given function, ignoring BlockDeviceErrors.
58 This is used in order to simplify the execution of cleanup or
62 @return: True when fn didn't raise an exception, False otherwise
# NOTE(review): Python 2-only except syntax; under Python 3 this must be
# "except errors.BlockDeviceError as err". The try/return lines are elided
# in this listing.
68 except errors.BlockDeviceError, err:
69 logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
73 def _ThrowError(msg, *args):
74 """Log an error to the node daemon and then raise an exception.
77 @param msg: the text of the exception
78 @raise errors.BlockDeviceError
# Always raises; msg is presumably %-formatted with *args and logged before
# raising (formatting/logging lines are elided in this listing).
84 raise errors.BlockDeviceError(msg)
87 def _CheckResult(result):
88 """Throws an error if the given result is a failed one.
90 @param result: result from RunCmd
# Delegates to _ThrowError, i.e. raises errors.BlockDeviceError on failure;
# the surrounding "if result.failed" guard is elided in this listing.
94 _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
98 def _CanReadDevice(path):
99 """Check if we can read from the given device.
101 This tries to read the first 128k of the device.
105 utils.ReadFile(path, size=_DEVICE_READ_SIZE)
# Any OS-level open/read failure is logged (with traceback) and treated as
# "not readable" rather than propagated to the caller.
107 except EnvironmentError:
108 logging.warning("Can't read from device %s", path, exc_info=True)
112 def _GetForbiddenFileStoragePaths():
113 """Builds a list of path prefixes which shouldn't be used for file storage.
# Expands each system prefix with the standard binary/library directories,
# yielding e.g. "/bin", "/usr/lib64", "/usr/local/sbin".
128 for prefix in ["", "/usr", "/usr/local"]:
129 paths.update(map(lambda s: "%s/%s" % (prefix, s),
130 ["bin", "lib", "lib32", "lib64", "sbin"]))
# Normalized and returned frozen, so the result can safely serve as a
# default argument value (see _ComputeWrongFileStoragePaths).
132 return compat.UniqueFrozenset(map(os.path.normpath, paths))
135 def _ComputeWrongFileStoragePaths(paths,
136 _forbidden=_GetForbiddenFileStoragePaths()):
137 """Cross-checks a list of paths for prefixes considered bad.
139 Some paths, e.g. "/bin", should not be used for file storage.
142 @param paths: List of paths to be checked
144 @return: Sorted list of paths for which the user should be warned
# A path is "wrong" if it is relative, exactly equals a forbidden prefix,
# or lies below one of the forbidden prefixes.  NOTE(review): relies on
# Python 2 filter() returning a list (truthy when non-empty); under
# Python 3 a filter object is always truthy.
148 return (not os.path.isabs(path) or
149 path in _forbidden or
150 filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
152 return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
155 def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
156 """Returns a list of file storage paths whose prefix is considered bad.
158 See L{_ComputeWrongFileStoragePaths}.
# The _filename parameter exists so callers (e.g. tests) can point at a
# different allowed-paths file.
161 return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
164 def _CheckFileStoragePath(path, allowed):
165 """Checks if a path is in a list of allowed paths for file storage.
168 @param path: Path to check
170 @param allowed: List of allowed paths
171 @raise errors.FileStoragePathError: If the path is not allowed
174 if not os.path.isabs(path):
175 raise errors.FileStoragePathError("File storage path must be absolute,"
# Relative entries in the allowed list are skipped (with a log message)
# rather than treated as matches.
179 if not os.path.isabs(i):
180 logging.info("Ignoring relative path '%s' for file storage", i)
# The path is acceptable as soon as it lies below one allowed directory;
# the early-return/break lines are elided in this listing, so falling
# through to the raise below means no allowed directory matched.
183 if utils.IsBelowDir(i, path):
186 raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
187 " storage. A possible fix might be to add"
188 " it to /etc/ganeti/file-storage-paths"
189 " on all nodes." % path)
192 def _LoadAllowedFileStoragePaths(filename):
193 """Loads file containing allowed file storage paths.
196 @return: List of allowed paths (can be an empty list)
200 contents = utils.ReadFile(filename)
# A missing or unreadable file presumably yields an empty result (handler
# body elided in this listing) instead of raising to the caller.
201 except EnvironmentError:
204 return utils.FilterEmptyLinesAndComments(contents)
207 def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
208 """Checks if a path is allowed for file storage.
211 @param path: Path to check
212 @raise errors.FileStoragePathError: If the path is not allowed
215 allowed = _LoadAllowedFileStoragePaths(_filename)
# Two-stage check: first reject globally forbidden prefixes (/bin etc.),
# then verify the path against the administrator-supplied whitelist.
217 if _ComputeWrongFileStoragePaths([path]):
218 raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
221 _CheckFileStoragePath(path, allowed)
224 class BlockDev(object):
225 """Block device abstract class.
227 A block device can be in the following states:
228 - not existing on the system, and by `Create()` it goes into:
229 - existing but not setup/not active, and by `Assemble()` goes into:
230 - active read-write and by `Open()` it goes into
231 - online (=used, or ready for use)
233 A device can also be online but read-only, however we are not using
234 the readonly state (LV has it, if needed in the future) and we are
235 usually looking at this like at a stack, so it's easier to
236 conceptualise the transition from not-existing to online and back
239 The many different states of the device are due to the fact that we
240 need to cover many device types:
241 - logical volumes are created, lvchange -a y $lv, and used
242 - drbd devices are attached to a local disk/remote peer and made primary
244 A block device is identified by three items:
245 - the /dev path of the device (dynamic)
246 - a unique ID of the device (static)
247 - its major/minor pair (dynamic)
249 Not all devices implement both the first two as distinct items. LVM
250 logical volumes have their unique ID (the pair volume group, logical
251 volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
252 the /dev path is again dynamic and the unique id is the pair (host1,
253 dev1), (host2, dev2).
255 You can get to a device in two ways:
256 - creating the (real) device, which returns you
257 an attached instance (lvcreate)
258 - attaching of a python instance to an existing (real) device
260 The second point, the attachment to a device, is different
261 depending on whether the device is assembled or not. At init() time,
262 we search for a device with the same unique_id as us. If found,
263 good. It also means that the device is already assembled. If not,
264 after assembly we'll have our correct major/minor.
# Base constructor: stores identification/config data.  NOTE(review):
# dev_path/major/minor assignments are elided in this listing; subclasses
# such as LogicalVolume set them (see LogicalVolume.__init__).
267 def __init__(self, unique_id, children, size, params):
268 self._children = children
270 self.unique_id = unique_id
273 self.attached = False
278 """Assemble the device from its components.
280 Implementations of this method by child classes must ensure that:
281 - after the device has been assembled, it knows its major/minor
282 numbers; this allows other devices (usually parents) to probe
283 correctly for their children
284 - calling this method on an existing, in-use device is safe
285 - if the device is already configured (and in an OK state),
286 this method is idempotent
292 """Find a device which matches our config and attach to it.
295 raise NotImplementedError
298 """Notifies that the device will no longer be used for I/O.
301 raise NotImplementedError
304 def Create(cls, unique_id, children, size, params, excl_stor):
305 """Create the device.
307 If the device cannot be created, it will return None
308 instead. Error messages go to the logging system.
310 Note that for some devices, the unique_id is used, and for other,
311 the children. The idea is that these two, taken together, are
312 enough for both creation and assembly (later).
315 raise NotImplementedError
318 """Remove this device.
320 This makes sense only for some of the device types: LV and file
321 storage. Also note that if the device can't attach, the removal
325 raise NotImplementedError
327 def Rename(self, new_id):
328 """Rename this device.
330 This may or may not make sense for a given device type.
333 raise NotImplementedError
335 def Open(self, force=False):
336 """Make the device ready for use.
338 This makes the device ready for I/O. For now, just the DRBD
341 The force parameter signifies that if the device has any kind of
342 --force thing, it should be used, we know what we are doing.
345 raise NotImplementedError
348 """Shut down the device, freeing its children.
350 This undoes the `Assemble()` work, except for the child
351 assembling; as such, the children on the device are still
352 assembled after this call.
355 raise NotImplementedError
357 def SetSyncParams(self, params):
358 """Adjust the synchronization parameters of the mirror.
360 In case this is not a mirroring device, this is no-op.
362 @param params: dictionary of LD level disk parameters related to the
365 @return: a list of error messages, emitted both by the current node and by
366 children. An empty list means no errors.
# Recurses into children, collecting their error messages.
371 for child in self._children:
372 result.extend(child.SetSyncParams(params))
375 def PauseResumeSync(self, pause):
376 """Pause/Resume the sync of the mirror.
378 In case this is not a mirroring device, this is no-op.
380 @param pause: Whether to pause or resume
# Boolean AND across children: the operation only counts as successful if
# it succeeded for every child.
385 for child in self._children:
386 result = result and child.PauseResumeSync(pause)
389 def GetSyncStatus(self):
390 """Returns the sync status of the device.
392 If this device is a mirroring device, this function returns the
393 status of the mirror.
395 If sync_percent is None, it means the device is not syncing.
397 If estimated_time is None, it means we can't estimate
398 the time needed, otherwise it's the time left in seconds.
400 If is_degraded is True, it means the device is missing
401 redundancy. This is usually a sign that something went wrong in
402 the device setup, if sync_percent is None.
404 The ldisk parameter represents the degradation of the local
405 data. This is only valid for some devices, the rest will always
406 return False (not degraded).
408 @rtype: objects.BlockDevStatus
# Default implementation: report a healthy, non-syncing device.
411 return objects.BlockDevStatus(dev_path=self.dev_path,
417 ldisk_status=constants.LDS_OKAY)
419 def CombinedSyncStatus(self):
420 """Calculate the mirror status recursively for our children.
422 The return value is the same as for `GetSyncStatus()` except the
423 minimum percent and maximum time are calculated across our
426 @rtype: objects.BlockDevStatus
429 status = self.GetSyncStatus()
431 min_percent = status.sync_percent
432 max_time = status.estimated_time
433 is_degraded = status.is_degraded
434 ldisk_status = status.ldisk_status
# Fold each child's status in: worst-case (minimum) sync percent,
# worst-case (maximum) remaining time, OR of degraded flags and the
# "worst" (numerically largest) local-disk status.
437 for child in self._children:
438 child_status = child.GetSyncStatus()
440 if min_percent is None:
441 min_percent = child_status.sync_percent
442 elif child_status.sync_percent is not None:
443 min_percent = min(min_percent, child_status.sync_percent)
446 max_time = child_status.estimated_time
447 elif child_status.estimated_time is not None:
448 max_time = max(max_time, child_status.estimated_time)
450 is_degraded = is_degraded or child_status.is_degraded
452 if ldisk_status is None:
453 ldisk_status = child_status.ldisk_status
454 elif child_status.ldisk_status is not None:
455 ldisk_status = max(ldisk_status, child_status.ldisk_status)
457 return objects.BlockDevStatus(dev_path=self.dev_path,
460 sync_percent=min_percent,
461 estimated_time=max_time,
462 is_degraded=is_degraded,
463 ldisk_status=ldisk_status)
465 def SetInfo(self, text):
466 """Update metadata with info text.
468 Only supported for some device types.
# Propagate the info text to all children (call elided in this listing).
471 for child in self._children:
474 def Grow(self, amount, dryrun, backingstore):
475 """Grow the block device.
477 @type amount: integer
478 @param amount: the amount (in mebibytes) to grow with
479 @type dryrun: boolean
480 @param dryrun: whether to execute the operation in simulation mode
481 only, without actually increasing the size
482 @param backingstore: whether to execute the operation on backing storage
483 only, or on "logical" storage only; e.g. DRBD is logical storage,
484 whereas LVM, file, RBD are backing storage
487 raise NotImplementedError
489 def GetActualSize(self):
490 """Return the actual disk size.
492 @note: the device needs to be active when this is called
495 assert self.attached, "BlockDevice not attached in GetActualSize()"
# "blockdev --getsize64" prints the size in bytes; whether a unit
# conversion happens before returning is not visible in this listing.
496 result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
498 _ThrowError("blockdev failed (%s): %s",
499 result.fail_reason, result.output)
501 sz = int(result.output.strip())
# NOTE(review): Python 2-only "except E, err" syntax (as throughout).
502 except (ValueError, TypeError), err:
503 _ThrowError("Failed to parse blockdev output: %s", str(err))
# Debug representation showing identity and topology of the device.
507 return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
508 (self.__class__, self.unique_id, self._children,
509 self.major, self.minor, self.dev_path))
512 class LogicalVolume(BlockDev):
513 """Logical Volume block device.
# Name validation data, derived from the lvm(8) manpage (see _ValidateName).
516 _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
517 _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
518 _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
520 def __init__(self, unique_id, children, size, params):
521 """Attaches to a LV device.
523 The unique_id is a tuple (vg_name, lv_name)
526 super(LogicalVolume, self).__init__(unique_id, children, size, params)
527 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
528 raise ValueError("Invalid configuration data %s" % str(unique_id))
529 self._vg_name, self._lv_name = unique_id
530 self._ValidateName(self._vg_name)
531 self._ValidateName(self._lv_name)
532 self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
# Degraded until Attach() proves otherwise; LVM metadata unknown until then.
533 self._degraded = True
534 self.major = self.minor = self.pe_size = self.stripe_count = None
538 def _GetStdPvSize(pvs_info):
539 """Return the standard PV size (used with exclusive storage).
541 @param pvs_info: list of objects.LvmPvInfo, cannot be empty
# Use the smallest PV, discounted by the partition margin/reserve factors.
546 assert len(pvs_info) > 0
547 smallest = min([pv.size for pv in pvs_info])
548 return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
551 def _ComputeNumPvs(size, pvs_info):
552 """Compute the number of PVs needed for an LV (with exclusive storage).
555 @param size: LV size in MiB
556 @param pvs_info: list of objects.LvmPvInfo, cannot be empty
558 @return: number of PVs needed
560 assert len(pvs_info) > 0
561 pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
562 return int(math.ceil(float(size) / pv_size))
565 def _GetEmptyPvNames(pvs_info, max_pvs=None):
566 """Return a list of empty PVs, by name.
# NOTE(review): relies on Python 2 filter()/map() returning lists (the
# slice on the next line would fail on a Python 3 filter object).
569 empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
570 if max_pvs is not None:
571 empty_pvs = empty_pvs[:max_pvs]
572 return map((lambda pv: pv.name), empty_pvs)
575 def Create(cls, unique_id, children, size, params, excl_stor):
576 """Create a new logical volume.
579 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
580 raise errors.ProgrammerError("Invalid configuration data %s" %
582 vg_name, lv_name = unique_id
583 cls._ValidateName(vg_name)
584 cls._ValidateName(lv_name)
585 pvs_info = cls.GetPVInfo([vg_name])
588 msg = "No (empty) PVs found"
590 msg = "Can't compute PV info for vg %s" % vg_name
# Prefer PVs with the most free space.
592 pvs_info.sort(key=(lambda pv: pv.free), reverse=True)
594 pvlist = [pv.name for pv in pvs_info]
# lvcreate uses ':' as a separator in PV specifications, so PV names
# containing it cannot be passed through safely.
595 if compat.any(":" in v for v in pvlist):
596 _ThrowError("Some of your PVs have the invalid character ':' in their"
597 " name, this is not supported - please filter them out"
598 " in lvm.conf using either 'filter' or 'preferred_names'")
600 current_pvs = len(pvlist)
601 desired_stripes = params[constants.LDP_STRIPES]
602 stripes = min(current_pvs, desired_stripes)
# Exclusive-storage branch: the LV gets whole, empty PVs to itself.
605 (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
609 req_pvs = cls._ComputeNumPvs(size, pvs_info)
610 pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
611 current_pvs = len(pvlist)
612 if current_pvs < req_pvs:
613 _ThrowError("Not enough empty PVs to create a disk of %d MB:"
614 " %d available, %d needed", size, current_pvs, req_pvs)
615 assert current_pvs == len(pvlist)
616 if stripes > current_pvs:
617 # No warning issued for this, as it's no surprise
618 stripes = current_pvs
621 if stripes < desired_stripes:
622 logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
623 " available.", desired_stripes, vg_name, current_pvs)
624 free_size = sum([pv.free for pv in pvs_info])
625 # The size constraint should have been checked from the master before
626 # calling the create function.
628 _ThrowError("Not enough free space: required %s,"
629 " available %s", size, free_size)
631 # If the free space is not well distributed, we won't be able to
632 # create an optimally-striped volume; in that case, we want to try
633 # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
635 cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
636 for stripes_arg in range(stripes, 0, -1):
637 result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
638 if not result.failed:
# Reached only when every stripe count failed.
641 _ThrowError("LV create failed (%s): %s",
642 result.fail_reason, result.output)
643 return LogicalVolume(unique_id, children, size, params)
646 def _GetVolumeInfo(lvm_cmd, fields):
647 """Returns LVM volume info using lvm_cmd
649 @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
650 @param fields: Fields to return
651 @return: A list of dicts each with the parsed fields
655 raise errors.ProgrammerError("No fields specified")
# Machine-readable invocation: no headers/units suffix, fields joined with
# an explicit separator (sep is defined on an elided line).
658 cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
659 "--separator=%s" % sep, "-o%s" % ",".join(fields)]
661 result = utils.RunCmd(cmd)
663 raise errors.CommandError("Can't get the volume information: %s - %s" %
664 (result.fail_reason, result.output))
667 for line in result.stdout.splitlines():
668 splitted_fields = line.strip().split(sep)
# Sanity check: every output line must yield exactly the requested fields.
670 if len(fields) != len(splitted_fields):
671 raise errors.CommandError("Can't parse %s output: line '%s'" %
674 data.append(splitted_fields)
679 def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
680 """Get the free space info for PVs in a volume group.
682 @param vg_names: list of volume group names, if empty all will be returned
683 @param filter_allocatable: whether to skip over unallocatable PVs
684 @param include_lvs: whether to include a list of LVs hosted on each PV
687 @return: list of objects.LvmPvInfo objects
690 # We request "lv_name" field only if we care about LVs, so we don't get
691 # a long list of entries with many duplicates unless we really have to.
692 # The duplicate "pv_name" field will be ignored.
698 info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
699 "pv_attr", "pv_size", lvfield])
700 except errors.GenericError, err:
701 logging.error("Can't get PV information: %s", err)
704 # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
705 # pair. We sort entries by PV name and then LV name, so it's easy to weed
708 info.sort(key=(lambda i: (i[0], i[5])))
711 for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
712 # (possibly) skip over pvs which are not allocatable
713 if filter_allocatable and pv_attr[0] != "a":
715 # (possibly) skip over pvs which are not in the right volume group(s)
716 if vg_names and vg_name not in vg_names:
718 # Beware of duplicates (check before inserting)
719 if lastpvi and lastpvi.name == pv_name:
720 if include_lvs and lv_name:
721 if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
722 lastpvi.lv_list.append(lv_name)
724 if include_lvs and lv_name:
728 lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
729 size=float(pv_size), free=float(pv_free),
730 attributes=pv_attr, lv_list=lvl)
736 def _GetExclusiveStorageVgFree(cls, vg_name):
737 """Return the free disk space in the given VG, in exclusive storage mode.
739 @type vg_name: string
740 @param vg_name: VG name
742 @return: free space in MiB
# With exclusive storage, free space is counted in whole empty PVs, each
# contributing one "standard" PV size.
744 pvs_info = cls.GetPVInfo([vg_name])
747 pv_size = cls._GetStdPvSize(pvs_info)
748 num_pvs = len(cls._GetEmptyPvNames(pvs_info))
749 return pv_size * num_pvs
752 def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
753 """Get the free space info for specific VGs.
755 @param vg_names: list of volume group names, if empty all will be returned
756 @param excl_stor: whether exclusive_storage is enabled
757 @param filter_readonly: whether to skip over readonly VGs
760 @return: list of tuples (free_space, total_size, name) with free_space in
765 info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
767 except errors.GenericError, err:
768 logging.error("Can't get VG information: %s", err)
772 for vg_name, vg_free, vg_attr, vg_size in info:
773 # (possibly) skip over vgs which are not writable
774 if filter_readonly and vg_attr[0] == "r":
776 # (possibly) skip over vgs which are not in the right volume group(s)
777 if vg_names and vg_name not in vg_names:
779 # Exclusive storage needs a different concept of free space
781 es_free = cls._GetExclusiveStorageVgFree(vg_name)
782 assert es_free <= vg_free
784 data.append((float(vg_free), float(vg_size), vg_name))
789 def _ValidateName(cls, name):
790 """Validates that a given name is valid as VG or LV name.
792 The list of valid characters and restricted names is taken out of
793 the lvm(8) manpage, with the simplification that we enforce both
794 VG and LV restrictions on the names.
797 if (not cls._VALID_NAME_RE.match(name) or
798 name in cls._INVALID_NAMES or
799 compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
800 _ThrowError("Invalid LVM name '%s'", name)
803 """Remove this logical volume.
# If we cannot attach, the LV is assumed to already be gone.
806 if not self.minor and not self.Attach():
807 # the LV does not exist
809 result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
810 (self._vg_name, self._lv_name)])
812 _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
814 def Rename(self, new_id):
815 """Rename this logical volume.
818 if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
819 raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
820 new_vg, new_name = new_id
# Cross-VG moves are not supported by lvrename.
821 if new_vg != self._vg_name:
822 raise errors.ProgrammerError("Can't move a logical volume across"
823 " volume groups (from %s to to %s)" %
824 (self._vg_name, new_vg))
825 result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
827 _ThrowError("Failed to rename the logical volume: %s", result.output)
# Keep the cached name and device path in sync with the rename.
828 self._lv_name = new_name
829 self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
832 """Attach to an existing LV.
834 This method will try to see if an existing and active LV exists
835 which matches our name. If so, its major/minor will be
839 self.attached = False
840 result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
841 "--units=k", "--nosuffix",
842 "-olv_attr,lv_kernel_major,lv_kernel_minor,"
843 "vg_extent_size,stripes", self.dev_path])
845 logging.error("Can't find LV %s: %s, %s",
846 self.dev_path, result.fail_reason, result.output)
848 # the output can (and will) have multiple lines for multi-segment
849 # LVs, as the 'stripes' parameter is a segment one, so we take
850 # only the last entry, which is the one we're interested in; note
851 # that with LVM2 anyway the 'stripes' value must be constant
852 # across segments, so this is a no-op actually
853 out = result.stdout.splitlines()
854 if not out: # totally empty result? splitlines() returns at least
855 # one line for any non-empty string
856 logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
858 out = out[-1].strip().rstrip(",")
861 logging.error("Can't parse LVS output, len(%s) != 5", str(out))
864 status, major, minor, pe_size, stripes = out
866 logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
872 except (TypeError, ValueError), err:
873 logging.error("lvs major/minor cannot be parsed: %s", str(err))
876 pe_size = int(float(pe_size))
877 except (TypeError, ValueError), err:
878 logging.error("Can't parse vg extent size: %s", err)
882 stripes = int(stripes)
883 except (TypeError, ValueError), err:
884 logging.error("Can't parse the number of stripes: %s", err)
# Cache the parsed metadata for later use (e.g. Grow()).
889 self.pe_size = pe_size
890 self.stripe_count = stripes
891 self._degraded = status[0] == "v" # virtual volume, i.e. no backing storage
897 """Assemble the device.
899 We always run `lvchange -ay` on the LV to ensure it's active before
900 use, as there were cases when xenvg was not active after boot
901 (also possibly after disk issues).
904 result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
906 _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
909 """Shutdown the device.
911 This is a no-op for the LV device type, as we don't deactivate the
917 def GetSyncStatus(self):
918 """Returns the sync status of the device.
920 If this device is a mirroring device, this function returns the
921 status of the mirror.
923 For logical volumes, sync_percent and estimated_time are always
924 None (no recovery in progress, as we don't handle the mirrored LV
925 case). The is_degraded parameter is the inverse of the ldisk
928 For the ldisk parameter, we check if the logical volume has the
929 'virtual' type, which means it's not backed by existing storage
930 anymore (read from it return I/O error). This happens after a
931 physical disk failure and subsequent 'vgreduce --removemissing' on
934 The status was already read in Attach, so we just return it.
936 @rtype: objects.BlockDevStatus
940 ldisk_status = constants.LDS_FAULTY
942 ldisk_status = constants.LDS_OKAY
944 return objects.BlockDevStatus(dev_path=self.dev_path,
949 is_degraded=self._degraded,
950 ldisk_status=ldisk_status)
952 def Open(self, force=False):
953 """Make the device ready for I/O.
955 This is a no-op for the LV device type.
961 """Notifies that the device will no longer be used for I/O.
963 This is a no-op for the LV device type.
968 def Snapshot(self, size):
969 """Create a snapshot copy of an lvm block device.
971 @returns: tuple (vg, lv)
974 snap_name = self._lv_name + ".snap"
976 # remove existing snapshot if found
977 snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
978 _IgnoreError(snap.Remove)
980 vg_info = self.GetVGInfo([self._vg_name], False)
982 _ThrowError("Can't compute VG info for vg %s", self._vg_name)
983 free_size, _, _ = vg_info[0]
985 _ThrowError("Not enough free space: required %s,"
986 " available %s", size, free_size)
# "-s" makes lvcreate build a snapshot of our device.
988 _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
989 "-n%s" % snap_name, self.dev_path]))
991 return (self._vg_name, snap_name)
993 def _RemoveOldInfo(self):
994 """Try to remove old tags from the lv.
997 result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
1001 raw_tags = result.stdout.strip()
# Tags are reported comma-separated; delete each one individually.
1003 for tag in raw_tags.split(","):
1004 _CheckResult(utils.RunCmd(["lvchange", "--deltag",
1005 tag.strip(), self.dev_path]))
1007 def SetInfo(self, text):
1008 """Update metadata with info text.
1011 BlockDev.SetInfo(self, text)
1013 self._RemoveOldInfo()
1015 # Replace invalid characters
1016 text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
1017 text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
1019 # Only up to 128 characters are allowed
1022 _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
1024 def Grow(self, amount, dryrun, backingstore):
1025 """Grow the logical volume.
# LVs are backing storage; nothing to do when only logical storage is
# being grown (early return elided in this listing).
1028 if not backingstore:
1030 if self.pe_size is None or self.stripe_count is None:
1031 if not self.Attach():
1032 _ThrowError("Can't attach to LV during Grow()")
# Round the grow amount up to a multiple of the full stripe size so the
# extension can be striped evenly.
1033 full_stripe_size = self.pe_size * self.stripe_count
1036 rest = amount % full_stripe_size
1038 amount += full_stripe_size - rest
1039 cmd = ["lvextend", "-L", "+%dk" % amount]
1041 cmd.append("--test")
1042 # we try multiple algorithms since the 'best' ones might not have
1043 # space available in the right place, but later ones might (since
1044 # they have less constraints); also note that only recent LVM
1046 for alloc_policy in "contiguous", "cling", "normal":
1047 result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
1048 if not result.failed:
1050 _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
1053 class DRBD8Status(object):
1054 """A DRBD status representation class.
1056 Note that this doesn't support unconfigured devices (cs:Unconfigured).
# Regexes matching one device line of /proc/drbd.  NOTE(review): the
# continuation string literals below ("\s+ds:..." and "finish: ...") are
# NOT raw strings; they rely on Python passing the unknown escape "\s"
# through unchanged (a DeprecationWarning on modern Python).
1059 UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
1060 LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
1061 "\s+ds:([^/]+)/(\S+)\s+.*$")
1062 SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
1063 # Due to a bug in drbd in the kernel, introduced in
1064 # commit 4b0715f096 (still unfixed as of 2011-08-22)
1066 "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
# Connection states (the "cs:" field).
1068 CS_UNCONFIGURED = "Unconfigured"
1069 CS_STANDALONE = "StandAlone"
1070 CS_WFCONNECTION = "WFConnection"
1071 CS_WFREPORTPARAMS = "WFReportParams"
1072 CS_CONNECTED = "Connected"
1073 CS_STARTINGSYNCS = "StartingSyncS"
1074 CS_STARTINGSYNCT = "StartingSyncT"
1075 CS_WFBITMAPS = "WFBitMapS"
1076 CS_WFBITMAPT = "WFBitMapT"
1077 CS_WFSYNCUUID = "WFSyncUUID"
1078 CS_SYNCSOURCE = "SyncSource"
1079 CS_SYNCTARGET = "SyncTarget"
1080 CS_PAUSEDSYNCS = "PausedSyncS"
1081 CS_PAUSEDSYNCT = "PausedSyncT"
# Connection states that count as "in resync" (members elided here).
1082 CSET_SYNC = compat.UniqueFrozenset([
# Disk states (the "ds:" field).
1095 DS_DISKLESS = "Diskless"
1096 DS_ATTACHING = "Attaching" # transient state
1097 DS_FAILED = "Failed" # transient state, next: diskless
1098 DS_NEGOTIATING = "Negotiating" # transient state
1099 DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
1100 DS_OUTDATED = "Outdated"
1101 DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
1102 DS_CONSISTENT = "Consistent"
1103 DS_UPTODATE = "UpToDate" # normal state
# Roles (the "ro:"/"st:" field).
1105 RO_PRIMARY = "Primary"
1106 RO_SECONDARY = "Secondary"
1107 RO_UNKNOWN = "Unknown"
# Parse a single (joined) /proc/drbd device line into boolean flags and
# sync progress figures.
1109 def __init__(self, procline):
1110 u = self.UNCONF_RE.match(procline)
1112 self.cstatus = self.CS_UNCONFIGURED
1113 self.lrole = self.rrole = self.ldisk = self.rdisk = None
1115 m = self.LINE_RE.match(procline)
1117 raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
1118 self.cstatus = m.group(1)
1119 self.lrole = m.group(2)
1120 self.rrole = m.group(3)
1121 self.ldisk = m.group(4)
1122 self.rdisk = m.group(5)
1124 # end reading of data from the LINE_RE or UNCONF_RE
# Convenience flags derived from the raw fields above.
1126 self.is_standalone = self.cstatus == self.CS_STANDALONE
1127 self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
1128 self.is_connected = self.cstatus == self.CS_CONNECTED
1129 self.is_primary = self.lrole == self.RO_PRIMARY
1130 self.is_secondary = self.lrole == self.RO_SECONDARY
1131 self.peer_primary = self.rrole == self.RO_PRIMARY
1132 self.peer_secondary = self.rrole == self.RO_SECONDARY
1133 self.both_primary = self.is_primary and self.peer_primary
1134 self.both_secondary = self.is_secondary and self.peer_secondary
1136 self.is_diskless = self.ldisk == self.DS_DISKLESS
1137 self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE
1139 self.is_in_resync = self.cstatus in self.CSET_SYNC
1140 self.is_in_use = self.cstatus != self.CS_UNCONFIGURED
# Extract sync percentage and remaining time when a sync line matches.
1142 m = self.SYNC_RE.match(procline)
1144 self.sync_percent = float(m.group(1))
1145 hours = int(m.group(2))
1146 minutes = int(m.group(3))
1147 seconds = int(m.group(4))
1148 self.est_time = hours * 3600 + minutes * 60 + seconds
1150 # we have (in this if branch) no percent information, but if
1151 # we're resyncing we need to 'fake' a sync percent information,
1152 # as this is how cmdlib determines if it makes sense to wait for
1154 if self.is_in_resync:
1155 self.sync_percent = 0
1157 self.sync_percent = None
1158 self.est_time = None
# W0223: abstract methods from BlockDev are deliberately left unimplemented.
1161 class BaseDRBD(BlockDev): # pylint: disable=W0223
1164 This class contains a few bits of common functionality between the
1165 0.7 and 8.x versions of DRBD.
# Matches the "version:" header of /proc/drbd, capturing kernel-module
# version, api and proto(-proto2) numbers (see _GetVersion).
1168 _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
1169 r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
1170 _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
1171 _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")
# Connection-state names as they appear in /proc/drbd.
1174 _ST_UNCONFIGURED = "Unconfigured"
1175 _ST_WFCONNECTION = "WFConnection"
1176 _ST_CONNECTED = "Connected"
1178 _STATUS_FILE = constants.DRBD_STATUS_FILE
1179 _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
1182 def _GetProcData(filename=_STATUS_FILE):
1183 """Return data from /proc/drbd.
1187 data = utils.ReadFile(filename).splitlines()
1188 except EnvironmentError, err:
1189 if err.errno == errno.ENOENT:
1190 _ThrowError("The file %s cannot be opened, check if the module"
1191 " is loaded (%s)", filename, str(err))
1193 _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
1195 _ThrowError("Can't read any data from %s", filename)
1199 def _MassageProcData(cls, data):
1200 """Transform the output of _GetProdData into a nicer form.
1202 @return: a dictionary of minor: joined lines from /proc/drbd
1207 old_minor = old_line = None
1209 if not line: # completely empty lines, as can be returned by drbd8.0+
1211 lresult = cls._VALID_LINE_RE.match(line)
1212 if lresult is not None:
1213 if old_minor is not None:
1214 results[old_minor] = old_line
1215 old_minor = int(lresult.group(1))
1218 if old_minor is not None:
1219 old_line += " " + line.strip()
1221 if old_minor is not None:
1222 results[old_minor] = old_line
1226 def _GetVersion(cls, proc_data):
1227 """Return the DRBD version.
1229 This will return a dict with keys:
1235 - proto2 (only on drbd > 8.2.X)
1238 first_line = proc_data[0].strip()
1239 version = cls._VERSION_RE.match(first_line)
1241 raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
1244 values = version.groups()
1246 "k_major": int(values[0]),
1247 "k_minor": int(values[1]),
1248 "k_point": int(values[2]),
1249 "api": int(values[3]),
1250 "proto": int(values[4]),
1252 if values[5] is not None:
1253 retval["proto2"] = values[5]
1258 def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
1259 """Returns DRBD usermode_helper currently set.
1263 helper = utils.ReadFile(filename).splitlines()[0]
1264 except EnvironmentError, err:
1265 if err.errno == errno.ENOENT:
1266 _ThrowError("The file %s cannot be opened, check if the module"
1267 " is loaded (%s)", filename, str(err))
1269 _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
1271 _ThrowError("Can't read any data from %s", filename)
1275 def _DevPath(minor):
1276 """Return the path to a drbd device for a given minor.
1279 return "/dev/drbd%d" % minor
1282 def GetUsedDevs(cls):
1283 """Compute the list of used DRBD devices.
1286 data = cls._GetProcData()
1290 match = cls._VALID_LINE_RE.match(line)
1293 minor = int(match.group(1))
1294 state = match.group(2)
1295 if state == cls._ST_UNCONFIGURED:
1297 used_devs[minor] = state, line
1301 def _SetFromMinor(self, minor):
1302 """Set our parameters based on the given minor.
1304 This sets our minor variable and our dev_path.
1308 self.minor = self.dev_path = None
1309 self.attached = False
1312 self.dev_path = self._DevPath(minor)
1313 self.attached = True
1316 def _CheckMetaSize(meta_device):
1317 """Check if the given meta device looks like a valid one.
1319 This currently only checks the size, which must be around
1323 result = utils.RunCmd(["blockdev", "--getsize", meta_device])
1325 _ThrowError("Failed to get device size: %s - %s",
1326 result.fail_reason, result.output)
1328 sectors = int(result.stdout)
1329 except (TypeError, ValueError):
1330 _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
1331 num_bytes = sectors * 512
1332 if num_bytes < 128 * 1024 * 1024: # less than 128MiB
1333 _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
1334 # the maximum *valid* size of the meta device when living on top
1335 # of LVM is hard to compute: it depends on the number of stripes
1336 # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
1337 # (normal size), but an eight-stripe 128MB PE will result in a 1GB
1338 # size meta device; as such, we restrict it to 1GB (a little bit
1339 # too generous, but making assumptions about PE size is hard)
1340 if num_bytes > 1024 * 1024 * 1024:
1341 _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))
1343 def Rename(self, new_id):
1346 This is not supported for drbd devices.
1349 raise errors.ProgrammerError("Can't rename a drbd device")
class DRBD8(BaseDRBD):
  """DRBD v8.x block device.

  This implements the local host part of the DRBD device, i.e. it
  doesn't do anything to the supposed peer. If you need a fully
  connected DRBD pair, you need to use this class on both hosts.

  The unique_id for the drbd device is a (local_ip, local_port,
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
  two children: the data device and the meta_device. The meta device
  is checked for valid size and is zeroed on create.

  """
  # NOTE(review): other class-level constants (e.g. _MAX_MINORS,
  # _PARSE_SHOW) appear elided from this excerpt.

  # how long (seconds) to wait for network re-configuration to settle
  _NET_RECONFIG_TIMEOUT = 60

  # command line options for barriers
  _DISABLE_DISK_OPTION = "--no-disk-barrier" # -a
  _DISABLE_DRAIN_OPTION = "--no-disk-drain" # -D
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes" # -m
  def __init__(self, unique_id, children, size, params):
    """Initialize the DRBD8 device.

    @param unique_id: (local_ip, local_port, remote_ip, remote_port,
        local_minor, secret) tuple
    @param children: [data_device, meta_device] or empty list

    """
    # NOTE(review): the body of this first check appears elided
    if children and children.count(None) > 0:
    if len(children) not in (0, 2):
      raise ValueError("Invalid configuration data %s" % str(children))
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._lhost, self._lport,
     self._rhost, self._rport,
     self._aminor, self._secret) = unique_id
    # best-effort: an unreadable meta device is tolerated here
    if not _CanReadDevice(children[1].dev_path):
      logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
    super(DRBD8, self).__init__(unique_id, children, size, params)
    self.major = self._DRBD_MAJOR
    version = self._GetVersion(self._GetProcData())
    # only DRBD 8.x kernels are supported by this class
    if version["k_major"] != 8:
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                  " usage: kernel is %s.%s, ganeti wants 8.x",
                  version["k_major"], version["k_minor"])

    # a device cannot connect to itself
    if (self._lhost is not None and self._lhost == self._rhost and
        self._lport == self._rport):
      raise ValueError("Invalid configuration data, same local/remote %s" %
  def _InitMeta(cls, minor, dev_path):
    """Initialize a meta device.

    This will not work if the given minor is in use.

    """
    # Zero the metadata first, in order to make sure drbdmeta doesn't
    # try to auto-detect existing filesystems or similar (see
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
    # care about the first 128MB of data in the device, even though it
    # can be bigger
    result = utils.RunCmd([constants.DD_CMD,
                           "if=/dev/zero", "of=%s" % dev_path,
                           "bs=1048576", "count=128", "oflag=direct"])
      _ThrowError("Can't wipe the meta device: %s", result.output)

    result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
                           "v08", dev_path, "0", "create-md"])
      _ThrowError("Can't initialize meta device: %s", result.output)
  def _FindUnusedMinor(cls):
    """Find an unused DRBD device.

    This is specific to 8.x as the minors are allocated dynamically,
    so non-existing numbers up to a max minor count are actually free.

    """
    data = cls._GetProcData()
      # a minor present but Unconfigured can be reused directly
      match = cls._UNUSED_LINE_RE.match(line)
        return int(match.group(1))
      match = cls._VALID_LINE_RE.match(line)
        minor = int(match.group(1))
        highest = max(highest, minor)
    if highest is None: # there are no minors in use at all
    if highest >= cls._MAX_MINORS:
      logging.error("Error: no free drbd minors!")
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
  def _GetShowParser(cls):
    """Return a parser for `drbd show` output.

    This will either create or return an already-created parser for the
    output of the command `drbd show`.

    """
    # the parser is built once and cached on the class
    if cls._PARSE_SHOW is not None:
      return cls._PARSE_SHOW

    # punctuation tokens, all suppressed from the parse results
    lbrace = pyp.Literal("{").suppress()
    rbrace = pyp.Literal("}").suppress()
    lbracket = pyp.Literal("[").suppress()
    rbracket = pyp.Literal("]").suppress()
    semi = pyp.Literal(";").suppress()
    colon = pyp.Literal(":").suppress()
    # this also converts the value to an int
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))

    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
    defa = pyp.Literal("_is_default").suppress()
    dbl_quote = pyp.Literal('"').suppress()

    keyword = pyp.Word(pyp.alphanums + "-")

    # value types
    value = pyp.Word(pyp.alphanums + "_-/.:")
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Word(pyp.nums + ".") + colon + number)
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
                 pyp.Optional(rbracket) + colon + number)
    # meta device, extended syntax
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
    # device name, extended syntax
    device_value = pyp.Literal("minor").suppress() + number

    # a statement: keyword, optional value(s), optional default marker,
    # terminated by ';' (NOTE(review): a continuation line of this
    # expression appears elided in this excerpt)
    stmt = (~rbrace + keyword + ~lbrace +
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
            pyp.Optional(defa) + semi +
            pyp.Optional(pyp.restOfLine).suppress())

    # a section: name + braced list of statements
    section_name = pyp.Word(pyp.alphas + "_")
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace

    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))

    cls._PARSE_SHOW = bnf
  def _GetShowData(cls, minor):
    """Return the `drbdsetup show` data for a minor.

    @return: stdout of the command (errors are logged, not raised)

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
      # best-effort: log and fall through, the caller gets what we got
      logging.error("Can't display the drbd config: %s - %s",
                    result.fail_reason, result.output)
    return result.stdout
  def _GetDevInfo(cls, out):
    """Parse details about a given DRBD minor.

    This return, if available, the local backing device (as a path)
    and the local and remote (ip, port) information from a string
    containing the output of the `drbdsetup show` command as returned
    by _GetShowData.

    """
    bnf = cls._GetShowParser()
      results = bnf.parseString(out)
    except pyp.ParseException, err:
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))

    # and massage the results into our desired format
    for section in results:
      if sname == "_this_host":
        # our side: backing disk, meta device/index and local address
        for lst in section[1:]:
          if lst[0] == "disk":
            data["local_dev"] = lst[1]
          elif lst[0] == "meta-disk":
            data["meta_dev"] = lst[1]
            data["meta_index"] = lst[2]
          elif lst[0] == "address":
            data["local_addr"] = tuple(lst[1:])
      elif sname == "_remote_host":
        # peer side: only the address is interesting
        for lst in section[1:]:
          if lst[0] == "address":
            data["remote_addr"] = tuple(lst[1:])
  def _MatchesLocal(self, info):
    """Test if our local config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our local backing device is the same as the one in
    the info parameter, in effect testing if we look like the given
    device.

    """
      backend, meta = self._children
      # no children: we expect no local disk either
      backend = meta = None
    if backend is not None:
      retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
      retval = ("local_dev" not in info)
    if meta is not None:
      retval = retval and ("meta_dev" in info and
                           info["meta_dev"] == meta.dev_path)
      # we always use index 0 for the meta device
      retval = retval and ("meta_index" in info and
                           info["meta_index"] == 0)
      retval = retval and ("meta_dev" not in info and
                           "meta_index" not in info)
  def _MatchesNet(self, info):
    """Test if our network config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our network configuration is the same as the one
    in the info parameter, in effect testing if we look like the given
    device.

    """
    # both sides unconfigured and no addresses shown: trivially a match
    if (((self._lhost is None and not ("local_addr" in info)) and
         (self._rhost is None and not ("remote_addr" in info)))):

    if self._lhost is None:

    # we are configured, so the device must show both endpoints
    if not ("local_addr" in info and
            "remote_addr" in info):

    retval = (info["local_addr"] == (self._lhost, self._lport))
    retval = (retval and
              info["remote_addr"] == (self._rhost, self._rport))
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    @param minor: drbd minor to attach
    @param backend: path of the data device
    @param meta: path of the meta device
    @param size: device size in mebibytes (0 means use full backend)

    """
    args = ["drbdsetup", self._DevPath(minor), "disk",
      # explicit size in MiB, only when requested
      args.extend(["-d", "%sm" % size])

    version = self._GetVersion(self._GetProcData())
    vmaj = version["k_major"]
    vmin = version["k_minor"]
    vrel = version["k_point"]

    # barrier options depend on the running DRBD version
    self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
                                 self.params[constants.LDP_BARRIERS],
                                 self.params[constants.LDP_NO_META_FLUSH])
    args.extend(barrier_args)

    if self.params[constants.LDP_DISK_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))

    result = utils.RunCmd(args)
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    # NOTE(review): idiomatic form would be 'not in'
    if not disabled_barriers_set in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # is not supported)
    # NOTE(review): operator precedence makes this read as
    # '(not vmaj == 8) and (vmin in (0, 2, 3))'; the intent looks like
    # 'not (vmaj == 8 and vmin in (0, 2, 3))' -- verify before relying
    # on this version check.
    if not vmaj == 8 and vmin in (0, 2, 3):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options"""
      # NOTE(review): the success branch (appending the option) appears
      # elided from this excerpt
      if min_version is not None and vrel >= min_version:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    # introduced; both names intentionally share one mapping here
    meta_flush_supported = disk_flush_supported = {
    disk_drain_supported = {
    disk_barriers_supported = {

    # meta flushes
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))

    # disk flushes
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))

    # disk drain
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))

    # disk barriers
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))
  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    @param net_info: (lhost, lport, rhost, rport) tuple; any None entry
        means "disconnect" instead of "connect"
    @param dual_pri: whether to allow both sides primary at once

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # we don't want network connection and actually want to make
      # sure its shutdown
      self._ShutdownNet(minor)

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    # about its peer.
    sync_errors = self._SetMinorSyncParams(minor, self.params)
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    # determine the address family; both sides must use the same one
    if netutils.IP6Address.IsValid(lhost):
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
    elif netutils.IP4Address.IsValid(lhost):
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
      # shared-secret authentication, when requested
      args.extend(["-a", hmac, "-x", secret])

    if self.params[constants.LDP_NET_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))

    result = utils.RunCmd(args)
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    def _CheckNetworkConfig():
      # poll until drbdsetup show reports the addresses we configured
      info = self._GetDevInfo(self._GetShowData(minor))
      if not "local_addr" in info or not "remote_addr" in info:
        raise utils.RetryAgain()

      if (info["local_addr"] != (lhost, lport) or
          info["remote_addr"] != (rhost, rport)):
        raise utils.RetryAgain()

      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)
  def AddChildren(self, devices):
    """Add a disk to the DRBD device.

    @param devices: [backend, meta] pair of block devices to attach

    """
    if self.minor is None:
      # note: 'dbrd8' typo below is in the original runtime message
      _ThrowError("drbd%d: can't attach to dbrd8 during AddChildren",
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    backend, meta = devices
    if backend.dev_path is None or meta.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)

    # validate and (re-)initialize the meta device before attaching
    self._CheckMetaSize(meta.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)

    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
    self._children = devices
  def RemoveChildren(self, devices):
    """Detach the drbd device from local storage.

    @param devices: the two child device paths expected to be attached

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
    # early return if we don't actually have backing storage
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in info:

    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)

    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    # the passed devices must match our recorded children exactly
    for child, dev in zip(self._children, devices):
      if dev != child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, dev, child.dev_path)

    self._ShutdownLocal(self.minor)
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation.

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages

    """
    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if params[constants.LDP_DYNAMIC_RESYNC]:
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the version
      # number; the dynamic resync controller needs >= 8.3.9
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))

      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")

      # add the c-* parameters to args
      args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
                   "--c-fill-target", params[constants.LDP_FILL_TARGET],
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
                   "--c-min-rate", params[constants.LDP_MIN_RATE],
      # static resync rate path
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])

    args.append("--create-device")
    result = utils.RunCmd(args)
      msg = ("Can't change syncer rate: %s - %s" %
             (result.fail_reason, result.output))
  def SetSyncParams(self, params):
    """Set the synchronization parameters of the DRBD syncer.

    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
        children. An empty list means no errors

    """
    if self.minor is None:
      err = "Not attached during SetSyncParams"

    # collect errors from children first, then our own minor
    children_result = super(DRBD8, self).SetSyncParams(params)
    children_result.extend(self._SetMinorSyncParams(self.minor, params))
    return children_result
  def PauseResumeSync(self, pause):
    """Pauses or resumes the sync of a DRBD device.

    @param pause: Whether to pause or resume
    @return: the success of the operation

    """
    if self.minor is None:
      logging.info("Not attached during PauseSync")

    children_result = super(DRBD8, self).PauseResumeSync(pause)

    # 'cmd' is the drbdsetup subcommand selected from 'pause'
    # (the assignment appears elided from this excerpt)
    result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
      logging.error("Can't %s: %s - %s", cmd,
                    result.fail_reason, result.output)
    return not result.failed and children_result
1941 def GetProcStatus(self):
1942 """Return device data from /proc.
1945 if self.minor is None:
1946 _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
1947 proc_info = self._MassageProcData(self._GetProcData())
1948 if self.minor not in proc_info:
1949 _ThrowError("drbd%d: can't find myself in /proc", self.minor)
1950 return DRBD8Status(proc_info[self.minor])
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If sync_percent is None, it means all is ok
    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    We set the is_degraded parameter to True on two conditions:
    network not connected or local disk missing.

    We compute the ldisk parameter based on whether we have a local

    @rtype: objects.BlockDevStatus

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)

    stats = self.GetProcStatus()
    is_degraded = not stats.is_connected or not stats.is_disk_uptodate

    # map the kernel disk state onto our ldisk status constants
    if stats.is_disk_uptodate:
      ldisk_status = constants.LDS_OKAY
    elif stats.is_diskless:
      ldisk_status = constants.LDS_FAULTY
      ldisk_status = constants.LDS_UNKNOWN

    # NOTE(review): some keyword arguments of BlockDevStatus appear
    # elided from this excerpt
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  sync_percent=stats.sync_percent,
                                  estimated_time=stats.est_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)
  def Open(self, force=False):
    """Make the local state primary.

    If the 'force' parameter is given, the '-o' option is passed to
    drbdsetup. Since this is a potentially dangerous operation, the
    force flag should be only given after creation, when it actually

    """
    if self.minor is None and not self.Attach():
      logging.error("DRBD cannot attach to a device during open")
    cmd = ["drbdsetup", self.dev_path, "primary"]
    # '-o' (force/overwrite peer) is appended when 'force' is set
    result = utils.RunCmd(cmd)
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
    # NOTE(review): the 'def Close(self):' header is not visible in this
    # excerpt; the lines below are the method's body.
    """Make the local state secondary.

    This will, of course, fail if the device is in use.

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
    result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
      _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
                  self.minor, result.output)
  def DisconnectNet(self):
    """Removes network configuration.

    This method shutdowns the network side of the device.

    The method will wait up to a hardcoded timeout for the device to
    go into standalone after the 'disconnect' command before
    re-configuring it, as sometimes it takes a while for the
    disconnect to actually propagate and thus we might issue a 'net'
    command while the device is still connected. If the device will
    still be attached to the network and we time out, we raise an

    """
    if self.minor is None:
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: DRBD disk missing network info in"
                  " DisconnectNet()", self.minor)

    # small mutable holder so the closure below can update the flag
    class _DisconnectStatus:
      def __init__(self, ever_disconnected):
        self.ever_disconnected = ever_disconnected

    dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))

    def _WaitForDisconnect():
      if self.GetProcStatus().is_standalone:

      # retry the disconnect, it seems possible that due to a well-time
      # disconnect on the peer, my disconnect command might be ignored and
      # forgotten
      dstatus.ever_disconnected = \
        _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected

      raise utils.RetryAgain()

    # keep the device turnaround time so we can log it at the end
    start_time = time.time()

      # Start delay at 100 milliseconds and grow up to 2 seconds
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
                  self._NET_RECONFIG_TIMEOUT)
    except utils.RetryTimeout:
      if dstatus.ever_disconnected:
        msg = ("drbd%d: device did not react to the"
               " 'disconnect' command in a timely manner")
        msg = "drbd%d: can't shutdown network, even after multiple retries"

      _ThrowError(msg, self.minor)

    reconfig_time = time.time() - start_time
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
                   self.minor, reconfig_time)
  def AttachNet(self, multimaster):
    """Reconnects the network.

    This method connects the network side of the device with a
    specified multi-master flag. The device needs to be 'Standalone'
    but have valid network configuration data.

    Args:
      - multimaster: init the network in dual-primary mode

    """
    if self.minor is None:
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)

    status = self.GetProcStatus()

    # reconnecting is only valid from the Standalone state
    if not status.is_standalone:
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)

    self._AssembleNet(self.minor,
                      (self._lhost, self._lport, self._rhost, self._rport),
                      constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
                      hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
    # NOTE(review): the 'def Attach(self):' header is not visible in this
    # excerpt; the lines below are the method's body.
    """Check if our minor is configured.

    This doesn't do any device configurations - it only checks if the
    minor is in a state different from Unconfigured.

    Note that this function will not change the state of the system in
    any way (except in case of side-effects caused by reading from

    """
    used_devs = self.GetUsedDevs()
    if self._aminor in used_devs:
      minor = self._aminor
    # (an 'else: minor = None' branch appears elided here)
    self._SetFromMinor(minor)
    return minor is not None
    # NOTE(review): the 'def Assemble(self):' header is not visible in this
    # excerpt; the lines below are the method's body.
    """Assemble the drbd.

    Method:
      - if we have a configured device, we try to ensure that it matches
      - if not, we create it from zero
      - anyway, set the device parameters

    """
    super(DRBD8, self).Assemble()

    if self.minor is None:
      # local device completely unconfigured
      self._FastAssemble()
      # (an 'else:' branch appears elided before the next two lines)
      # we have to recheck the local and network status and try to fix
      self._SlowAssemble()

    sync_errors = self.SetSyncParams(self.params)
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (self.minor, utils.CommaJoin(sync_errors)))
  def _SlowAssemble(self):
    """Assembles the DRBD device from a (partially) configured device.

    In case of partially attached (local device matches but no network
    setup), we perform the network attach. If successful, we re-test
    the attach if can return success.

    """
    # TODO: Rewrite to not use a for loop just because there is 'break'
    # pylint: disable=W0631
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    for minor in (self._aminor,):
      info = self._GetDevInfo(self._GetShowData(minor))
      match_l = self._MatchesLocal(info)
      match_r = self._MatchesNet(info)

      if match_l and match_r:
        # everything matches

      if match_l and not match_r and "local_addr" not in info:
        # disk matches, but not attached to network, attach and recheck
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          # (success branch with 'break' appears elided here)
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      if match_r and "local_dev" not in info:
        # no local disk, but network attached and it matches
        self._AssembleLocal(minor, self._children[0].dev_path,
                            self._children[1].dev_path, self.size)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      # this case must be considered only if we actually have local
      # storage, i.e. not in diskless mode, because all diskless
      # devices are equal from the point of view of local
      # configuration
      if (match_l and "local_dev" in info and
          not match_r and "local_addr" in info):
        # strange case - the device network part points to somewhere
        # else, even though its local storage is ours; as we own the
        # drbd space, we try to disconnect from the remote peer and
        # reconnect to our correct one
          self._ShutdownNet(minor)
        except errors.BlockDeviceError, err:
          _ThrowError("drbd%d: device has correct local storage, wrong"
                      " remote peer and is unable to disconnect in order"
                      " to attach to the correct peer: %s", minor, str(err))
        # note: _AssembleNet also handles the case when we don't want
        # local storage (i.e. one or more of the _[lr](host|port) is
        # None)
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

    # record the minor we ended up with, or fail if nothing worked
    self._SetFromMinor(minor)
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
2232 def _FastAssemble(self):
2233 """Assemble the drbd device from zero.
2235 This is run when in Assemble we detect our minor is unused.
2238 minor = self._aminor
2239 if self._children and self._children[0] and self._children[1]:
2240 self._AssembleLocal(minor, self._children[0].dev_path,
2241 self._children[1].dev_path, self.size)
2242 if self._lhost and self._lport and self._rhost and self._rport:
2243 self._AssembleNet(minor,
2244 (self._lhost, self._lport, self._rhost, self._rport),
2245 constants.DRBD_NET_PROTOCOL,
2246 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2247 self._SetFromMinor(minor)
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
      _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
      _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)
2273 def _ShutdownAll(cls, minor):
2274 """Deactivate the device.
2276 This will, of course, fail if the device is in use.
2279 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
2281 _ThrowError("drbd%d: can't shutdown drbd device: %s",
2282 minor, result.output)
2285 """Shutdown the DRBD device.
2288 if self.minor is None and not self.Attach():
2289 logging.info("drbd%d: not attached during Shutdown()", self._aminor)
2293 self.dev_path = None
2294 self._ShutdownAll(minor)
2297 """Stub remove for DRBD devices.
2303 def Create(cls, unique_id, children, size, params, excl_stor):
2304 """Create a new DRBD8 device.
2306 Since DRBD devices are not created per se, just assembled, this
2307 function only initializes the metadata.
2310 if len(children) != 2:
2311 raise errors.ProgrammerError("Invalid setup for the drbd device")
2313 raise errors.ProgrammerError("DRBD device requested with"
2314 " exclusive_storage")
2315 # check that the minor is unused
2316 aminor = unique_id[4]
2317 proc_info = cls._MassageProcData(cls._GetProcData())
2318 if aminor in proc_info:
2319 status = DRBD8Status(proc_info[aminor])
2320 in_use = status.is_in_use
2324 _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
2327 if not meta.Attach():
2328 _ThrowError("drbd%d: can't attach to meta device '%s'",
2330 cls._CheckMetaSize(meta.dev_path)
2331 cls._InitMeta(aminor, meta.dev_path)
2332 return cls(unique_id, children, size, params)
2334 def Grow(self, amount, dryrun, backingstore):
2335 """Resize the DRBD device and its backing storage.
2338 if self.minor is None:
2339 _ThrowError("drbd%d: Grow called while not attached", self._aminor)
2340 if len(self._children) != 2 or None in self._children:
2341 _ThrowError("drbd%d: cannot grow diskless device", self.minor)
2342 self._children[0].Grow(amount, dryrun, backingstore)
2343 if dryrun or backingstore:
2344 # DRBD does not support dry-run mode and is not backing storage,
2345 # so we'll return here
2347 result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
2348 "%dm" % (self.size + amount)])
2350 _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
class FileStorage(BlockDev):
  """File device.

  This class represents a file storage backend device.

  The unique_id for the file device is a (file_driver, file_path) tuple.

  """
2361 def __init__(self, unique_id, children, size, params):
2362 """Initalizes a file device backend.
2366 raise errors.BlockDeviceError("Invalid setup for file device")
2367 super(FileStorage, self).__init__(unique_id, children, size, params)
2368 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2369 raise ValueError("Invalid configuration data %s" % str(unique_id))
2370 self.driver = unique_id[0]
2371 self.dev_path = unique_id[1]
2373 CheckFileStoragePath(self.dev_path)
2378 """Assemble the device.
2380 Checks whether the file device exists, raises BlockDeviceError otherwise.
2383 if not os.path.exists(self.dev_path):
2384 _ThrowError("File device '%s' does not exist" % self.dev_path)
2387 """Shutdown the device.
2389 This is a no-op for the file type, as we don't deactivate
2390 the file on shutdown.
  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the file type.

    """
2404 """Notifies that the device will no longer be used for I/O.
2406 This is a no-op for the file type.
2412 """Remove the file backing the block device.
2415 @return: True if the removal was successful
2419 os.remove(self.dev_path)
2420 except OSError, err:
2421 if err.errno != errno.ENOENT:
2422 _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
  def Rename(self, new_id):
    """Renames the file.

    Currently not implemented; always raises.

    @raise errors.BlockDeviceError: always

    """
    # TODO: implement rename for file-based storage
    _ThrowError("Rename is not supported for file-based storage")
2431 def Grow(self, amount, dryrun, backingstore):
2434 @param amount: the amount (in mebibytes) to grow with
2437 if not backingstore:
2439 # Check that the file exists
2441 current_size = self.GetActualSize()
2442 new_size = current_size + amount * 1024 * 1024
2443 assert new_size > current_size, "Cannot Grow with a negative amount"
2444 # We can't really simulate the growth
2448 f = open(self.dev_path, "a+")
2449 f.truncate(new_size)
2451 except EnvironmentError, err:
2452 _ThrowError("Error in file growth: %", str(err))
2455 """Attach to an existing file.
2457 Check if this file already exists.
2460 @return: True if file exists
2463 self.attached = os.path.exists(self.dev_path)
2464 return self.attached
2466 def GetActualSize(self):
2467 """Return the actual disk size.
2469 @note: the device needs to be active when this is called
2472 assert self.attached, "BlockDevice not attached in GetActualSize()"
2474 st = os.stat(self.dev_path)
2476 except OSError, err:
2477 _ThrowError("Can't stat %s: %s", self.dev_path, err)
2480 def Create(cls, unique_id, children, size, params, excl_stor):
2481 """Create a new file.
2483 @param size: the size of file in MiB
2485 @rtype: L{bdev.FileStorage}
2486 @return: an instance of FileStorage
2490 raise errors.ProgrammerError("FileStorage device requested with"
2491 " exclusive_storage")
2492 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2493 raise ValueError("Invalid configuration data %s" % str(unique_id))
2495 dev_path = unique_id[1]
2497 CheckFileStoragePath(dev_path)
2500 fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2501 f = os.fdopen(fd, "w")
2502 f.truncate(size * 1024 * 1024)
2504 except EnvironmentError, err:
2505 if err.errno == errno.EEXIST:
2506 _ThrowError("File already existing: %s", dev_path)
2507 _ThrowError("Error in file creation: %", str(err))
2509 return FileStorage(unique_id, children, size, params)
class PersistentBlockDevice(BlockDev):
  """A block device with persistent node.

  May be either directly attached, or exposed through DM (e.g. dm-multipath).
  udev helpers are probably required to give persistent, human-friendly
  names.

  For the time being, pathnames are required to lie under /dev.

  """
2522 def __init__(self, unique_id, children, size, params):
2523 """Attaches to a static block device.
2525 The unique_id is a path under /dev.
2528 super(PersistentBlockDevice, self).__init__(unique_id, children, size,
2530 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2531 raise ValueError("Invalid configuration data %s" % str(unique_id))
2532 self.dev_path = unique_id[1]
2533 if not os.path.realpath(self.dev_path).startswith("/dev/"):
2534 raise ValueError("Full path '%s' lies outside /dev" %
2535 os.path.realpath(self.dev_path))
2536 # TODO: this is just a safety guard checking that we only deal with devices
2537 # we know how to handle. In the future this will be integrated with
2538 # external storage backends and possible values will probably be collected
2539 # from the cluster configuration.
2540 if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
2541 raise ValueError("Got persistent block device of invalid type: %s" %
2544 self.major = self.minor = None
2548 def Create(cls, unique_id, children, size, params, excl_stor):
2549 """Create a new device
2551 This is a noop, we only return a PersistentBlockDevice instance
2555 raise errors.ProgrammerError("Persistent block device requested with"
2556 " exclusive_storage")
2557 return PersistentBlockDevice(unique_id, children, 0, params)
  def Rename(self, new_id):
    """Rename this device.

    Not supported for this device type; always raises.

    @raise errors.BlockDeviceError: always

    """
    _ThrowError("Rename is not supported for PersistentBlockDev storage")
2574 """Attach to an existing block device.
2578 self.attached = False
2580 st = os.stat(self.dev_path)
2581 except OSError, err:
2582 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2585 if not stat.S_ISBLK(st.st_mode):
2586 logging.error("%s is not a block device", self.dev_path)
2589 self.major = os.major(st.st_rdev)
2590 self.minor = os.minor(st.st_rdev)
2591 self.attached = True
2596 """Assemble the device.
2602 """Shutdown the device.
2607 def Open(self, force=False):
2608 """Make the device ready for I/O.
2614 """Notifies that the device will no longer be used for I/O.
  def Grow(self, amount, dryrun, backingstore):
    """Grow the device (not supported for this device type).

    @raise errors.BlockDeviceError: always

    """
    _ThrowError("Grow is not supported for PersistentBlockDev storage")
class RADOSBlockDevice(BlockDev):
  """A RADOS Block Device (rbd).

  This class implements the RADOS Block Device for the backend. You need
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
  this to be functional.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    The unique_id is a (driver, rbd_name) tuple.

    @raise ValueError: on malformed unique_id

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    # major/minor are only known once the device is attached
    self.major = self.minor = None
2648 def Create(cls, unique_id, children, size, params, excl_stor):
2649 """Create a new rbd device.
2651 Provision a new rbd volume inside a RADOS pool.
2654 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2655 raise errors.ProgrammerError("Invalid configuration data %s" %
2658 raise errors.ProgrammerError("RBD device requested with"
2659 " exclusive_storage")
2660 rbd_pool = params[constants.LDP_POOL]
2661 rbd_name = unique_id[1]
2663 # Provision a new rbd volume (Image) inside the RADOS cluster.
2664 cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
2665 rbd_name, "--size", "%s" % size]
2666 result = utils.RunCmd(cmd)
2668 _ThrowError("rbd creation failed (%s): %s",
2669 result.fail_reason, result.output)
2671 return RADOSBlockDevice(unique_id, children, size, params)
2674 """Remove the rbd device.
2677 rbd_pool = self.params[constants.LDP_POOL]
2678 rbd_name = self.unique_id[1]
2680 if not self.minor and not self.Attach():
2681 # The rbd device doesn't exist.
2684 # First shutdown the device (remove mappings).
2687 # Remove the actual Volume (Image) from the RADOS cluster.
2688 cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
2689 result = utils.RunCmd(cmd)
2691 _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
2692 result.fail_reason, result.output)
  def Rename(self, new_id):
    """Rename this device.

    Not implemented for the rbd device type.

    """
2701 """Attach to an existing rbd device.
2703 This method maps the rbd volume that matches our name with
2704 an rbd device and then attaches to this device.
2707 self.attached = False
2709 # Map the rbd volume to a block device under /dev
2710 self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
2713 st = os.stat(self.dev_path)
2714 except OSError, err:
2715 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2718 if not stat.S_ISBLK(st.st_mode):
2719 logging.error("%s is not a block device", self.dev_path)
2722 self.major = os.major(st.st_rdev)
2723 self.minor = os.minor(st.st_rdev)
2724 self.attached = True
2728 def _MapVolumeToBlockdev(self, unique_id):
2729 """Maps existing rbd volumes to block devices.
2731 This method should be idempotent if the mapping already exists.
2734 @return: the block device path that corresponds to the volume
2737 pool = self.params[constants.LDP_POOL]
2740 # Check if the mapping already exists.
2741 rbd_dev = self._VolumeToBlockdev(pool, name)
2743 # The mapping exists. Return it.
2746 # The mapping doesn't exist. Create it.
2747 map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
2748 result = utils.RunCmd(map_cmd)
2750 _ThrowError("rbd map failed (%s): %s",
2751 result.fail_reason, result.output)
2753 # Find the corresponding rbd device.
2754 rbd_dev = self._VolumeToBlockdev(pool, name)
2756 _ThrowError("rbd map succeeded, but could not find the rbd block"
2757 " device in output of showmapped, for volume: %s", name)
2759 # The device was successfully mapped. Return it.
2763 def _VolumeToBlockdev(cls, pool, volume_name):
2764 """Do the 'volume name'-to-'rbd block device' resolving.
2767 @param pool: RADOS pool to use
2768 @type volume_name: string
2769 @param volume_name: the name of the volume whose device we search for
2770 @rtype: string or None
2771 @return: block device path if the volume is mapped, else None
2775 # Newer versions of the rbd tool support json output formatting. Use it
2785 result = utils.RunCmd(showmap_cmd)
2787 logging.error("rbd JSON output formatting returned error (%s): %s,"
2788 "falling back to plain output parsing",
2789 result.fail_reason, result.output)
2790 raise RbdShowmappedJsonError
2792 return cls._ParseRbdShowmappedJson(result.output, volume_name)
2793 except RbdShowmappedJsonError:
2794 # For older versions of rbd, we have to parse the plain / text output
2796 showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
2797 result = utils.RunCmd(showmap_cmd)
2799 _ThrowError("rbd showmapped failed (%s): %s",
2800 result.fail_reason, result.output)
2802 return cls._ParseRbdShowmappedPlain(result.output, volume_name)
2805 def _ParseRbdShowmappedJson(output, volume_name):
2806 """Parse the json output of `rbd showmapped'.
2808 This method parses the json output of `rbd showmapped' and returns the rbd
2809 block device path (e.g. /dev/rbd0) that matches the given rbd volume.
2811 @type output: string
2812 @param output: the json output of `rbd showmapped'
2813 @type volume_name: string
2814 @param volume_name: the name of the volume whose device we search for
2815 @rtype: string or None
2816 @return: block device path if the volume is mapped, else None
2820 devices = serializer.LoadJson(output)
2821 except ValueError, err:
2822 _ThrowError("Unable to parse JSON data: %s" % err)
2825 for d in devices.values(): # pylint: disable=E1103
2829 _ThrowError("'name' key missing from json object %s", devices)
2831 if name == volume_name:
2832 if rbd_dev is not None:
2833 _ThrowError("rbd volume %s is mapped more than once", volume_name)
2835 rbd_dev = d["device"]
2840 def _ParseRbdShowmappedPlain(output, volume_name):
2841 """Parse the (plain / text) output of `rbd showmapped'.
2843 This method parses the output of `rbd showmapped' and returns
2844 the rbd block device path (e.g. /dev/rbd0) that matches the
2847 @type output: string
2848 @param output: the plain text output of `rbd showmapped'
2849 @type volume_name: string
2850 @param volume_name: the name of the volume whose device we search for
2851 @rtype: string or None
2852 @return: block device path if the volume is mapped, else None
2859 lines = output.splitlines()
2861 # Try parsing the new output format (ceph >= 0.55).
2862 splitted_lines = map(lambda l: l.split(), lines)
2864 # Check for empty output.
2865 if not splitted_lines:
2868 # Check showmapped output, to determine number of fields.
2869 field_cnt = len(splitted_lines[0])
2870 if field_cnt != allfields:
2871 # Parsing the new format failed. Fallback to parsing the old output
2873 splitted_lines = map(lambda l: l.split("\t"), lines)
2874 if field_cnt != allfields:
2875 _ThrowError("Cannot parse rbd showmapped output expected %s fields,"
2876 " found %s", allfields, field_cnt)
2879 filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
2882 if len(matched_lines) > 1:
2883 _ThrowError("rbd volume %s mapped more than once", volume_name)
2886 # rbd block device found. Return it.
2887 rbd_dev = matched_lines[0][devicefield]
2890 # The given volume is not mapped.
2894 """Assemble the device.
2900 """Shutdown the device.
2903 if not self.minor and not self.Attach():
2904 # The rbd device doesn't exist.
2907 # Unmap the block device from the Volume.
2908 self._UnmapVolumeFromBlockdev(self.unique_id)
2911 self.dev_path = None
2913 def _UnmapVolumeFromBlockdev(self, unique_id):
2914 """Unmaps the rbd device from the Volume it is mapped.
2916 Unmaps the rbd device from the Volume it was previously mapped to.
2917 This method should be idempotent if the Volume isn't mapped.
2920 pool = self.params[constants.LDP_POOL]
2923 # Check if the mapping already exists.
2924 rbd_dev = self._VolumeToBlockdev(pool, name)
2927 # The mapping exists. Unmap the rbd device.
2928 unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
2929 result = utils.RunCmd(unmap_cmd)
2931 _ThrowError("rbd unmap failed (%s): %s",
2932 result.fail_reason, result.output)
2934 def Open(self, force=False):
2935 """Make the device ready for I/O.
2941 """Notifies that the device will no longer be used for I/O.
2946 def Grow(self, amount, dryrun, backingstore):
2949 @type amount: integer
2950 @param amount: the amount (in mebibytes) to grow with
2951 @type dryrun: boolean
2952 @param dryrun: whether to execute the operation in simulation mode
2953 only, without actually increasing the size
2956 if not backingstore:
2958 if not self.Attach():
2959 _ThrowError("Can't attach to rbd device during Grow()")
2962 # the rbd tool does not support dry runs of resize operations.
2963 # Since rbd volumes are thinly provisioned, we assume
2964 # there is always enough free space for the operation.
2967 rbd_pool = self.params[constants.LDP_POOL]
2968 rbd_name = self.unique_id[1]
2969 new_size = self.size + amount
2971 # Resize the rbd volume (Image) inside the RADOS cluster.
2972 cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
2973 rbd_name, "--size", "%s" % new_size]
2974 result = utils.RunCmd(cmd)
2976 _ThrowError("rbd resize failed (%s): %s",
2977 result.fail_reason, result.output)
class ExtStorageDevice(BlockDev):
  """A block device provided by an ExtStorage Provider.

  This class implements the External Storage Interface, which means
  handling of the externally provided block devices.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an extstorage block device.

    The unique_id is a (driver, vol_name) tuple.

    @raise ValueError: on malformed unique_id

    """
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.vol_name = unique_id
    # parameters forwarded to the provider's scripts
    self.ext_params = params

    # major/minor are only known once the device is attached
    self.major = self.minor = None
3002 def Create(cls, unique_id, children, size, params, excl_stor):
3003 """Create a new extstorage device.
3005 Provision a new volume using an extstorage provider, which will
3006 then be mapped to a block device.
3009 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
3010 raise errors.ProgrammerError("Invalid configuration data %s" %
3013 raise errors.ProgrammerError("extstorage device requested with"
3014 " exclusive_storage")
3016 # Call the External Storage's create script,
3017 # to provision a new Volume inside the External Storage
3018 _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
3021 return ExtStorageDevice(unique_id, children, size, params)
3024 """Remove the extstorage device.
3027 if not self.minor and not self.Attach():
3028 # The extstorage device doesn't exist.
3031 # First shutdown the device (remove mappings).
3034 # Call the External Storage's remove script,
3035 # to remove the Volume from the External Storage
3036 _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
  def Rename(self, new_id):
    """Rename this device.

    Not implemented for the extstorage device type.

    """
3046 """Attach to an existing extstorage device.
3048 This method maps the extstorage volume that matches our name with
3049 a corresponding block device and then attaches to this device.
3052 self.attached = False
3054 # Call the External Storage's attach script,
3055 # to attach an existing Volume to a block device under /dev
3056 self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
3057 self.unique_id, self.ext_params)
3060 st = os.stat(self.dev_path)
3061 except OSError, err:
3062 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
3065 if not stat.S_ISBLK(st.st_mode):
3066 logging.error("%s is not a block device", self.dev_path)
3069 self.major = os.major(st.st_rdev)
3070 self.minor = os.minor(st.st_rdev)
3071 self.attached = True
3076 """Assemble the device.
3082 """Shutdown the device.
3085 if not self.minor and not self.Attach():
3086 # The extstorage device doesn't exist.
3089 # Call the External Storage's detach script,
3090 # to detach an existing Volume from it's block device under /dev
3091 _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
3095 self.dev_path = None
3097 def Open(self, force=False):
3098 """Make the device ready for I/O.
3104 """Notifies that the device will no longer be used for I/O.
3109 def Grow(self, amount, dryrun, backingstore):
3112 @type amount: integer
3113 @param amount: the amount (in mebibytes) to grow with
3114 @type dryrun: boolean
3115 @param dryrun: whether to execute the operation in simulation mode
3116 only, without actually increasing the size
3119 if not backingstore:
3121 if not self.Attach():
3122 _ThrowError("Can't attach to extstorage device during Grow()")
3125 # we do not support dry runs of resize operations for now.
3128 new_size = self.size + amount
3130 # Call the External Storage's grow script,
3131 # to grow an existing Volume inside the External Storage
3132 _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
3133 self.ext_params, str(self.size), grow=str(new_size))
3135 def SetInfo(self, text):
3136 """Update metadata with info text.
3139 # Replace invalid characters
3140 text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
3141 text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
3143 # Only up to 128 characters are allowed
3146 # Call the External Storage's setinfo script,
3147 # to set metadata for an existing Volume inside the External Storage
3148 _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
3149 self.ext_params, metadata=text)
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # string comparison: `!=', not identity, since equal strings are not
  # guaranteed to be the same object
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
3228 def ExtStorageFromDisk(name, base_dir=None):
3229 """Create an ExtStorage instance from disk.
3231 This function will return an ExtStorage instance
3232 if the given name is a valid ExtStorage name.
3234 @type base_dir: string
3235 @keyword base_dir: Base directory containing ExtStorage installations.
3236 Defaults to a search in all the ES_SEARCH_PATH dirs.
3238 @return: True and the ExtStorage instance if we find a valid one, or
3239 False and the diagnose message on error
3242 if base_dir is None:
3243 es_base_dir = pathutils.ES_SEARCH_PATH
3245 es_base_dir = [base_dir]
3247 es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)
3250 return False, ("Directory for External Storage Provider %s not"
3251 " found in search path" % name)
3253 # ES Files dictionary, we will populate it with the absolute path
3254 # names; if the value is True, then it is a required file, otherwise
3256 es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
3258 es_files[constants.ES_PARAMETERS_FILE] = True
3260 for (filename, _) in es_files.items():
3261 es_files[filename] = utils.PathJoin(es_dir, filename)
3264 st = os.stat(es_files[filename])
3265 except EnvironmentError, err:
3266 return False, ("File '%s' under path '%s' is missing (%s)" %
3267 (filename, es_dir, utils.ErrnoOrStr(err)))
3269 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
3270 return False, ("File '%s' under path '%s' is not a regular file" %
3273 if filename in constants.ES_SCRIPTS:
3274 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
3275 return False, ("File '%s' under path '%s' is not executable" %
3279 if constants.ES_PARAMETERS_FILE in es_files:
3280 parameters_file = es_files[constants.ES_PARAMETERS_FILE]
3282 parameters = utils.ReadFile(parameters_file).splitlines()
3283 except EnvironmentError, err:
3284 return False, ("Error while reading the EXT parameters file at %s: %s" %
3285 (parameters_file, utils.ErrnoOrStr(err)))
3286 parameters = [v.split(None, 1) for v in parameters]
3289 objects.ExtStorage(name=name, path=es_dir,
3290 create_script=es_files[constants.ES_SCRIPT_CREATE],
3291 remove_script=es_files[constants.ES_SCRIPT_REMOVE],
3292 grow_script=es_files[constants.ES_SCRIPT_GROW],
3293 attach_script=es_files[constants.ES_SCRIPT_ATTACH],
3294 detach_script=es_files[constants.ES_SCRIPT_DETACH],
3295 setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
3296 verify_script=es_files[constants.ES_SCRIPT_VERIFY],
3297 supported_parameters=parameters)
3301 def _ExtStorageEnvironment(unique_id, ext_params,
3302 size=None, grow=None, metadata=None):
3303 """Calculate the environment for an External Storage script.
3305 @type unique_id: tuple (driver, vol_name)
3306 @param unique_id: ExtStorage pool and name of the Volume
3307 @type ext_params: dict
3308 @param ext_params: the EXT parameters
3310 @param size: size of the Volume (in mebibytes)
3312 @param grow: new size of Volume after grow (in mebibytes)
3313 @type metadata: string
3314 @param metadata: metadata info of the Volume
3316 @return: dict of environment variables
3319 vol_name = unique_id[1]
3322 result["VOL_NAME"] = vol_name
3325 for pname, pvalue in ext_params.items():
3326 result["EXTP_%s" % pname.upper()] = str(pvalue)
3328 if size is not None:
3329 result["VOL_SIZE"] = size
3331 if grow is not None:
3332 result["VOL_NEW_SIZE"] = grow
3334 if metadata is not None:
3335 result["VOL_METADATA"] = metadata
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  base = ("%s-%s-%s-%s.log" %
          (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, base)
# Map from logical-disk type constants to their implementing classes.
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-based storage is only available when enabled at build time.
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
def _VerifyDiskType(dev_type):
  """Ensure the given disk type is one we know how to handle.

  @param dev_type: a logical-disk type constant
  @raise errors.ProgrammerError: if the type is not present in L{DEV_MAP}

  """
  if dev_type in DEV_MAP:
    return
  raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  @type disk: L{objects.Disk}
  @param disk: the disk object to check
  @raise errors.ProgrammerError: if any expected parameter is missing

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  # Raise only when parameters are actually missing.
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device
3430 def Create(disk, children, excl_stor):
3433 @type disk: L{objects.Disk}
3434 @param disk: the disk object to create
3435 @type children: list of L{bdev.BlockDev}
3436 @param children: the list of block devices that are children of the device
3437 represented by the disk parameter
3438 @type excl_stor: boolean
3439 @param excl_stor: Whether exclusive_storage is active
3442 _VerifyDiskType(disk.dev_type)
3443 _VerifyDiskParams(disk)
3444 device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
3445 disk.params, excl_stor)