4 # Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Block device abstraction"""
29 import pyparsing as pyp
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import constants
37 from ganeti import objects
38 from ganeti import compat
39 from ganeti import netutils
40 from ganeti import pathutils
# Size of reads in _CanReadDevice: 128 KiB is enough to detect an
# unreadable device without scanning much of it
_DEVICE_READ_SIZE = 128 * 1024
def _IgnoreError(fn, *args, **kwargs):
  """Executes the given function, ignoring BlockDeviceErrors.

  This is used in order to simplify the execution of cleanup or
  rollback functions.

  @return: True when fn didn't raise an exception, False otherwise

  """
  # only BlockDeviceError is swallowed; any other exception propagates
  except errors.BlockDeviceError, err:
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
def _ThrowError(msg, *args):
  """Log an error to the node daemon and then raise an exception.

  @type msg: string
  @param msg: the text of the exception; interpolated with *args
      (%-style) before raising, when args are given
  @raise errors.BlockDeviceError

  """
  raise errors.BlockDeviceError(msg)
def _CheckResult(result):
  """Throws an error if the given result is a failed one.

  @param result: result from RunCmd

  """
    # raised only for failed commands; includes command, reason and output
    _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
def _CanReadDevice(path):
  """Check if we can read from the given device.

  This tries to read the first 128k of the device.

  @type path: string
  @param path: path of the device to probe

  """
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
  # any OS-level read failure means the device is unreadable;
  # log with traceback but do not propagate
  except EnvironmentError:
    logging.warning("Can't read from device %s", path, exc_info=True)
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  @rtype: frozenset

  """
  # system binary/library directories under /, /usr and /usr/local
  # must never host instance disk files
  for prefix in ["", "/usr", "/usr/local"]:
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
                     ["bin", "lib", "lib32", "lib64", "sbin"]))

  return compat.UniqueFrozenset(map(os.path.normpath, paths))
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
    # a path is "wrong" when it is relative, exactly a forbidden path,
    # or located below one of the forbidden prefixes
    return (not os.path.isabs(path) or
            path in _forbidden or
            filter(lambda p: utils.IsBelowDir(p, path), _forbidden))

  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  """
  # load the configured paths first, then let the helper do the checking
  configured_paths = _LoadAllowedFileStoragePaths(_filename)
  return _ComputeWrongFileStoragePaths(configured_paths)
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"

    # relative entries in the allowed list are skipped, not errors
    if not os.path.isabs(i):
      logging.info("Ignoring relative path '%s' for file storage", i)

    # accept as soon as the path lives below one allowed directory
    if utils.IsBelowDir(i, path):

  raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
    contents = utils.ReadFile(filename)
  # a missing/unreadable config file means no extra allowed paths
  except EnvironmentError:
    return utils.FilterEmptyLinesAndComments(contents)
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)

  # forbidden prefixes (e.g. /bin) are rejected even if whitelisted
  if _ComputeWrongFileStoragePaths([path]):
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %

  _CheckFileStoragePath(path, allowed)
class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - its major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachment to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """
  def __init__(self, unique_id, children, size, params):
    # child BlockDev objects (e.g. the two LVs backing a DRBD device)
    self._children = children
    # static identifier of the device (see class docstring)
    self.unique_id = unique_id
    # set to True once the python object is bound to a real device
    self.attached = False

    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """

    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError

  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for other,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    raise NotImplementedError

    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    can't be completed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need this.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError

  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param params: dictionary of LD level disk parameters related to the
        synchronization
    @return: a list of error messages, emitted both by the current node and by
        children. An empty list means no errors.

    """
    # the base implementation only aggregates the children's messages
    for child in self._children:
      result.extend(child.SetSyncParams(params))

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param pause: Whether to pause or resume

    """
    # success only when every child succeeded
    for child in self._children:
      result = result and child.PauseResumeSync(pause)

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    # base class: never syncing, never degraded
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

      for child in self._children:
        child_status = child.GetSyncStatus()

        # worst (lowest) sync percentage among all children
        if min_percent is None:
          min_percent = child_status.sync_percent
        elif child_status.sync_percent is not None:
          min_percent = min(min_percent, child_status.sync_percent)

          # worst (highest) remaining time among all children
          max_time = child_status.estimated_time
        elif child_status.estimated_time is not None:
          max_time = max(max_time, child_status.estimated_time)

        # degraded if any level of the stack is degraded
        is_degraded = is_degraded or child_status.is_degraded

        # LDS_* constants compare by severity, so max() picks the worst
        if ldisk_status is None:
          ldisk_status = child_status.ldisk_status
        elif child_status.ldisk_status is not None:
          ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    # propagate the text to all children
    for child in self._children:

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
      # blockdev prints the size as a decimal integer
      sz = int(result.output.strip())
    except (ValueError, TypeError), err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))

    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
class LogicalVolume(BlockDev):
  """Logical Volume block device.

  """
  # valid characters per lvm(8); names are further restricted below
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])

  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self._vg_name, self._lv_name = unique_id
    self._ValidateName(self._vg_name)
    self._ValidateName(self._lv_name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    # assume degraded until Attach() proves otherwise
    self._degraded = True
    self.major = self.minor = self.pe_size = self.stripe_count = None

  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @type pvs_info: list
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty

    """
    assert len(pvs_info) > 0
    # the smallest PV sets the standard; reduce it by the partition
    # margin/reserved constants
    smallest = min([pv.size for pv in pvs_info])
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)

  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @type size: float
    @param size: LV size in MiB
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: integer
    @return: number of PVs needed

    """
    assert len(pvs_info) > 0
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
    return int(math.ceil(float(size) / pv_size))

  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    """
    empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
    if max_pvs is not None:
      empty_pvs = empty_pvs[:max_pvs]
    return map((lambda pv: pv.name), empty_pvs)

  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new logical volume.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
      msg = "No (empty) PVs found"
      msg = "Can't compute PV info for vg %s" % vg_name
    # allocate from the PVs with the most free space first
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    # cannot stripe over more PVs than are available
    stripes = min(current_pvs, desired_stripes)

      # exclusive storage: allocate whole, empty PVs only
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
                    " %d available, %d needed", size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      if stripes > current_pvs:
        # No warning issued for this, as it's no surprise
        stripes = current_pvs

      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
        _ThrowError("Not enough free space: required %s,"
                    " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
      _ThrowError("LV create failed (%s): %s",
                  result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)

  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM volume info using lvm_cmd.

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of dicts each with the parsed fields

    """
      raise errors.ProgrammerError("No fields specified")

    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    for line in result.stdout.splitlines():
      splitted_fields = line.strip().split(sep)

      # sanity check: every requested field must be present
      if len(fields) != len(splitted_fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
      data.append(splitted_fields)

  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs
    @param include_lvs: whether to include a list of LVs hosted on each PV

    @rtype: list
    @return: list of objects.LvmPvInfo objects

    """
    # We request "lv_name" field only if we care about LVs, so we don't get
    # a long list of entries with many duplicates unless we really have to.
    # The duplicate "pv_name" field will be ignored.
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size", lvfield])
    except errors.GenericError, err:
      logging.error("Can't get PV information: %s", err)

    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
    # out duplicates.
      info.sort(key=(lambda i: (i[0], i[5])))

    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
      # Beware of duplicates (check before inserting)
      if lastpvi and lastpvi.name == pv_name:
        if include_lvs and lv_name:
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
            lastpvi.lv_list.append(lv_name)
        if include_lvs and lv_name:
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                                    size=float(pv_size), free=float(pv_free),
                                    attributes=pv_attr, lv_list=lvl)

  def _GetExclusiveStorageVgFree(cls, vg_name):
    """Return the free disk space in the given VG, in exclusive storage mode.

    @type vg_name: string
    @param vg_name: VG name
    @rtype: float
    @return: free space in MiB

    """
    # in exclusive-storage mode free space is counted in whole empty PVs
    pvs_info = cls.GetPVInfo([vg_name])
    pv_size = cls._GetStdPvSize(pvs_info)
    num_pvs = len(cls._GetEmptyPvNames(pvs_info))
    return pv_size * num_pvs

  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
    """Get the free space info for specific VGs.

    @param vg_names: list of volume group names, if empty all will be returned
    @param excl_stor: whether exclusive_storage is enabled
    @param filter_readonly: whether to skip over readonly VGs

    @rtype: list
    @return: list of tuples (free_space, total_size, name) with free_space in
        MiB

    """
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
    except errors.GenericError, err:
      logging.error("Can't get VG information: %s", err)

    for vg_name, vg_free, vg_attr, vg_size in info:
      # (possibly) skip over vgs which are not writable
      if filter_readonly and vg_attr[0] == "r":
      # (possibly) skip over vgs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
      # Exclusive storage needs a different concept of free space
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
        assert es_free <= vg_free
      data.append((float(vg_free), float(vg_size), vg_name))

  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    if (not cls._VALID_NAME_RE.match(name) or
        name in cls._INVALID_NAMES or
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
      _ThrowError("Invalid LVM name '%s'", name)

    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
                           (self._vg_name, self._lv_name)])
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this logical volume.

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      # FIXME: "to to" typo in the message below (runtime string, left as-is)
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
      _ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)

    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=m", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)

    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))

    out = out[-1].strip().rstrip(",")
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))

    status, major, minor, pe_size, stripes = out
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)

    except (TypeError, ValueError), err:
      logging.error("lvs major/minor cannot be parsed: %s", str(err))

      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      logging.error("Can't parse vg extent size: %s", err)

      stripes = int(stripes)
    except (TypeError, ValueError), err:
      logging.error("Can't parse the number of stripes: %s", err)

    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. no backing storage

    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
      _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)

    """Shutdown the device.

    This is a no-op for the LV device type, as we don't deactivate the
    volumes on shutdown.

    """

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    parameter.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (read from it return I/O error). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
      ldisk_status = constants.LDS_FAULTY
      ldisk_status = constants.LDS_OKAY

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the LV device type.

    """

    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the LV device type.

    """

  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # remove existing snapshot if found
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
    _IgnoreError(snap.Remove)

    vg_info = self.GetVGInfo([self._vg_name], False)
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)

  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    """
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
      raw_tags = result.stdout.strip()
      # tags are comma-separated; each one is removed individually
      for tag in raw_tags.split(","):
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                   tag.strip(), self.dev_path]))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    BlockDev.SetInfo(self, text)

    self._RemoveOldInfo()

    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    if not backingstore:
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")
    # round up the growth to a multiple of the full stripe size so
    # lvextend does not fail on striped volumes
    full_stripe_size = self.pe_size * self.stripe_count
    rest = amount % full_stripe_size
      amount += full_stripe_size - rest
    cmd = ["lvextend", "-L", "+%dm" % amount]
      cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports the 'cling' allocation policy
    for alloc_policy in "contiguous", "cling", "normal":
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
      if not result.failed:
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
class DRBD8Status(object):
  """A DRBD status representation class.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  # captures: connection state, local role, remote role,
  # local disk state, remote disk state
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  # connection states (cs:)
  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  CSET_SYNC = compat.UniqueFrozenset([

  # disk states (ds:)
  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  # roles (ro:)
  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"

  def __init__(self, procline):
    # @param procline: one /proc/drbd status line for a single minor
    u = self.UNCONF_RE.match(procline)
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
      m = self.LINE_RE.match(procline)
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      self.cstatus = m.group(1)
      self.lrole = m.group(2)
      self.rrole = m.group(3)
      self.ldisk = m.group(4)
      self.rdisk = m.group(5)

    # end reading of data from the LINE_RE or UNCONF_RE

    # convenience boolean flags derived from the parsed fields
    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    m = self.SYNC_RE.match(procline)
      self.sync_percent = float(m.group(1))
      hours = int(m.group(2))
      minutes = int(m.group(3))
      seconds = int(m.group(4))
      self.est_time = hours * 3600 + minutes * 60 + seconds
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      if self.is_in_resync:
        self.sync_percent = 0
        self.sync_percent = None
      self.est_time = None
class BaseDRBD(BlockDev): # pylint: disable=W0223
  """Base DRBD class.

  This class contains a few bits of common functionality between the
  0.7 and 8.x versions of DRBD.

  """
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")

  _DRBD_MAJOR = 147
  _ST_UNCONFIGURED = "Unconfigured"
  _ST_WFCONNECTION = "WFConnection"
  _ST_CONNECTED = "Connected"

  _STATUS_FILE = constants.DRBD_STATUS_FILE
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"

  @staticmethod
  def _GetProcData(filename=_STATUS_FILE):
    """Return data from /proc/drbd.

    @return: the file contents, as a list of lines
    @raise errors.BlockDeviceError: if the file is empty or unreadable

    """
    try:
      data = utils.ReadFile(filename).splitlines()
    except EnvironmentError as err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
    if not data:
      _ThrowError("Can't read any data from %s", filename)
    return data

  @classmethod
  def _MassageProcData(cls, data):
    """Transform the output of _GetProdData into a nicer form.

    Continuation lines are joined to the minor line they belong to.

    @return: a dictionary of minor: joined lines from /proc/drbd
        for that minor

    """
    results = {}
    old_minor = old_line = None
    for line in data:
      if not line: # completely empty lines, as can be returned by drbd8.0+
        continue
      lresult = cls._VALID_LINE_RE.match(line)
      if lresult is not None:
        if old_minor is not None:
          results[old_minor] = old_line
        old_minor = int(lresult.group(1))
        old_line = line
      else:
        if old_minor is not None:
          old_line += " " + line.strip()
    # add last line
    if old_minor is not None:
      results[old_minor] = old_line
    return results

  @classmethod
  def _GetVersion(cls, proc_data):
    """Return the DRBD version.

    This will return a dict with keys:
      - k_major
      - k_minor
      - k_point
      - api
      - proto
      - proto2 (only on drbd > 8.2.X)

    @raise errors.BlockDeviceError: if the version line can't be parsed

    """
    first_line = proc_data[0].strip()
    version = cls._VERSION_RE.match(first_line)
    if not version:
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
                                    first_line)

    values = version.groups()
    retval = {
      "k_major": int(values[0]),
      "k_minor": int(values[1]),
      "k_point": int(values[2]),
      "api": int(values[3]),
      "proto": int(values[4]),
      }
    if values[5] is not None:
      retval["proto2"] = values[5]

    return retval

  @staticmethod
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
    """Returns DRBD usermode_helper currently set.

    @raise errors.BlockDeviceError: if the helper file is missing or empty

    """
    try:
      helper = utils.ReadFile(filename).splitlines()[0]
    except EnvironmentError as err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
    if not helper:
      _ThrowError("Can't read any data from %s", filename)
    return helper

  @staticmethod
  def _DevPath(minor):
    """Return the path to a drbd device for a given minor.

    """
    return "/dev/drbd%d" % minor

  @classmethod
  def GetUsedDevs(cls):
    """Compute the list of used DRBD devices.

    @return: a dict of minor: (state, line) for all configured minors

    """
    data = cls._GetProcData()

    used_devs = {}
    for line in data:
      match = cls._VALID_LINE_RE.match(line)
      if not match:
        continue
      minor = int(match.group(1))
      state = match.group(2)
      if state == cls._ST_UNCONFIGURED:
        continue
      used_devs[minor] = state, line

    return used_devs

  def _SetFromMinor(self, minor):
    """Set our parameters based on the given minor.

    This sets our minor variable and our dev_path.

    """
    if minor is None:
      self.minor = self.dev_path = None
      self.attached = False
    else:
      self.minor = minor
      self.dev_path = self._DevPath(minor)
      self.attached = True

  @staticmethod
  def _CheckMetaSize(meta_device):
    """Check if the given meta device looks like a valid one.

    This currently only checks the size, which must be around
    128MiB.

    @raise errors.BlockDeviceError: if the size is out of bounds

    """
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
    if result.failed:
      _ThrowError("Failed to get device size: %s - %s",
                  result.fail_reason, result.output)
    try:
      sectors = int(result.stdout)
    except (TypeError, ValueError):
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
    num_bytes = sectors * 512
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
      # note: unit spelling normalized to MiB (was "Mib")
      _ThrowError("Meta device too small (%.2fMiB)", (num_bytes / 1024 / 1024))
    # the maximum *valid* size of the meta device when living on top
    # of LVM is hard to compute: it depends on the number of stripes
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
    # size meta device; as such, we restrict it to 1GB (a little bit
    # too generous, but making assumptions about PE size is hard)
    if num_bytes > 1024 * 1024 * 1024:
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))

  def Rename(self, new_id):
    """Rename a device.

    This is not supported for drbd devices.

    """
    raise errors.ProgrammerError("Can't rename a drbd device")
class DRBD8(BaseDRBD):
  """DRBD v8.x block device.

  This implements the local host part of the DRBD device, i.e. it
  doesn't do anything to the supposed peer. If you need a fully
  connected DRBD pair, you need to use this class on both hosts.

  The unique_id for the drbd device is a (local_ip, local_port,
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
  two children: the data device and the meta_device. The meta device
  is checked for valid size and is zeroed on create.

  """
  # maximum allowed minor number; referenced by _FindUnusedMinor
  _MAX_MINORS = 255
  # cached pyparsing grammar, built lazily by _GetShowParser
  _PARSE_SHOW = None

  # timeout for the network reconfiguration in DisconnectNet
  _NET_RECONFIG_TIMEOUT = 60

  # command line options for barriers
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes"   # -i
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1365 def __init__(self, unique_id, children, size, params):
1366 if children and children.count(None) > 0:
1368 if len(children) not in (0, 2):
1369 raise ValueError("Invalid configuration data %s" % str(children))
1370 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
1371 raise ValueError("Invalid configuration data %s" % str(unique_id))
1372 (self._lhost, self._lport,
1373 self._rhost, self._rport,
1374 self._aminor, self._secret) = unique_id
1376 if not _CanReadDevice(children[1].dev_path):
1377 logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
1379 super(DRBD8, self).__init__(unique_id, children, size, params)
1380 self.major = self._DRBD_MAJOR
1381 version = self._GetVersion(self._GetProcData())
1382 if version["k_major"] != 8:
1383 _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
1384 " usage: kernel is %s.%s, ganeti wants 8.x",
1385 version["k_major"], version["k_minor"])
1387 if (self._lhost is not None and self._lhost == self._rhost and
1388 self._lport == self._rport):
1389 raise ValueError("Invalid configuration data, same local/remote %s" %
1394 def _InitMeta(cls, minor, dev_path):
1395 """Initialize a meta device.
1397 This will not work if the given minor is in use.
1400 # Zero the metadata first, in order to make sure drbdmeta doesn't
1401 # try to auto-detect existing filesystems or similar (see
1402 # http://code.google.com/p/ganeti/issues/detail?id=182); we only
1403 # care about the first 128MB of data in the device, even though it
1405 result = utils.RunCmd([constants.DD_CMD,
1406 "if=/dev/zero", "of=%s" % dev_path,
1407 "bs=1048576", "count=128", "oflag=direct"])
1409 _ThrowError("Can't wipe the meta device: %s", result.output)
1411 result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
1412 "v08", dev_path, "0", "create-md"])
1414 _ThrowError("Can't initialize meta device: %s", result.output)
1417 def _FindUnusedMinor(cls):
1418 """Find an unused DRBD device.
1420 This is specific to 8.x as the minors are allocated dynamically,
1421 so non-existing numbers up to a max minor count are actually free.
1424 data = cls._GetProcData()
1428 match = cls._UNUSED_LINE_RE.match(line)
1430 return int(match.group(1))
1431 match = cls._VALID_LINE_RE.match(line)
1433 minor = int(match.group(1))
1434 highest = max(highest, minor)
1435 if highest is None: # there are no minors in use at all
1437 if highest >= cls._MAX_MINORS:
1438 logging.error("Error: no free drbd minors!")
1439 raise errors.BlockDeviceError("Can't find a free DRBD minor")
1443 def _GetShowParser(cls):
1444 """Return a parser for `drbd show` output.
1446 This will either create or return an already-created parser for the
1447 output of the command `drbd show`.
1450 if cls._PARSE_SHOW is not None:
1451 return cls._PARSE_SHOW
1454 lbrace = pyp.Literal("{").suppress()
1455 rbrace = pyp.Literal("}").suppress()
1456 lbracket = pyp.Literal("[").suppress()
1457 rbracket = pyp.Literal("]").suppress()
1458 semi = pyp.Literal(";").suppress()
1459 colon = pyp.Literal(":").suppress()
1460 # this also converts the value to an int
1461 number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))
1463 comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
1464 defa = pyp.Literal("_is_default").suppress()
1465 dbl_quote = pyp.Literal('"').suppress()
1467 keyword = pyp.Word(pyp.alphanums + "-")
1470 value = pyp.Word(pyp.alphanums + "_-/.:")
1471 quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
1472 ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
1473 pyp.Word(pyp.nums + ".") + colon + number)
1474 ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
1475 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
1476 pyp.Optional(rbracket) + colon + number)
1477 # meta device, extended syntax
1478 meta_value = ((value ^ quoted) + lbracket + number + rbracket)
1479 # device name, extended syntax
1480 device_value = pyp.Literal("minor").suppress() + number
1483 stmt = (~rbrace + keyword + ~lbrace +
1484 pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
1486 pyp.Optional(defa) + semi +
1487 pyp.Optional(pyp.restOfLine).suppress())
1490 section_name = pyp.Word(pyp.alphas + "_")
1491 section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace
1493 bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
1496 cls._PARSE_SHOW = bnf
1501 def _GetShowData(cls, minor):
1502 """Return the `drbdsetup show` data for a minor.
1505 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
1507 logging.error("Can't display the drbd config: %s - %s",
1508 result.fail_reason, result.output)
1510 return result.stdout
1513 def _GetDevInfo(cls, out):
1514 """Parse details about a given DRBD minor.
1516 This return, if available, the local backing device (as a path)
1517 and the local and remote (ip, port) information from a string
1518 containing the output of the `drbdsetup show` command as returned
1526 bnf = cls._GetShowParser()
1530 results = bnf.parseString(out)
1531 except pyp.ParseException, err:
1532 _ThrowError("Can't parse drbdsetup show output: %s", str(err))
1534 # and massage the results into our desired format
1535 for section in results:
1537 if sname == "_this_host":
1538 for lst in section[1:]:
1539 if lst[0] == "disk":
1540 data["local_dev"] = lst[1]
1541 elif lst[0] == "meta-disk":
1542 data["meta_dev"] = lst[1]
1543 data["meta_index"] = lst[2]
1544 elif lst[0] == "address":
1545 data["local_addr"] = tuple(lst[1:])
1546 elif sname == "_remote_host":
1547 for lst in section[1:]:
1548 if lst[0] == "address":
1549 data["remote_addr"] = tuple(lst[1:])
1552 def _MatchesLocal(self, info):
1553 """Test if our local config matches with an existing device.
1555 The parameter should be as returned from `_GetDevInfo()`. This
1556 method tests if our local backing device is the same as the one in
1557 the info parameter, in effect testing if we look like the given
1562 backend, meta = self._children
1564 backend = meta = None
1566 if backend is not None:
1567 retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
1569 retval = ("local_dev" not in info)
1571 if meta is not None:
1572 retval = retval and ("meta_dev" in info and
1573 info["meta_dev"] == meta.dev_path)
1574 retval = retval and ("meta_index" in info and
1575 info["meta_index"] == 0)
1577 retval = retval and ("meta_dev" not in info and
1578 "meta_index" not in info)
1581 def _MatchesNet(self, info):
1582 """Test if our network config matches with an existing device.
1584 The parameter should be as returned from `_GetDevInfo()`. This
1585 method tests if our network configuration is the same as the one
1586 in the info parameter, in effect testing if we look like the given
1590 if (((self._lhost is None and not ("local_addr" in info)) and
1591 (self._rhost is None and not ("remote_addr" in info)))):
1594 if self._lhost is None:
1597 if not ("local_addr" in info and
1598 "remote_addr" in info):
1601 retval = (info["local_addr"] == (self._lhost, self._lport))
1602 retval = (retval and
1603 info["remote_addr"] == (self._rhost, self._rport))
1606 def _AssembleLocal(self, minor, backend, meta, size):
1607 """Configure the local part of a DRBD device.
1610 args = ["drbdsetup", self._DevPath(minor), "disk",
1615 args.extend(["-d", "%sm" % size])
1617 version = self._GetVersion(self._GetProcData())
1618 vmaj = version["k_major"]
1619 vmin = version["k_minor"]
1620 vrel = version["k_point"]
1623 self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
1624 self.params[constants.LDP_BARRIERS],
1625 self.params[constants.LDP_NO_META_FLUSH])
1626 args.extend(barrier_args)
1628 if self.params[constants.LDP_DISK_CUSTOM]:
1629 args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))
1631 result = utils.RunCmd(args)
1633 _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
1636 def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
1637 disable_meta_flush):
1638 """Compute the DRBD command line parameters for disk barriers
1640 Returns a list of the disk barrier parameters as requested via the
1641 disabled_barriers and disable_meta_flush arguments, and according to the
1642 supported ones in the DRBD version vmaj.vmin.vrel
1644 If the desired option is unsupported, raises errors.BlockDeviceError.
1647 disabled_barriers_set = frozenset(disabled_barriers)
1648 if not disabled_barriers_set in constants.DRBD_VALID_BARRIER_OPT:
1649 raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
1650 " barriers" % disabled_barriers)
1654 # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
1656 if not vmaj == 8 and vmin in (0, 2, 3):
1657 raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
1660 def _AppendOrRaise(option, min_version):
1661 """Helper for DRBD options"""
1662 if min_version is not None and vrel >= min_version:
1665 raise errors.BlockDeviceError("Could not use the option %s as the"
1666 " DRBD version %d.%d.%d does not support"
1667 " it." % (option, vmaj, vmin, vrel))
1669 # the minimum version for each feature is encoded via pairs of (minor
1670 # version -> x) where x is version in which support for the option was
1672 meta_flush_supported = disk_flush_supported = {
1678 disk_drain_supported = {
1683 disk_barriers_supported = {
1688 if disable_meta_flush:
1689 _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
1690 meta_flush_supported.get(vmin, None))
1693 if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
1694 _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
1695 disk_flush_supported.get(vmin, None))
1698 if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
1699 _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
1700 disk_drain_supported.get(vmin, None))
1703 if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
1704 _AppendOrRaise(cls._DISABLE_DISK_OPTION,
1705 disk_barriers_supported.get(vmin, None))
1709 def _AssembleNet(self, minor, net_info, protocol,
1710 dual_pri=False, hmac=None, secret=None):
1711 """Configure the network part of the device.
1714 lhost, lport, rhost, rport = net_info
1715 if None in net_info:
1716 # we don't want network connection and actually want to make
1718 self._ShutdownNet(minor)
1721 # Workaround for a race condition. When DRBD is doing its dance to
1722 # establish a connection with its peer, it also sends the
1723 # synchronization speed over the wire. In some cases setting the
1724 # sync speed only after setting up both sides can race with DRBD
1725 # connecting, hence we set it here before telling DRBD anything
1727 sync_errors = self._SetMinorSyncParams(minor, self.params)
1729 _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
1730 (minor, utils.CommaJoin(sync_errors)))
1732 if netutils.IP6Address.IsValid(lhost):
1733 if not netutils.IP6Address.IsValid(rhost):
1734 _ThrowError("drbd%d: can't connect ip %s to ip %s" %
1735 (minor, lhost, rhost))
1737 elif netutils.IP4Address.IsValid(lhost):
1738 if not netutils.IP4Address.IsValid(rhost):
1739 _ThrowError("drbd%d: can't connect ip %s to ip %s" %
1740 (minor, lhost, rhost))
1743 _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))
1745 args = ["drbdsetup", self._DevPath(minor), "net",
1746 "%s:%s:%s" % (family, lhost, lport),
1747 "%s:%s:%s" % (family, rhost, rport), protocol,
1748 "-A", "discard-zero-changes",
1755 args.extend(["-a", hmac, "-x", secret])
1757 if self.params[constants.LDP_NET_CUSTOM]:
1758 args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))
1760 result = utils.RunCmd(args)
1762 _ThrowError("drbd%d: can't setup network: %s - %s",
1763 minor, result.fail_reason, result.output)
1765 def _CheckNetworkConfig():
1766 info = self._GetDevInfo(self._GetShowData(minor))
1767 if not "local_addr" in info or not "remote_addr" in info:
1768 raise utils.RetryAgain()
1770 if (info["local_addr"] != (lhost, lport) or
1771 info["remote_addr"] != (rhost, rport)):
1772 raise utils.RetryAgain()
1775 utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
1776 except utils.RetryTimeout:
1777 _ThrowError("drbd%d: timeout while configuring network", minor)
1779 def AddChildren(self, devices):
1780 """Add a disk to the DRBD device.
1783 if self.minor is None:
1784 _ThrowError("drbd%d: can't attach to dbrd8 during AddChildren",
1786 if len(devices) != 2:
1787 _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
1788 info = self._GetDevInfo(self._GetShowData(self.minor))
1789 if "local_dev" in info:
1790 _ThrowError("drbd%d: already attached to a local disk", self.minor)
1791 backend, meta = devices
1792 if backend.dev_path is None or meta.dev_path is None:
1793 _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
1796 self._CheckMetaSize(meta.dev_path)
1797 self._InitMeta(self._FindUnusedMinor(), meta.dev_path)
1799 self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
1800 self._children = devices
1802 def RemoveChildren(self, devices):
1803 """Detach the drbd device from local storage.
1806 if self.minor is None:
1807 _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
1809 # early return if we don't actually have backing storage
1810 info = self._GetDevInfo(self._GetShowData(self.minor))
1811 if "local_dev" not in info:
1813 if len(self._children) != 2:
1814 _ThrowError("drbd%d: we don't have two children: %s", self.minor,
1816 if self._children.count(None) == 2: # we don't actually have children :)
1817 logging.warning("drbd%d: requested detach while detached", self.minor)
1819 if len(devices) != 2:
1820 _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
1821 for child, dev in zip(self._children, devices):
1822 if dev != child.dev_path:
1823 _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
1824 " RemoveChildren", self.minor, dev, child.dev_path)
1826 self._ShutdownLocal(self.minor)
1830 def _SetMinorSyncParams(cls, minor, params):
1831 """Set the parameters of the DRBD syncer.
1833 This is the low-level implementation.
1836 @param minor: the drbd minor whose settings we change
1838 @param params: LD level disk parameters related to the synchronization
1840 @return: a list of error messages
1844 args = ["drbdsetup", cls._DevPath(minor), "syncer"]
1845 if params[constants.LDP_DYNAMIC_RESYNC]:
1846 version = cls._GetVersion(cls._GetProcData())
1847 vmin = version["k_minor"]
1848 vrel = version["k_point"]
1850 # By definition we are using 8.x, so just check the rest of the version
1852 if vmin != 3 or vrel < 9:
1853 msg = ("The current DRBD version (8.%d.%d) does not support the "
1854 "dynamic resync speed controller" % (vmin, vrel))
1858 if params[constants.LDP_PLAN_AHEAD] == 0:
1859 msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
1860 " controller at DRBD level. If you want to disable it, please"
1861 " set the dynamic-resync disk parameter to False.")
1865 # add the c-* parameters to args
1866 args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
1867 "--c-fill-target", params[constants.LDP_FILL_TARGET],
1868 "--c-delay-target", params[constants.LDP_DELAY_TARGET],
1869 "--c-max-rate", params[constants.LDP_MAX_RATE],
1870 "--c-min-rate", params[constants.LDP_MIN_RATE],
1874 args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
1876 args.append("--create-device")
1877 result = utils.RunCmd(args)
1879 msg = ("Can't change syncer rate: %s - %s" %
1880 (result.fail_reason, result.output))
1886 def SetSyncParams(self, params):
1887 """Set the synchronization parameters of the DRBD syncer.
1890 @param params: LD level disk parameters related to the synchronization
1892 @return: a list of error messages, emitted both by the current node and by
1893 children. An empty list means no errors
1896 if self.minor is None:
1897 err = "Not attached during SetSyncParams"
1901 children_result = super(DRBD8, self).SetSyncParams(params)
1902 children_result.extend(self._SetMinorSyncParams(self.minor, params))
1903 return children_result
1905 def PauseResumeSync(self, pause):
1906 """Pauses or resumes the sync of a DRBD device.
1908 @param pause: Wether to pause or resume
1909 @return: the success of the operation
1912 if self.minor is None:
1913 logging.info("Not attached during PauseSync")
1916 children_result = super(DRBD8, self).PauseResumeSync(pause)
1923 result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
1925 logging.error("Can't %s: %s - %s", cmd,
1926 result.fail_reason, result.output)
1927 return not result.failed and children_result
1929 def GetProcStatus(self):
1930 """Return device data from /proc.
1933 if self.minor is None:
1934 _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
1935 proc_info = self._MassageProcData(self._GetProcData())
1936 if self.minor not in proc_info:
1937 _ThrowError("drbd%d: can't find myself in /proc", self.minor)
1938 return DRBD8Status(proc_info[self.minor])
1940 def GetSyncStatus(self):
1941 """Returns the sync status of the device.
1944 If sync_percent is None, it means all is ok
1945 If estimated_time is None, it means we can't estimate
1946 the time needed, otherwise it's the time left in seconds.
1949 We set the is_degraded parameter to True on two conditions:
1950 network not connected or local disk missing.
1952 We compute the ldisk parameter based on whether we have a local
1955 @rtype: objects.BlockDevStatus
1958 if self.minor is None and not self.Attach():
1959 _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)
1961 stats = self.GetProcStatus()
1962 is_degraded = not stats.is_connected or not stats.is_disk_uptodate
1964 if stats.is_disk_uptodate:
1965 ldisk_status = constants.LDS_OKAY
1966 elif stats.is_diskless:
1967 ldisk_status = constants.LDS_FAULTY
1969 ldisk_status = constants.LDS_UNKNOWN
1971 return objects.BlockDevStatus(dev_path=self.dev_path,
1974 sync_percent=stats.sync_percent,
1975 estimated_time=stats.est_time,
1976 is_degraded=is_degraded,
1977 ldisk_status=ldisk_status)
1979 def Open(self, force=False):
1980 """Make the local state primary.
1982 If the 'force' parameter is given, the '-o' option is passed to
1983 drbdsetup. Since this is a potentially dangerous operation, the
1984 force flag should be only given after creation, when it actually
1988 if self.minor is None and not self.Attach():
1989 logging.error("DRBD cannot attach to a device during open")
1991 cmd = ["drbdsetup", self.dev_path, "primary"]
1994 result = utils.RunCmd(cmd)
1996 _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
2000 """Make the local state secondary.
2002 This will, of course, fail if the device is in use.
2005 if self.minor is None and not self.Attach():
2006 _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
2007 result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
2009 _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
2010 self.minor, result.output)
2012 def DisconnectNet(self):
2013 """Removes network configuration.
2015 This method shutdowns the network side of the device.
2017 The method will wait up to a hardcoded timeout for the device to
2018 go into standalone after the 'disconnect' command before
2019 re-configuring it, as sometimes it takes a while for the
2020 disconnect to actually propagate and thus we might issue a 'net'
2021 command while the device is still connected. If the device will
2022 still be attached to the network and we time out, we raise an
2026 if self.minor is None:
2027 _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)
2029 if None in (self._lhost, self._lport, self._rhost, self._rport):
2030 _ThrowError("drbd%d: DRBD disk missing network info in"
2031 " DisconnectNet()", self.minor)
2033 class _DisconnectStatus:
2034 def __init__(self, ever_disconnected):
2035 self.ever_disconnected = ever_disconnected
2037 dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))
2039 def _WaitForDisconnect():
2040 if self.GetProcStatus().is_standalone:
2043 # retry the disconnect, it seems possible that due to a well-time
2044 # disconnect on the peer, my disconnect command might be ignored and
2046 dstatus.ever_disconnected = \
2047 _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected
2049 raise utils.RetryAgain()
2052 start_time = time.time()
2055 # Start delay at 100 milliseconds and grow up to 2 seconds
2056 utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
2057 self._NET_RECONFIG_TIMEOUT)
2058 except utils.RetryTimeout:
2059 if dstatus.ever_disconnected:
2060 msg = ("drbd%d: device did not react to the"
2061 " 'disconnect' command in a timely manner")
2063 msg = "drbd%d: can't shutdown network, even after multiple retries"
2065 _ThrowError(msg, self.minor)
2067 reconfig_time = time.time() - start_time
2068 if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
2069 logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
2070 self.minor, reconfig_time)
2072 def AttachNet(self, multimaster):
2073 """Reconnects the network.
2075 This method connects the network side of the device with a
2076 specified multi-master flag. The device needs to be 'Standalone'
2077 but have valid network configuration data.
2080 - multimaster: init the network in dual-primary mode
2083 if self.minor is None:
2084 _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)
2086 if None in (self._lhost, self._lport, self._rhost, self._rport):
2087 _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)
2089 status = self.GetProcStatus()
2091 if not status.is_standalone:
2092 _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)
2094 self._AssembleNet(self.minor,
2095 (self._lhost, self._lport, self._rhost, self._rport),
2096 constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
2097 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2100 """Check if our minor is configured.
2102 This doesn't do any device configurations - it only checks if the
2103 minor is in a state different from Unconfigured.
2105 Note that this function will not change the state of the system in
2106 any way (except in case of side-effects caused by reading from
2110 used_devs = self.GetUsedDevs()
2111 if self._aminor in used_devs:
2112 minor = self._aminor
2116 self._SetFromMinor(minor)
2117 return minor is not None
2120 """Assemble the drbd.
2123 - if we have a configured device, we try to ensure that it matches
2125 - if not, we create it from zero
2126 - anyway, set the device parameters
2129 super(DRBD8, self).Assemble()
2132 if self.minor is None:
2133 # local device completely unconfigured
2134 self._FastAssemble()
2136 # we have to recheck the local and network status and try to fix
2138 self._SlowAssemble()
2140 sync_errors = self.SetSyncParams(self.params)
2142 _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
2143 (self.minor, utils.CommaJoin(sync_errors)))
2145 def _SlowAssemble(self):
2146 """Assembles the DRBD device from a (partially) configured device.
2148 In case of partially attached (local device matches but no network
2149 setup), we perform the network attach. If successful, we re-test
2150 the attach if can return success.
2153 # TODO: Rewrite to not use a for loop just because there is 'break'
2154 # pylint: disable=W0631
2155 net_data = (self._lhost, self._lport, self._rhost, self._rport)
2156 for minor in (self._aminor,):
2157 info = self._GetDevInfo(self._GetShowData(minor))
2158 match_l = self._MatchesLocal(info)
2159 match_r = self._MatchesNet(info)
2161 if match_l and match_r:
2162 # everything matches
2165 if match_l and not match_r and "local_addr" not in info:
2166 # disk matches, but not attached to network, attach and recheck
2167 self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
2168 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2169 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2172 _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
2173 " show' disagrees", minor)
2175 if match_r and "local_dev" not in info:
2176 # no local disk, but network attached and it matches
2177 self._AssembleLocal(minor, self._children[0].dev_path,
2178 self._children[1].dev_path, self.size)
2179 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2182 _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
2183 " show' disagrees", minor)
2185 # this case must be considered only if we actually have local
2186 # storage, i.e. not in diskless mode, because all diskless
2187 # devices are equal from the point of view of local
2189 if (match_l and "local_dev" in info and
2190 not match_r and "local_addr" in info):
2191 # strange case - the device network part points to somewhere
2192 # else, even though its local storage is ours; as we own the
2193 # drbd space, we try to disconnect from the remote peer and
2194 # reconnect to our correct one
2196 self._ShutdownNet(minor)
2197 except errors.BlockDeviceError, err:
2198 _ThrowError("drbd%d: device has correct local storage, wrong"
2199 " remote peer and is unable to disconnect in order"
2200 " to attach to the correct peer: %s", minor, str(err))
2201 # note: _AssembleNet also handles the case when we don't want
2202 # local storage (i.e. one or more of the _[lr](host|port) is
2204 self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
2205 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2206 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2209 _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
2210 " show' disagrees", minor)
2215 self._SetFromMinor(minor)
2217 _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
2220 def _FastAssemble(self):
2221 """Assemble the drbd device from zero.
2223 This is run when in Assemble we detect our minor is unused.
2226 minor = self._aminor
2227 if self._children and self._children[0] and self._children[1]:
2228 self._AssembleLocal(minor, self._children[0].dev_path,
2229 self._children[1].dev_path, self.size)
2230 if self._lhost and self._lport and self._rhost and self._rport:
2231 self._AssembleNet(minor,
2232 (self._lhost, self._lport, self._rhost, self._rport),
2233 constants.DRBD_NET_PROTOCOL,
2234 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2235 self._SetFromMinor(minor)
2238 def _ShutdownLocal(cls, minor):
2239 """Detach from the local device.
2241 I/Os will continue to be served from the remote device. If we
2242 don't have a remote device, this operation will fail.
2245 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
2247 _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)
2250 def _ShutdownNet(cls, minor):
2251 """Disconnect from the remote peer.
2253 This fails if we don't have a local device.
2256 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
2258 _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)
2261 def _ShutdownAll(cls, minor):
2262 """Deactivate the device.
2264 This will, of course, fail if the device is in use.
2267 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
2269 _ThrowError("drbd%d: can't shutdown drbd device: %s",
2270 minor, result.output)
2273 """Shutdown the DRBD device.
2276 if self.minor is None and not self.Attach():
2277 logging.info("drbd%d: not attached during Shutdown()", self._aminor)
2281 self.dev_path = None
2282 self._ShutdownAll(minor)
2285 """Stub remove for DRBD devices.
2291 def Create(cls, unique_id, children, size, params, excl_stor):
2292 """Create a new DRBD8 device.
2294 Since DRBD devices are not created per se, just assembled, this
2295 function only initializes the metadata.
2298 if len(children) != 2:
2299 raise errors.ProgrammerError("Invalid setup for the drbd device")
2301 raise errors.ProgrammerError("DRBD device requested with"
2302 " exclusive_storage")
2303 # check that the minor is unused
2304 aminor = unique_id[4]
2305 proc_info = cls._MassageProcData(cls._GetProcData())
2306 if aminor in proc_info:
2307 status = DRBD8Status(proc_info[aminor])
2308 in_use = status.is_in_use
2312 _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
2315 if not meta.Attach():
2316 _ThrowError("drbd%d: can't attach to meta device '%s'",
2318 cls._CheckMetaSize(meta.dev_path)
2319 cls._InitMeta(aminor, meta.dev_path)
2320 return cls(unique_id, children, size, params)
2322 def Grow(self, amount, dryrun, backingstore):
2323 """Resize the DRBD device and its backing storage.
2326 if self.minor is None:
2327 _ThrowError("drbd%d: Grow called while not attached", self._aminor)
2328 if len(self._children) != 2 or None in self._children:
2329 _ThrowError("drbd%d: cannot grow diskless device", self.minor)
2330 self._children[0].Grow(amount, dryrun, backingstore)
2331 if dryrun or backingstore:
2332 # DRBD does not support dry-run mode and is not backing storage,
2333 # so we'll return here
2335 result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
2336 "%dm" % (self.size + amount)])
2338 _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
class FileStorage(BlockDev):
  """File device.

  This class represents the a file storage backend device.

  The unique_id for the file device is a (file_driver, file_path) tuple.

  """
  def __init__(self, unique_id, children, size, params):
    """Initalizes a file device backend.

    @raise errors.BlockDeviceError: if children are passed (file devices
        have no children)
    @raise ValueError: if unique_id is not a two-element tuple/list

    """
    if children:
      raise errors.BlockDeviceError("Invalid setup for file device")
    super(FileStorage, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.driver = unique_id[0]
    self.dev_path = unique_id[1]

    CheckFileStoragePath(self.dev_path)

    self.Attach()

  def Assemble(self):
    """Assemble the device.

    Checks whether the file device exists, raises BlockDeviceError otherwise.

    """
    if not os.path.exists(self.dev_path):
      _ThrowError("File device '%s' does not exist" % self.dev_path)

  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for the file type, as we don't deactivate
    the file on shutdown.

    """
    pass

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the file type.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the file type.

    """
    pass

  def Remove(self):
    """Remove the file backing the block device.

    @rtype: boolean
    @return: True if the removal was successful

    """
    try:
      os.remove(self.dev_path)
    except OSError as err:
      # a missing file is fine (already removed); anything else is fatal
      if err.errno != errno.ENOENT:
        _ThrowError("Can't remove file '%s': %s", self.dev_path, err)

  def Rename(self, new_id):
    """Renames the file.

    """
    # TODO: implement rename for file-based storage
    _ThrowError("Rename is not supported for file-based storage")

  def Grow(self, amount, dryrun, backingstore):
    """Grow the file.

    @param amount: the amount (in mebibytes) to grow with

    """
    if not backingstore:
      return
    # Check that the file exists
    self.Assemble()
    current_size = self.GetActualSize()
    new_size = current_size + amount * 1024 * 1024
    assert new_size > current_size, "Cannot Grow with a negative amount"
    # We can't really simulate the growth
    if dryrun:
      return
    try:
      f = open(self.dev_path, "a+")
      f.truncate(new_size)
      f.close()
    except EnvironmentError as err:
      # fixed format string: was "%" instead of "%s"
      _ThrowError("Error in file growth: %s", str(err))

  def Attach(self):
    """Attach to an existing file.

    Check if this file already exists.

    @rtype: boolean
    @return: True if file exists

    """
    self.attached = os.path.exists(self.dev_path)
    return self.attached

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    try:
      st = os.stat(self.dev_path)
      return st.st_size
    except OSError as err:
      _ThrowError("Can't stat %s: %s", self.dev_path, err)

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new file.

    @param size: the size of file in MiB

    @rtype: L{bdev.FileStorage}
    @return: an instance of FileStorage

    """
    if excl_stor:
      raise errors.ProgrammerError("FileStorage device requested with"
                                   " exclusive_storage")
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    dev_path = unique_id[1]

    CheckFileStoragePath(dev_path)

    try:
      # O_EXCL makes creation fail if the file already exists
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
      f = os.fdopen(fd, "w")
      f.truncate(size * 1024 * 1024)
      f.close()
    except EnvironmentError as err:
      if err.errno == errno.EEXIST:
        _ThrowError("File already existing: %s", dev_path)
      # fixed format string: was "%" instead of "%s"
      _ThrowError("Error in file creation: %s", str(err))

    return FileStorage(unique_id, children, size, params)
class PersistentBlockDevice(BlockDev):
  """A block device with persistent node

  May be either directly attached, or exposed through DM (e.g. dm-multipath).
  udev helpers are probably required to give persistent, human-friendly
  names.

  For the time being, pathnames are required to lie under /dev.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to a static block device.

    The unique_id is a path under /dev.

    @raise ValueError: for an invalid unique_id or a path outside /dev

    """
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
                                                params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.dev_path = unique_id[1]
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
      raise ValueError("Full path '%s' lies outside /dev" %
                       os.path.realpath(self.dev_path))
    # TODO: this is just a safety guard checking that we only deal with devices
    # we know how to handle. In the future this will be integrated with
    # external storage backends and possible values will probably be collected
    # from the cluster configuration.
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
      raise ValueError("Got persistent block device of invalid type: %s" %
                       unique_id[0])

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new device

    This is a noop, we only return a PersistentBlockDevice instance

    """
    if excl_stor:
      raise errors.ProgrammerError("Persistent block device requested with"
                                   " exclusive_storage")
    return PersistentBlockDevice(unique_id, children, 0, params)

  def Remove(self):
    """Remove a device

    This is a noop

    """
    pass

  def Rename(self, new_id):
    """Rename this device.

    """
    _ThrowError("Rename is not supported for PersistentBlockDev storage")

  def Attach(self):
    """Attach to an existing block device.

    @rtype: boolean
    @return: True if the device exists and is a block device

    """
    self.attached = False
    try:
      st = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    This is a no-op for persistent block devices.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for persistent block devices.

    """
    pass

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for persistent block devices.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for persistent block devices.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    Not supported for this device type.

    """
    _ThrowError("Grow is not supported for PersistentBlockDev storage")
class RADOSBlockDevice(BlockDev):
  """A RADOS Block Device (rbd).

  This class implements the RADOS Block Device for the backend. You need
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
  this to be functional.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    @raise ValueError: if unique_id is not a (driver, rbd_name) pair

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new rbd device.

    Provision a new rbd volume inside a RADOS pool.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("RBD device requested with"
                                   " exclusive_storage")
    rbd_pool = params[constants.LDP_POOL]
    rbd_name = unique_id[1]

    # Provision a new rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
           rbd_name, "--size", "%s" % size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd creation failed (%s): %s",
                  result.fail_reason, result.output)

    return RADOSBlockDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the rbd device.

    """
    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]

    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Remove the actual Volume (Image) from the RADOS cluster.
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
                  result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this device.

    Not implemented for rbd devices.

    """
    pass

  def Attach(self):
    """Attach to an existing rbd device.

    This method maps the rbd volume that matches our name with
    an rbd device and then attaches to this device.

    @rtype: boolean
    @return: True if the mapped device is a block device

    """
    self.attached = False

    # Map the rbd volume to a block device under /dev
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)

    try:
      st = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def _MapVolumeToBlockdev(self, unique_id):
    """Maps existing rbd volumes to block devices.

    This method should be idempotent if the mapping already exists.

    @rtype: string
    @return: the block device path that corresponds to the volume

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if rbd_dev:
      # The mapping exists. Return it.
      return rbd_dev

    # The mapping doesn't exist. Create it.
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
    result = utils.RunCmd(map_cmd)
    if result.failed:
      _ThrowError("rbd map failed (%s): %s",
                  result.fail_reason, result.output)

    # Find the corresponding rbd device.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd map succeeded, but showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if not rbd_dev:
      _ThrowError("rbd map succeeded, but could not find the rbd block"
                  " device in output of showmapped, for volume: %s", name)

    # The device was successfully mapped. Return it.
    return rbd_dev

  @staticmethod
  def _ParseRbdShowmappedOutput(output, volume_name):
    """Parse the output of `rbd showmapped'.

    This method parses the output of `rbd showmapped' and returns
    the rbd block device path (e.g. /dev/rbd0) that matches the
    given rbd volume.

    @type output: string
    @param output: the whole output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    # NOTE(review): positions/separator assume the classic five-column,
    # tab-separated `rbd showmapped' table — confirm against the deployed
    # rbd version, newer releases changed this format
    allfields = 5
    volumefield = 2
    devicefield = 4

    field_sep = "\t"

    lines = output.splitlines()
    # list comprehension instead of py2 map(): same behavior, and keeps
    # the later truthiness/len() checks valid
    splitted_lines = [line.split(field_sep) for line in lines]

    # Check empty output.
    if not splitted_lines:
      _ThrowError("rbd showmapped returned empty output")

    # Check showmapped header line, to determine number of fields.
    field_cnt = len(splitted_lines[0])
    if field_cnt != allfields:
      _ThrowError("Cannot parse rbd showmapped output because its format"
                  " seems to have changed; expected %s fields, found %s",
                  allfields, field_cnt)

    matched_lines = \
      [line for line in splitted_lines
       if len(line) == allfields and line[volumefield] == volume_name]

    if len(matched_lines) > 1:
      _ThrowError("The rbd volume %s is mapped more than once."
                  " This shouldn't happen, try to unmap the extra"
                  " devices manually.", volume_name)

    if matched_lines:
      # rbd block device found. Return it.
      rbd_dev = matched_lines[0][devicefield]
      return rbd_dev

    # The given volume is not mapped.
    return None

  def Assemble(self):
    """Assemble the device.

    This is a no-op for the rbd device type; the device is assembled
    implicitly by Attach().

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # Unmap the block device from the Volume.
    self._UnmapVolumeFromBlockdev(self.unique_id)

    self.minor = None
    self.dev_path = None

  def _UnmapVolumeFromBlockdev(self, unique_id):
    """Unmaps the rbd device from the Volume it is mapped.

    Unmaps the rbd device from the Volume it was previously mapped to.
    This method should be idempotent if the Volume isn't mapped.

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed [during unmap](%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if rbd_dev:
      # The mapping exists. Unmap the rbd device.
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
      result = utils.RunCmd(unmap_cmd)
      if result.failed:
        _ThrowError("rbd unmap failed (%s): %s",
                    result.fail_reason, result.output)

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the rbd device type.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the rbd device type.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to rbd device during Grow()")

    if dryrun:
      # the rbd tool does not support dry runs of resize operations.
      # Since rbd volumes are thinly provisioned, we assume
      # there is always enough free space for the operation.
      return

    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]
    new_size = self.size + amount

    # Resize the rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
           rbd_name, "--size", "%s" % new_size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd resize failed (%s): %s",
                  result.fail_reason, result.output)
class ExtStorageDevice(BlockDev):
  """A block device provided by an ExtStorage Provider.

  This class implements the External Storage Interface, which means
  handling of the externally provided block devices.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an extstorage block device.

    @raise ValueError: if unique_id is not a (driver, vol_name) pair

    """
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.vol_name = unique_id
    self.ext_params = params

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new extstorage device.

    Provision a new volume using an extstorage provider, which will
    then be mapped to a block device.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("extstorage device requested with"
                                   " exclusive_storage")

    # Call the External Storage's create script,
    # to provision a new Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
                      params, str(size))

    return ExtStorageDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the extstorage device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Call the External Storage's remove script,
    # to remove the Volume from the External Storage
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
                      self.ext_params)

  def Rename(self, new_id):
    """Rename this device.

    Not implemented for extstorage devices.

    """
    pass

  def Attach(self):
    """Attach to an existing extstorage device.

    This method maps the extstorage volume that matches our name with
    a corresponding block device and then attaches to this device.

    @rtype: boolean
    @return: True if the attached path is a block device

    """
    self.attached = False

    # Call the External Storage's attach script,
    # to attach an existing Volume to a block device under /dev
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
                                      self.unique_id, self.ext_params)

    try:
      st = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    This is a no-op for the extstorage device type; the device is
    assembled implicitly by Attach().

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # Call the External Storage's detach script,
    # to detach an existing Volume from it's block device under /dev
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
                      self.ext_params)

    self.minor = None
    self.dev_path = None

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the extstorage device type.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the extstorage device type.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # Call the External Storage's grow script,
    # to grow an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      self.ext_params, str(self.size), grow=str(new_size))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # Call the External Storage's setinfo script,
    # to set metadata for an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      self.ext_params, metadata=text)
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: string
  @param size: the size of the Volume in mebibytes
  @type grow: string
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # use equality, not identity, for string constants
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type name: string
  @param name: the ExtStorage Provider's name
  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnose message on error

  """
  if base_dir is None:
    es_base_dir = pathutils.ES_SEARCH_PATH
  else:
    es_base_dir = [base_dir]

  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)

  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # ES Files dictionary, we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)

  es_files[constants.ES_PARAMETERS_FILE] = True

  for (filename, _) in es_files.items():
    es_files[filename] = utils.PathJoin(es_dir, filename)

    try:
      st = os.stat(es_files[filename])
    except EnvironmentError as err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  parameters = []
  if constants.ES_PARAMETERS_FILE in es_files:
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError as err:
      return False, ("Error while reading the EXT parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    parameters = [v.split(None, 1) for v in parameters]

  es_obj = \
    objects.ExtStorage(name=name, path=es_dir,
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
                       supported_parameters=parameters)
  return True, es_obj
3231 def _ExtStorageEnvironment(unique_id, ext_params,
3232 size=None, grow=None, metadata=None):
3233 """Calculate the environment for an External Storage script.
3235 @type unique_id: tuple (driver, vol_name)
3236 @param unique_id: ExtStorage pool and name of the Volume
3237 @type ext_params: dict
3238 @param ext_params: the EXT parameters
3240 @param size: size of the Volume (in mebibytes)
3242 @param grow: new size of Volume after grow (in mebibytes)
3243 @type metadata: string
3244 @param metadata: metadata info of the Volume
3246 @return: dict of environment variables
3249 vol_name = unique_id[1]
3252 result["VOL_NAME"] = vol_name
3255 for pname, pvalue in ext_params.items():
3256 result["EXTP_%s" % pname.upper()] = str(pvalue)
3258 if size is not None:
3259 result["VOL_SIZE"] = size
3261 if grow is not None:
3262 result["VOL_NEW_SIZE"] = grow
3264 if metadata is not None:
3265 result["VOL_METADATA"] = metadata
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  log_dir = pathutils.LOG_ES_DIR
  if not os.path.isdir(log_dir):
    _ThrowError("Cannot find log directory: %s", log_dir)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = "%s-%s-%s-%s.log" % (kind, es_name, volume,
                                  utils.TimestampForFilename())
  return utils.PathJoin(log_dir, basename)
# Map from logical-disk type constants to their backend classes; used by
# FindDevice/Assemble/Create below to dispatch on disk.dev_type
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-based storage is only available when enabled at build time
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
def _VerifyDiskType(dev_type):
  """Make sure the given device type has a registered backend class.

  @raise errors.ProgrammerError: if dev_type is not a key of DEV_MAP

  """
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  @type disk: L{objects.Disk}
  @param disk: the disk object to check
  @raise errors.ProgrammerError: if any expected parameter is missing

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  # only raise when something is actually missing
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter
  @return: the device instance, or None if it is not attached

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter
  @return: the assembled device instance

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device
3360 def Create(disk, children, excl_stor):
3363 @type disk: L{objects.Disk}
3364 @param disk: the disk object to create
3365 @type children: list of L{bdev.BlockDev}
3366 @param children: the list of block devices that are children of the device
3367 represented by the disk parameter
3368 @type excl_stor: boolean
3369 @param excl_stor: Whether exclusive_storage is active
3372 _VerifyDiskType(disk.dev_type)
3373 _VerifyDiskParams(disk)
3374 device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
3375 disk.params, excl_stor)